From 5828512e0a1e497402a579955c4a56f919efc30a Mon Sep 17 00:00:00 2001 From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com> Date: Tue, 7 May 2024 00:34:18 -0700 Subject: [PATCH] chore: Update discovery artifacts (#2393) ## Deleted keys were detected in the following stable discovery artifacts: aiplatform v1 https://togithub.com/googleapis/google-api-python-client/commit/37a1410948751056857437d99bd1a9ff590955cb artifactregistry v1 https://togithub.com/googleapis/google-api-python-client/commit/359420a8ad25cd0be55f5cbcc90003534d456b29 contentwarehouse v1 https://togithub.com/googleapis/google-api-python-client/commit/3c604e4e2a604affd336a83a52888254a5240d0d doubleclickbidmanager v2 https://togithub.com/googleapis/google-api-python-client/commit/8b44cef05698eb58a04e5c90077dceb4b51c1f7e serviceconsumermanagement v1 https://togithub.com/googleapis/google-api-python-client/commit/792350b6ca6656b6be0cb3b539e9f2674614dc54 servicemanagement v1 https://togithub.com/googleapis/google-api-python-client/commit/f63b814b3fbd0784a76eca6ef3cacbd238fd3fae serviceusage v1 https://togithub.com/googleapis/google-api-python-client/commit/e554613ff01c5ec33b632f2d7824ed140344e7a7 sts v1 https://togithub.com/googleapis/google-api-python-client/commit/f976fc54e1981185e1e3fa2d4a5124c4e67ba451 ## Deleted keys were detected in the following pre-stable discovery artifacts: aiplatform v1beta1 https://togithub.com/googleapis/google-api-python-client/commit/37a1410948751056857437d99bd1a9ff590955cb cloudsupport v2beta https://togithub.com/googleapis/google-api-python-client/commit/5b232d3f8a52fe43eab525af08668c5fb3ccf297 discoveryengine v1alpha https://togithub.com/googleapis/google-api-python-client/commit/2b5366f4e2d4da9290bc239a89d6b2abd84cdb1e migrationcenter v1alpha1 https://togithub.com/googleapis/google-api-python-client/commit/654a81be321dda145f6a98399488dd18ad23b792 serviceconsumermanagement v1beta1 
https://togithub.com/googleapis/google-api-python-client/commit/792350b6ca6656b6be0cb3b539e9f2674614dc54 serviceusage v1beta1 https://togithub.com/googleapis/google-api-python-client/commit/e554613ff01c5ec33b632f2d7824ed140344e7a7 sts v1beta https://togithub.com/googleapis/google-api-python-client/commit/f976fc54e1981185e1e3fa2d4a5124c4e67ba451 ## Discovery Artifact Change Summary: feat(accesscontextmanager): update the api https://togithub.com/googleapis/google-api-python-client/commit/1f6482ca7e98551a3aa5dc24a51f0637cc02158c feat(admin): update the api https://togithub.com/googleapis/google-api-python-client/commit/72f93af2429e030378901b69d5f2ca14a2734e3b feat(aiplatform): update the api https://togithub.com/googleapis/google-api-python-client/commit/37a1410948751056857437d99bd1a9ff590955cb feat(alloydb): update the api https://togithub.com/googleapis/google-api-python-client/commit/509e9e6afb5716d97f3b659409a600e89d18f63e feat(artifactregistry): update the api https://togithub.com/googleapis/google-api-python-client/commit/359420a8ad25cd0be55f5cbcc90003534d456b29 feat(baremetalsolution): update the api https://togithub.com/googleapis/google-api-python-client/commit/9367a7efc0c9216ea7bca7747e2d9bc27598a7ac feat(beyondcorp): update the api https://togithub.com/googleapis/google-api-python-client/commit/b8a9669f46d0cd4d5e9ce69ee21dac5656c36771 feat(bigtableadmin): update the api https://togithub.com/googleapis/google-api-python-client/commit/f0cb589ad64aa51a3c966dd57df8f88bda2918d7 feat(cloudbuild): update the api https://togithub.com/googleapis/google-api-python-client/commit/b879625324b81a8e129c802018b85f88938005dc feat(cloudkms): update the api https://togithub.com/googleapis/google-api-python-client/commit/9e58b1c6294a283622315959fe0a4f98536b2b6e feat(cloudsupport): update the api https://togithub.com/googleapis/google-api-python-client/commit/5b232d3f8a52fe43eab525af08668c5fb3ccf297 feat(compute): update the api 
https://togithub.com/googleapis/google-api-python-client/commit/f79e1b03090128402e8183f26592c2af9b97edeb feat(config): update the api https://togithub.com/googleapis/google-api-python-client/commit/9dc826a922d30d8be7cdbcb28f0ab6493facfdde feat(contentwarehouse): update the api https://togithub.com/googleapis/google-api-python-client/commit/3c604e4e2a604affd336a83a52888254a5240d0d feat(dataflow): update the api https://togithub.com/googleapis/google-api-python-client/commit/eb85c1616efa50487f0cf8d7ad0e08dd8a83d9e9 feat(dataform): update the api https://togithub.com/googleapis/google-api-python-client/commit/920fde4570a9c42e2ac84fa879e44b9f7d35c26f feat(discoveryengine): update the api https://togithub.com/googleapis/google-api-python-client/commit/2b5366f4e2d4da9290bc239a89d6b2abd84cdb1e feat(documentai): update the api https://togithub.com/googleapis/google-api-python-client/commit/1e5c51a18ddbe6f1691f546a5a42e27fcf74f011 feat(doubleclickbidmanager): update the api https://togithub.com/googleapis/google-api-python-client/commit/8b44cef05698eb58a04e5c90077dceb4b51c1f7e feat(firebaseappcheck): update the api https://togithub.com/googleapis/google-api-python-client/commit/9049c39f026e225e6bc7bbd5700eba4982097fdd feat(firebaseml): update the api https://togithub.com/googleapis/google-api-python-client/commit/c508c9cec7416f5f6e78f79d7fc9bed2ed1f74fb feat(gkehub): update the api https://togithub.com/googleapis/google-api-python-client/commit/a277f6a37f7527165933bb8ea395587c7b4c788c feat(iap): update the api https://togithub.com/googleapis/google-api-python-client/commit/ce8869b0e1ea222575e85d2ca61bb21b6de23a74 feat(migrationcenter): update the api https://togithub.com/googleapis/google-api-python-client/commit/654a81be321dda145f6a98399488dd18ad23b792 feat(monitoring): update the api https://togithub.com/googleapis/google-api-python-client/commit/97a8aa363666b3406c6504946dc5f24321415541 feat(ondemandscanning): update the api 
https://togithub.com/googleapis/google-api-python-client/commit/76c7d295a168526aad69d31ae795e005ea0db484 feat(privateca): update the api https://togithub.com/googleapis/google-api-python-client/commit/69f6b173565058fbf0fa565ae66031cbce35cb17 feat(retail): update the api https://togithub.com/googleapis/google-api-python-client/commit/3efc01675f28842beceff50ae23ad299bface114 feat(run): update the api https://togithub.com/googleapis/google-api-python-client/commit/f3ad0246fc6ac2d4198e9c943b262a7d155f758c fix(secretmanager): update the api https://togithub.com/googleapis/google-api-python-client/commit/c001da44e765c0c66763be6f48fc6bc32c468a6e feat(serviceconsumermanagement): update the api https://togithub.com/googleapis/google-api-python-client/commit/792350b6ca6656b6be0cb3b539e9f2674614dc54 feat(servicecontrol): update the api https://togithub.com/googleapis/google-api-python-client/commit/7f793a39deda23d2e200d4968e7fecb6a6306da4 feat(servicemanagement): update the api https://togithub.com/googleapis/google-api-python-client/commit/f63b814b3fbd0784a76eca6ef3cacbd238fd3fae feat(serviceusage): update the api https://togithub.com/googleapis/google-api-python-client/commit/e554613ff01c5ec33b632f2d7824ed140344e7a7 feat(sheets): update the api https://togithub.com/googleapis/google-api-python-client/commit/de1a5c1a083ae768488e48421272392155706c13 feat(solar): update the api https://togithub.com/googleapis/google-api-python-client/commit/731579c329c4b647f358ce3105ee9767a073408c feat(spanner): update the api https://togithub.com/googleapis/google-api-python-client/commit/26b83e86f286d718d6651e66534c87c16906c8d5 feat(sts): update the api https://togithub.com/googleapis/google-api-python-client/commit/f976fc54e1981185e1e3fa2d4a5124c4e67ba451 feat(youtube): update the api https://togithub.com/googleapis/google-api-python-client/commit/b1b4c54fb01af138dda617c95ef4e4bce5eafb91 --- ...1.organizations.gcpUserAccessBindings.html | 24 + .../admin_directory_v1.chromeosdevices.html | 
36 +- ...tform_v1.projects.locations.endpoints.html | 8 + ...m_v1.projects.locations.featureGroups.html | 2 +- ..._v1.projects.locations.indexEndpoints.html | 36 +- ...latform_v1.projects.locations.indexes.html | 22 +- ...ts.locations.notebookRuntimeTemplates.html | 6 +- ....projects.locations.publishers.models.html | 8 + ...form_v1.projects.locations.tuningJobs.html | 40 +- docs/dyn/aiplatform_v1.publishers.models.html | 15 + docs/dyn/aiplatform_v1beta1.html | 5 + docs/dyn/aiplatform_v1beta1.media.html | 4 +- ...orm_v1beta1.projects.locations.agents.html | 91 + ....projects.locations.agents.operations.html | 268 + ...tform_v1beta1.projects.locations.apps.html | 91 + ...a1.projects.locations.apps.operations.html | 268 + ..._v1beta1.projects.locations.endpoints.html | 28 +- ...v1beta1.projects.locations.extensions.html | 20 +- ...eta1.projects.locations.featureGroups.html | 2 +- ...aiplatform_v1beta1.projects.locations.html | 64 + ...ta1.projects.locations.indexEndpoints.html | 36 +- ...rm_v1beta1.projects.locations.indexes.html | 22 +- ...eta1.projects.locations.modelMonitors.html | 4 - ...jects.locations.notebookExecutionJobs.html | 40 + ...ts.locations.notebookRuntimeTemplates.html | 6 +- ....projects.locations.publishers.models.html | 28 +- ...v1beta1.projects.locations.ragCorpora.html | 164 + ...rojects.locations.ragCorpora.ragFiles.html | 211 + ..._v1beta1.projects.locations.schedules.html | 324 + .../aiplatform_v1beta1.publishers.models.html | 30 + ...lloydb_v1.projects.locations.clusters.html | 18 + docs/dyn/artifactregistry_v1.html | 5 - ...projects.locations.repositories.files.html | 45 +- ...ry_v1.projects.locations.repositories.html | 27 +- ...ution_v2.projects.locations.instances.html | 137 + .../dyn/batch_v1.projects.locations.jobs.html | 8 +- ...jects.locations.jobs.taskGroups.tasks.html | 4 +- .../batch_v1.projects.locations.state.html | 4 +- ..._v1.projects.locations.appConnections.html | 10 + ...orp_v1.projects.locations.appGateways.html | 6 + 
...organizations.locations.subscriptions.html | 31 +- ...pha.projects.locations.appConnections.html | 10 + ...1alpha.projects.locations.appGateways.html | 6 + docs/dyn/bigquery_v2.tables.html | 14 +- .../bigtableadmin_v2.projects.instances.html | 6 + ...ects.instances.tables.authorizedViews.html | 2 +- docs/dyn/cloudbuild_v1.projects.builds.html | 40 + ...oudbuild_v1.projects.locations.builds.html | 40 + ...dbuild_v1.projects.locations.triggers.html | 60 + docs/dyn/cloudbuild_v1.projects.triggers.html | 60 + ....locations.deliveryPipelines.releases.html | 12 +- ...ddeploy_v1.projects.locations.targets.html | 16 +- docs/dyn/cloudkms_v1.folders.html | 141 + docs/dyn/cloudkms_v1.html | 5 + docs/dyn/cloudkms_v1.projects.html | 22 + docs/dyn/cloudkms_v1.projects.locations.html | 10 + ...dkms_v1.projects.locations.keyHandles.html | 187 + ...dkms_v1.projects.locations.operations.html | 124 + docs/dyn/cloudsupport_v2beta.cases.html | 7 +- docs/dyn/compute_beta.healthChecks.html | 48 +- docs/dyn/compute_beta.instanceTemplates.html | 16 +- docs/dyn/compute_beta.instances.html | 32 +- docs/dyn/compute_beta.machineImages.html | 24 +- ...ute_beta.organizationSecurityPolicies.html | 53 + docs/dyn/compute_beta.regionHealthChecks.html | 40 +- .../compute_beta.regionInstanceTemplates.html | 12 +- docs/dyn/compute_beta.regionInstances.html | 4 +- .../compute_beta.regionSecurityPolicies.html | 32 + docs/dyn/compute_beta.securityPolicies.html | 40 + docs/dyn/compute_beta.snapshotSettings.html | 8 +- ...fig_v1.projects.locations.deployments.html | 8 +- ...config_v1.projects.locations.previews.html | 15 + docs/dyn/dataflow_v1b3.projects.jobs.html | 56 +- ...dataflow_v1b3.projects.jobs.workItems.html | 10 + ...v1b3.projects.locations.flexTemplates.html | 8 +- ...dataflow_v1b3.projects.locations.jobs.html | 48 +- ...1b3.projects.locations.jobs.workItems.html | 10 + ...low_v1b3.projects.locations.templates.html | 16 +- .../dyn/dataflow_v1b3.projects.templates.html | 16 +- 
...tions.repositories.compilationResults.html | 12 + ...beta1.projects.locations.repositories.html | 27 +- ...ions.repositories.workflowInvocations.html | 12 + ...cts.locations.repositories.workspaces.html | 12 + ...ects.locations.collections.dataStores.html | 2 +- ...collections.dataStores.servingConfigs.html | 2 +- ...llections.dataStores.siteSearchEngine.html | 1 + ...taStores.siteSearchEngine.targetSites.html | 5 + ...ns.collections.engines.servingConfigs.html | 2 +- ...gine_v1.projects.locations.dataStores.html | 2 +- ...s.locations.dataStores.servingConfigs.html | 2 +- ...taStores.siteSearchEngine.targetSites.html | 5 + docs/dyn/discoveryengine_v1alpha.html | 5 - ...ections.dataStores.branches.documents.html | 3 +- ....collections.dataStores.conversations.html | 1 - ...s.collections.dataStores.customModels.html | 117 + ...ects.locations.collections.dataStores.html | 8 +- ...collections.dataStores.servingConfigs.html | 3 +- ...llections.dataStores.siteSearchEngine.html | 1 + ...taStores.siteSearchEngine.targetSites.html | 5 + ...ons.collections.engines.conversations.html | 1 - ...ns.collections.engines.servingConfigs.html | 3 +- ...cations.dataStores.branches.documents.html | 3 +- ...ts.locations.dataStores.conversations.html | 1 - ...v1alpha.projects.locations.dataStores.html | 2 +- ...s.locations.dataStores.servingConfigs.html | 3 +- ...taStores.siteSearchEngine.targetSites.html | 5 + ...a.projects.locations.groundingConfigs.html | 7 +- ...s.collections.dataStores.customModels.html | 117 + ...ects.locations.collections.dataStores.html | 8 +- ...collections.dataStores.servingConfigs.html | 2 +- ...llections.dataStores.siteSearchEngine.html | 1 + ...taStores.siteSearchEngine.targetSites.html | 5 + ...ns.collections.engines.servingConfigs.html | 2 +- ..._v1beta.projects.locations.dataStores.html | 2 +- ...s.locations.dataStores.servingConfigs.html | 2 +- ...taStores.siteSearchEngine.targetSites.html | 5 + ...a.projects.locations.groundingConfigs.html | 7 +- 
docs/dyn/dlp_v2.infoTypes.html | 2 +- docs/dyn/dlp_v2.locations.infoTypes.html | 2 +- ...projects.locations.processors.dataset.html | 3 + ...v1beta3.projects.locations.processors.html | 9 + ...ocations.processors.humanReviewConfig.html | 6 + ...ocations.processors.processorVersions.html | 9 + .../dyn/doubleclickbidmanager_v2.queries.html | 210 - ...bleclickbidmanager_v2.queries.reports.html | 84 - ...appcheck_v1.projects.apps.debugTokens.html | 6 + ...heck_v1beta.projects.apps.debugTokens.html | 6 + ....projects.locations.publishers.models.html | 2 + ...gkehub_v1.projects.locations.features.html | 40 + ...b_v1alpha.projects.locations.features.html | 36 + ...ub_v1beta.projects.locations.features.html | 40 + docs/dyn/iap_v1.v1.html | 39 + docs/dyn/index.md | 1 - docs/dyn/logging_v2.entries.html | 4 +- ...er_v1alpha1.projects.locations.assets.html | 411 +- ...rojects.locations.sources.errorFrames.html | 114 +- .../monitoring_v1.projects.dashboards.html | 6 + ...toring_v3.projects.uptimeCheckConfigs.html | 12 +- ...jects.locations.scans.vulnerabilities.html | 5 + ...jects.locations.scans.vulnerabilities.html | 5 + ...ations.caPools.certificateAuthorities.html | 8 + ...illingAccounts.locations.insightTypes.html | 8 +- ...illingAccounts.locations.recommenders.html | 8 +- ....organizations.locations.insightTypes.html | 8 +- ....organizations.locations.recommenders.html | 8 +- ...er_v1.projects.locations.insightTypes.html | 8 +- ...er_v1.projects.locations.recommenders.html | 8 +- ...illingAccounts.locations.insightTypes.html | 8 +- ...illingAccounts.locations.recommenders.html | 8 +- ....organizations.locations.insightTypes.html | 8 +- ....organizations.locations.recommenders.html | 8 +- ...beta1.projects.locations.insightTypes.html | 8 +- ...beta1.projects.locations.recommenders.html | 8 +- ...retail_v2.projects.locations.catalogs.html | 5 +- ...il_v2beta.projects.locations.catalogs.html | 5 +- .../dyn/run_v1.namespaces.configurations.html | 6 + 
docs/dyn/run_v1.namespaces.revisions.html | 6 + docs/dyn/run_v1.namespaces.services.html | 18 + ..._v1.projects.locations.configurations.html | 6 + .../run_v1.projects.locations.revisions.html | 6 + .../run_v1.projects.locations.services.html | 18 + .../run_v2.projects.locations.services.html | 12 + ...projects.locations.services.revisions.html | 6 + ...servicemanagement_v1.services.configs.html | 8 +- docs/dyn/servicemanagement_v1.services.html | 2 +- docs/dyn/serviceusage_v1.services.html | 6 +- docs/dyn/serviceusage_v1beta1.services.html | 4 +- docs/dyn/sheets_v4.spreadsheets.html | 56 + docs/dyn/solar_v1.dataLayers.html | 5 +- ...spanner_v1.projects.instances.backups.html | 83 +- ...anner_v1.projects.instances.databases.html | 15 + ...projects.instances.databases.sessions.html | 32 +- docs/dyn/sqladmin_v1.instances.html | 130 +- docs/dyn/sqladmin_v1.projects.instances.html | 6 +- docs/dyn/sqladmin_v1beta4.instances.html | 110 +- .../sqladmin_v1beta4.projects.instances.html | 6 +- docs/dyn/storagetransfer_v1.transferJobs.html | 6 + docs/dyn/walletobjects_v1.genericobject.html | 18 +- ...orkstationClusters.workstationConfigs.html | 20 +- ...orkstationClusters.workstationConfigs.html | 20 +- docs/dyn/youtube_v3.youtube.v3.html | 5 + docs/dyn/youtube_v3.youtube.v3.liveChat.html | 91 + ...utube_v3.youtube.v3.liveChat.messages.html | 205 + .../acceleratedmobilepageurl.v1.json | 2 +- .../documents/accessapproval.v1.json | 4 +- .../documents/accesscontextmanager.v1.json | 24 +- .../discovery_cache/documents/acmedns.v1.json | 2 +- .../documents/addressvalidation.v1.json | 2 +- .../documents/admin.datatransfer_v1.json | 2 +- .../documents/admin.directory_v1.json | 25 +- .../documents/admin.reports_v1.json | 2 +- .../discovery_cache/documents/admob.v1.json | 2 +- .../documents/admob.v1beta.json | 2 +- .../discovery_cache/documents/adsense.v2.json | 2 +- .../documents/aiplatform.v1.json | 320 +- .../documents/aiplatform.v1beta1.json | 1564 +- 
.../documents/alertcenter.v1beta1.json | 2 +- .../discovery_cache/documents/alloydb.v1.json | 23 +- .../documents/alloydb.v1alpha.json | 5 +- .../documents/alloydb.v1beta.json | 5 +- .../documents/analyticsadmin.v1alpha.json | 2 +- .../documents/analyticsadmin.v1beta.json | 2 +- .../documents/analyticsdata.v1beta.json | 2 +- .../documents/analyticshub.v1.json | 2 +- .../documents/analyticshub.v1beta1.json | 2 +- .../androiddeviceprovisioning.v1.json | 2 +- .../documents/androidenterprise.v1.json | 2 +- .../documents/androidpublisher.v3.json | 2 +- .../documents/apigateway.v1.json | 2 +- .../documents/apigateway.v1beta.json | 2 +- .../discovery_cache/documents/apigee.v1.json | 2 +- .../discovery_cache/documents/apikeys.v2.json | 2 +- .../documents/appengine.v1.json | 2 +- .../documents/appengine.v1alpha.json | 2 +- .../documents/appengine.v1beta.json | 2 +- .../discovery_cache/documents/apphub.v1.json | 2 +- .../documents/apphub.v1alpha.json | 2 +- .../documents/area120tables.v1alpha1.json | 2 +- .../documents/artifactregistry.v1.json | 75 +- .../documents/artifactregistry.v1beta1.json | 2 +- .../documents/artifactregistry.v1beta2.json | 2 +- .../documents/assuredworkloads.v1beta1.json | 2 +- .../documents/baremetalsolution.v2.json | 120 +- .../discovery_cache/documents/batch.v1.json | 4 +- .../documents/beyondcorp.v1.json | 22 +- .../documents/beyondcorp.v1alpha.json | 67 +- .../discovery_cache/documents/biglake.v1.json | 2 +- .../documents/bigquery.v2.json | 4 +- .../documents/bigqueryconnection.v1.json | 2 +- .../documents/bigqueryconnection.v1beta1.json | 2 +- .../documents/bigquerydatapolicy.v1.json | 2 +- .../documents/bigquerydatatransfer.v1.json | 2 +- .../documents/bigqueryreservation.v1.json | 2 +- .../documents/bigtableadmin.v2.json | 11 +- .../documents/binaryauthorization.v1.json | 2 +- .../binaryauthorization.v1beta1.json | 2 +- .../documents/blockchainnodeengine.v1.json | 2 +- .../discovery_cache/documents/blogger.v2.json | 2 +- 
.../discovery_cache/documents/blogger.v3.json | 2 +- .../businessprofileperformance.v1.json | 2 +- .../documents/calendar.v3.json | 2 +- .../discovery_cache/documents/chat.v1.json | 2 +- .../documents/checks.v1alpha.json | 2 +- .../documents/chromemanagement.v1.json | 2 +- .../documents/chromepolicy.v1.json | 2 +- .../documents/civicinfo.v2.json | 2 +- .../documents/classroom.v1.json | 2 +- .../documents/cloudasset.v1.json | 2 +- .../documents/cloudasset.v1beta1.json | 2 +- .../documents/cloudasset.v1p1beta1.json | 2 +- .../documents/cloudasset.v1p5beta1.json | 2 +- .../documents/cloudasset.v1p7beta1.json | 2 +- .../documents/cloudbuild.v1.json | 51 +- .../documents/cloudbuild.v2.json | 65 +- .../documents/cloudchannel.v1.json | 2 +- .../documents/cloudcontrolspartner.v1.json | 2 +- .../cloudcontrolspartner.v1beta.json | 2 +- .../documents/clouddeploy.v1.json | 6 +- .../documents/cloudfunctions.v1.json | 2 +- .../documents/cloudfunctions.v2.json | 2 +- .../documents/cloudfunctions.v2alpha.json | 2 +- .../documents/cloudfunctions.v2beta.json | 2 +- .../documents/cloudidentity.v1.json | 2 +- .../documents/cloudidentity.v1beta1.json | 2 +- .../documents/cloudkms.v1.json | 351 +- .../documents/cloudprofiler.v2.json | 2 +- .../cloudresourcemanager.v2beta1.json | 2 +- .../documents/cloudshell.v1.json | 2 +- .../documents/cloudsupport.v2.json | 2 +- .../documents/cloudsupport.v2beta.json | 14 +- .../documents/cloudtrace.v1.json | 2 +- .../documents/cloudtrace.v2.json | 2 +- .../documents/cloudtrace.v2beta1.json | 2 +- .../documents/composer.v1.json | 2 +- .../documents/composer.v1beta1.json | 2 +- .../documents/compute.beta.json | 45 +- .../discovery_cache/documents/config.v1.json | 20 +- .../documents/contactcenterinsights.v1.json | 2 +- .../documents/content.v2.1.json | 2 +- .../documents/contentwarehouse.v1.json | 151168 +-------------- .../documents/customsearch.v1.json | 2 +- .../documents/dataflow.v1b3.json | 67 +- .../documents/dataform.v1beta1.json | 50 +- 
.../documents/datamigration.v1.json | 4 +- .../documents/datamigration.v1beta1.json | 2 +- .../documents/dataplex.v1.json | 2 +- .../documents/dataportability.v1.json | 2 +- .../documents/dataportability.v1beta.json | 2 +- .../documents/dialogflow.v2.json | 2 +- .../documents/dialogflow.v2beta1.json | 2 +- .../documents/dialogflow.v3.json | 8 +- .../documents/dialogflow.v3beta1.json | 8 +- .../documents/digitalassetlinks.v1.json | 2 +- .../documents/discoveryengine.v1.json | 161 +- .../documents/discoveryengine.v1alpha.json | 846 +- .../documents/discoveryengine.v1beta.json | 212 +- .../documents/displayvideo.v2.json | 2 +- .../documents/displayvideo.v3.json | 2 +- .../discovery_cache/documents/dlp.v2.json | 6 +- .../discovery_cache/documents/dns.v1.json | 2 +- .../documents/dns.v1beta2.json | 2 +- .../discovery_cache/documents/docs.v1.json | 2 +- .../documents/documentai.v1.json | 2 +- .../documents/documentai.v1beta2.json | 2 +- .../documents/documentai.v1beta3.json | 9 +- .../documents/domainsrdap.v1.json | 2 +- .../documents/doubleclickbidmanager.v2.json | 165 +- .../documents/doubleclicksearch.v2.json | 2 +- .../discovery_cache/documents/drive.v2.json | 2 +- .../discovery_cache/documents/drive.v3.json | 2 +- .../documents/driveactivity.v2.json | 2 +- .../documents/drivelabels.v2.json | 2 +- .../documents/drivelabels.v2beta.json | 2 +- .../documents/essentialcontacts.v1.json | 2 +- .../documents/eventarc.v1.json | 2 +- .../discovery_cache/documents/fcm.v1.json | 2 +- .../documents/fcmdata.v1beta1.json | 2 +- .../documents/firebase.v1beta1.json | 2 +- .../documents/firebaseappcheck.v1.json | 8 +- .../documents/firebaseappcheck.v1beta.json | 8 +- .../documents/firebaseappdistribution.v1.json | 2 +- .../firebaseappdistribution.v1alpha.json | 2 +- .../documents/firebasedatabase.v1beta.json | 2 +- .../documents/firebasedynamiclinks.v1.json | 2 +- .../documents/firebasehosting.v1.json | 2 +- .../documents/firebasehosting.v1beta1.json | 2 +- 
.../documents/firebaseml.v1.json | 2 +- .../documents/firebaseml.v1beta2.json | 2 +- .../documents/firebaseml.v2beta.json | 19 +- .../documents/firebasestorage.v1beta.json | 2 +- .../discovery_cache/documents/fitness.v1.json | 2 +- .../discovery_cache/documents/games.v1.json | 2 +- .../gamesConfiguration.v1configuration.json | 2 +- .../gamesManagement.v1management.json | 2 +- .../documents/gkebackup.v1.json | 2 +- .../discovery_cache/documents/gkehub.v1.json | 75 +- .../documents/gkehub.v1alpha.json | 58 +- .../documents/gkehub.v1beta.json | 75 +- .../documents/gkehub.v1beta1.json | 2 +- .../documents/gkehub.v2alpha.json | 2 +- .../discovery_cache/documents/gmail.v1.json | 2 +- .../documents/gmailpostmastertools.v1.json | 2 +- .../gmailpostmastertools.v1beta1.json | 2 +- .../documents/groupsmigration.v1.json | 2 +- .../documents/homegraph.v1.json | 2 +- .../documents/iamcredentials.v1.json | 2 +- .../discovery_cache/documents/iap.v1.json | 59 +- .../documents/iap.v1beta1.json | 2 +- .../documents/identitytoolkit.v1.json | 2 +- .../documents/identitytoolkit.v2.json | 2 +- .../discovery_cache/documents/keep.v1.json | 2 +- .../documents/kgsearch.v1.json | 2 +- .../documents/kmsinventory.v1.json | 2 +- .../documents/language.v1.json | 2 +- .../documents/language.v1beta2.json | 2 +- .../documents/language.v2.json | 2 +- .../documents/libraryagent.v1.json | 2 +- .../documents/licensing.v1.json | 2 +- .../documents/lifesciences.v2beta.json | 2 +- .../documents/localservices.v1.json | 2 +- .../discovery_cache/documents/logging.v2.json | 6 +- .../discovery_cache/documents/looker.v1.json | 2 +- .../marketingplatformadmin.v1alpha.json | 2 +- .../documents/metastore.v1.json | 2 +- .../documents/metastore.v1alpha.json | 2 +- .../documents/metastore.v1beta.json | 2 +- .../documents/migrationcenter.v1.json | 2 +- .../documents/migrationcenter.v1alpha1.json | 291 +- .../discovery_cache/documents/ml.v1.json | 2 +- .../documents/monitoring.v1.json | 6 +- 
.../documents/monitoring.v3.json | 4 +- .../mybusinessaccountmanagement.v1.json | 2 +- .../mybusinessbusinessinformation.v1.json | 2 +- .../documents/mybusinesslodging.v1.json | 2 +- .../documents/mybusinessnotifications.v1.json | 2 +- .../documents/mybusinessplaceactions.v1.json | 2 +- .../documents/mybusinessqanda.v1.json | 2 +- .../documents/mybusinessverifications.v1.json | 2 +- .../documents/networkconnectivity.v1.json | 2 +- .../networkconnectivity.v1alpha1.json | 2 +- .../documents/networkmanagement.v1.json | 6 +- .../documents/networkmanagement.v1beta1.json | 6 +- .../documents/notebooks.v1.json | 2 +- .../documents/notebooks.v2.json | 2 +- .../documents/ondemandscanning.v1.json | 25 +- .../documents/ondemandscanning.v1beta1.json | 25 +- .../documents/orgpolicy.v2.json | 2 +- .../documents/osconfig.v1.json | 2 +- .../documents/osconfig.v1alpha.json | 2 +- .../documents/osconfig.v1beta.json | 2 +- .../discovery_cache/documents/oslogin.v1.json | 2 +- .../documents/oslogin.v1alpha.json | 2 +- .../documents/oslogin.v1beta.json | 2 +- .../documents/pagespeedonline.v5.json | 2 +- .../paymentsresellersubscription.v1.json | 2 +- .../discovery_cache/documents/people.v1.json | 2 +- .../discovery_cache/documents/places.v1.json | 2 +- .../documents/playcustomapp.v1.json | 2 +- .../playdeveloperreporting.v1alpha1.json | 2 +- .../playdeveloperreporting.v1beta1.json | 2 +- .../documents/playgrouping.v1alpha1.json | 2 +- .../documents/playintegrity.v1.json | 2 +- .../documents/policyanalyzer.v1.json | 2 +- .../documents/policyanalyzer.v1beta1.json | 2 +- .../documents/policysimulator.v1.json | 2 +- .../documents/policysimulator.v1alpha.json | 2 +- .../documents/policysimulator.v1beta.json | 2 +- .../documents/privateca.v1.json | 12 +- .../documents/privateca.v1beta1.json | 2 +- .../documents/prod_tt_sasportal.v1alpha1.json | 2 +- .../documents/publicca.v1.json | 2 +- .../documents/publicca.v1alpha1.json | 2 +- .../documents/publicca.v1beta1.json | 2 +- 
.../discovery_cache/documents/pubsub.v1.json | 2 +- .../documents/pubsub.v1beta1a.json | 2 +- .../documents/pubsub.v1beta2.json | 2 +- .../documents/pubsublite.v1.json | 2 +- .../rapidmigrationassessment.v1.json | 2 +- .../readerrevenuesubscriptionlinking.v1.json | 2 +- .../documents/recaptchaenterprise.v1.json | 2 +- .../documents/recommender.v1.json | 18 +- .../documents/recommender.v1beta1.json | 18 +- .../discovery_cache/documents/redis.v1.json | 5 +- .../documents/redis.v1beta1.json | 5 +- .../documents/reseller.v1.json | 2 +- .../documents/resourcesettings.v1.json | 2 +- .../discovery_cache/documents/retail.v2.json | 7 +- .../documents/retail.v2alpha.json | 2 +- .../documents/retail.v2beta.json | 7 +- .../discovery_cache/documents/run.v1.json | 58 +- .../discovery_cache/documents/run.v2.json | 70 +- .../documents/safebrowsing.v4.json | 2 +- .../documents/safebrowsing.v5.json | 2 +- .../documents/sasportal.v1alpha1.json | 2 +- .../discovery_cache/documents/script.v1.json | 2 +- .../documents/searchconsole.v1.json | 2 +- .../documents/secretmanager.v1.json | 12 +- .../documents/secretmanager.v1beta1.json | 12 +- .../documents/secretmanager.v1beta2.json | 12 +- .../serviceconsumermanagement.v1.json | 5 +- .../serviceconsumermanagement.v1beta1.json | 5 +- .../documents/servicecontrol.v1.json | 61 +- .../documents/servicecontrol.v2.json | 61 +- .../documents/servicedirectory.v1.json | 2 +- .../documents/servicedirectory.v1beta1.json | 2 +- .../documents/servicemanagement.v1.json | 5 +- .../documents/servicenetworking.v1.json | 2 +- .../documents/servicenetworking.v1beta.json | 2 +- .../documents/serviceusage.v1.json | 5 +- .../documents/serviceusage.v1beta1.json | 5 +- .../discovery_cache/documents/sheets.v4.json | 99 +- .../discovery_cache/documents/slides.v1.json | 2 +- .../documents/smartdevicemanagement.v1.json | 2 +- .../discovery_cache/documents/solar.v1.json | 7 +- .../documents/sourcerepo.v1.json | 2 +- .../discovery_cache/documents/spanner.v1.json | 39 +- 
.../discovery_cache/documents/speech.v1.json | 2 +- .../documents/speech.v1p1beta1.json | 2 +- .../documents/sqladmin.v1.json | 68 +- .../documents/sqladmin.v1beta4.json | 62 +- .../discovery_cache/documents/storage.v1.json | 4 +- .../documents/streetviewpublish.v1.json | 2 +- .../discovery_cache/documents/sts.v1.json | 16 +- .../discovery_cache/documents/sts.v1beta.json | 16 +- .../documents/tagmanager.v1.json | 2 +- .../documents/tagmanager.v2.json | 2 +- .../discovery_cache/documents/tasks.v1.json | 2 +- .../discovery_cache/documents/testing.v1.json | 2 +- .../documents/texttospeech.v1.json | 2 +- .../documents/texttospeech.v1beta1.json | 2 +- .../documents/toolresults.v1beta3.json | 2 +- .../discovery_cache/documents/tpu.v1.json | 2 +- .../documents/tpu.v1alpha1.json | 2 +- .../discovery_cache/documents/tpu.v2.json | 2 +- .../documents/tpu.v2alpha1.json | 2 +- .../documents/trafficdirector.v2.json | 2 +- .../documents/trafficdirector.v3.json | 2 +- .../documents/transcoder.v1.json | 2 +- .../documents/travelimpactmodel.v1.json | 2 +- .../discovery_cache/documents/vault.v1.json | 2 +- .../documents/versionhistory.v1.json | 2 +- .../discovery_cache/documents/vision.v1.json | 2 +- .../documents/vmwareengine.v1.json | 2 +- .../documents/walletobjects.v1.json | 4 +- .../documents/workflowexecutions.v1.json | 2 +- .../documents/workflowexecutions.v1beta.json | 2 +- .../documents/workflows.v1.json | 2 +- .../documents/workflows.v1beta.json | 2 +- .../documents/workspaceevents.v1.json | 4 +- .../documents/workstations.v1.json | 6 +- .../documents/workstations.v1beta.json | 6 +- .../discovery_cache/documents/youtube.v3.json | 43 +- .../documents/youtubeAnalytics.v2.json | 2 +- .../documents/youtubereporting.v1.json | 2 +- 494 files changed, 12856 insertions(+), 150492 deletions(-) create mode 100644 docs/dyn/aiplatform_v1beta1.projects.locations.agents.html create mode 100644 docs/dyn/aiplatform_v1beta1.projects.locations.agents.operations.html create mode 100644 
docs/dyn/aiplatform_v1beta1.projects.locations.apps.html create mode 100644 docs/dyn/aiplatform_v1beta1.projects.locations.apps.operations.html create mode 100644 docs/dyn/cloudkms_v1.folders.html create mode 100644 docs/dyn/cloudkms_v1.projects.locations.keyHandles.html create mode 100644 docs/dyn/cloudkms_v1.projects.locations.operations.html create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html create mode 100644 docs/dyn/youtube_v3.youtube.v3.liveChat.html create mode 100644 docs/dyn/youtube_v3.youtube.v3.liveChat.messages.html diff --git a/docs/dyn/accesscontextmanager_v1.organizations.gcpUserAccessBindings.html b/docs/dyn/accesscontextmanager_v1.organizations.gcpUserAccessBindings.html index dbbc57dd258..14431a610a6 100644 --- a/docs/dyn/accesscontextmanager_v1.organizations.gcpUserAccessBindings.html +++ b/docs/dyn/accesscontextmanager_v1.organizations.gcpUserAccessBindings.html @@ -119,6 +119,12 @@

Method Details

], "groupKey": "A String", # Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the [G Suite Directory API's Groups resource] (https://developers.google.com/admin-sdk/directory/v1/reference/groups#resource). If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht" "name": "A String", # Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by [RFC 3986 Section 2.3](https://tools.ietf.org/html/rfc3986#section-2.3)). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N" + "restrictedClientApplications": [ # Optional. A list of applications that are subject to this binding's restrictions. If the list is empty, the binding restrictions will universally apply to all applications. + { # An application that accesses Google Cloud APIs. + "clientId": "A String", # The OAuth client ID of the application. + "name": "A String", # The name of the application. Example: "Cloud Console" + }, + ], } x__xgafv: string, V1 error format. @@ -208,6 +214,12 @@

Method Details

], "groupKey": "A String", # Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the [G Suite Directory API's Groups resource] (https://developers.google.com/admin-sdk/directory/v1/reference/groups#resource). If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht" "name": "A String", # Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by [RFC 3986 Section 2.3](https://tools.ietf.org/html/rfc3986#section-2.3)). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N" + "restrictedClientApplications": [ # Optional. A list of applications that are subject to this binding's restrictions. If the list is empty, the binding restrictions will universally apply to all applications. + { # An application that accesses Google Cloud APIs. + "clientId": "A String", # The OAuth client ID of the application. + "name": "A String", # The name of the application. Example: "Cloud Console" + }, + ], } @@ -238,6 +250,12 @@

Method Details

], "groupKey": "A String", # Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the [G Suite Directory API's Groups resource] (https://developers.google.com/admin-sdk/directory/v1/reference/groups#resource). If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht" "name": "A String", # Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by [RFC 3986 Section 2.3](https://tools.ietf.org/html/rfc3986#section-2.3)). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N" + "restrictedClientApplications": [ # Optional. A list of applications that are subject to this binding's restrictions. If the list is empty, the binding restrictions will universally apply to all applications. + { # An application that accesses Google Cloud APIs. + "clientId": "A String", # The OAuth client ID of the application. + "name": "A String", # The name of the application. Example: "Cloud Console" + }, + ], }, ], "nextPageToken": "A String", # Token to get the next page of items. If blank, there are no more items. @@ -276,6 +294,12 @@

Method Details

], "groupKey": "A String", # Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the [G Suite Directory API's Groups resource] (https://developers.google.com/admin-sdk/directory/v1/reference/groups#resource). If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht" "name": "A String", # Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by [RFC 3986 Section 2.3](https://tools.ietf.org/html/rfc3986#section-2.3)). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N" + "restrictedClientApplications": [ # Optional. A list of applications that are subject to this binding's restrictions. If the list is empty, the binding restrictions will universally apply to all applications. + { # An application that accesses Google Cloud APIs. + "clientId": "A String", # The OAuth client ID of the application. + "name": "A String", # The name of the application. Example: "Cloud Console" + }, + ], } updateMask: string, Required. Only the fields specified in this mask are updated. Because name and group_key cannot be changed, update_mask is required and may only contain the following fields: `access_levels`, `dry_run_access_levels`. update_mask { paths: "access_levels" } diff --git a/docs/dyn/admin_directory_v1.chromeosdevices.html b/docs/dyn/admin_directory_v1.chromeosdevices.html index a671faaf7c7..0f8eebd7e08 100644 --- a/docs/dyn/admin_directory_v1.chromeosdevices.html +++ b/docs/dyn/admin_directory_v1.chromeosdevices.html @@ -155,7 +155,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -224,6 +225,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. @@ -339,7 +343,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -408,6 +413,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. @@ -538,7 +546,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -607,6 +616,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. @@ -696,7 +708,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -765,6 +778,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. @@ -853,7 +869,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -922,6 +939,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. @@ -1011,7 +1031,8 @@

Method Details

"annotatedAssetId": "A String", # The asset identifier as noted by an administrator or specified during enrollment. "annotatedLocation": "A String", # The address or location of the device as noted by the administrator. Maximum length is `200` characters. Empty values are allowed. "annotatedUser": "A String", # The user of the device as noted by the administrator. Maximum length is 100 characters. Empty values are allowed. - "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support + "autoUpdateExpiration": "A String", # (Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use "autoUpdateThrough" instead. + "autoUpdateThrough": "A String", # Output only. The timestamp after which the device will stop receiving Chrome updates or support. "backlightInfo": [ # Output only. Contains backlight information for the device. { # Information about the device's backlights. "brightness": 42, # Output only. Current brightness of the backlight, between 0 and max_brightness. @@ -1080,6 +1101,9 @@

Method Details

"etag": "A String", # ETag of the resource. "ethernetMacAddress": "A String", # The device's MAC address on the ethernet network interface. "ethernetMacAddress0": "A String", # (Read-only) MAC address used by the Chromebook’s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices. + "extendedSupportEligible": True or False, # Output only. Whether or not the device requires the extended support opt in. + "extendedSupportEnabled": True or False, # Output only. Whether extended support policy is enabled on the device. + "extendedSupportStart": "A String", # Output only. Date of the device when extended support policy for automatic updates starts. "firmwareVersion": "A String", # The Chrome device's firmware version. "firstEnrollmentTime": "A String", # Date and time for the first time the device was enrolled. "kind": "admin#directory#chromeosdevice", # The type of resource. For the Chromeosdevices resource, the value is `admin#directory#chromeosdevice`. diff --git a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html index 384f947856f..e1c683d47b8 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html @@ -1259,6 +1259,10 @@

Method Details

"finishMessage": "A String", # Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "groundingMetadata": { # Metadata returned to client when grounding is enabled. # Output only. Metadata specifies sources used to ground generated content. + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -2755,6 +2759,10 @@

Method Details

"finishMessage": "A String", # Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "groundingMetadata": { # Metadata returned to client when grounding is enabled. # Output only. Metadata specifies sources used to ground generated content. + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.featureGroups.html b/docs/dyn/aiplatform_v1.projects.locations.featureGroups.html index 1dde4114bf0..9c7f5396d92 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.featureGroups.html +++ b/docs/dyn/aiplatform_v1.projects.locations.featureGroups.html @@ -116,7 +116,7 @@

Method Details

Creates a new FeatureGroup in a given project and location.
 
 Args:
-  parent: string, Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}'` (required)
+  parent: string, Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}` (required)
   body: object, The request body.
     The object takes the form of:
 
diff --git a/docs/dyn/aiplatform_v1.projects.locations.indexEndpoints.html b/docs/dyn/aiplatform_v1.projects.locations.indexEndpoints.html
index 9c9df7d2c88..32f76907faa 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.indexEndpoints.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.indexEndpoints.html
@@ -387,7 +387,7 @@ 

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -410,10 +410,21 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, "fractionLeafNodesToSearchOverride": 3.14, # The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. If not set or set to 0.0, query uses the default value specified in NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. "neighborCount": 42, # The number of nearest neighbors to be retrieved from database for each query. If not set, will use the default from the service configuration (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config). "perCrowdingAttributeNeighborCount": 42, # Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. + "rrf": { # Parameters for RRF algorithm that combines search results. # Optional. Represents RRF algorithm that combines search results. + "alpha": 3.14, # Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. 
+ }, }, ], "returnFullDatapoint": True or False, # If set to true, the full datapoints (including all vector values and restricts) of the nearest neighbors are returned. Note that returning full datapoint will significantly increase the latency and cost of the query. @@ -438,7 +449,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -461,8 +472,17 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, - "distance": 3.14, # The distance between the neighbor and the query vector. + "distance": 3.14, # The distance between the neighbor and the dense embedding query. + "sparseDistance": 3.14, # The distance between the neighbor and the query sparse_embedding. }, ], }, @@ -985,7 +1005,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -1008,6 +1028,14 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, ], }
diff --git a/docs/dyn/aiplatform_v1.projects.locations.indexes.html b/docs/dyn/aiplatform_v1.projects.locations.indexes.html index 9bb95ad6875..c5ec9a7dd28 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.indexes.html +++ b/docs/dyn/aiplatform_v1.projects.locations.indexes.html @@ -138,7 +138,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -245,7 +246,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -295,7 +297,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -351,7 +354,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -436,7 +440,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -459,6 +463,14 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, ], "updateMask": "A String", # Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. diff --git a/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimeTemplates.html b/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimeTemplates.html index 7c44148bf5c..83eda7c6db2 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimeTemplates.html +++ b/docs/dyn/aiplatform_v1.projects.locations.notebookRuntimeTemplates.html @@ -143,7 +143,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) @@ -273,7 +273,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) @@ -382,7 +382,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) diff --git a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html index 49fc654f990..676fec7de38 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html @@ -412,6 +412,10 @@

Method Details

"finishMessage": "A String", # Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "groundingMetadata": { # Metadata returned to client when grounding is enabled. # Output only. Metadata specifies sources used to ground generated content. + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -919,6 +923,10 @@

Method Details

"finishMessage": "A String", # Output only. Describes the reason the mode stopped generating tokens in more detail. This is only filled when `finish_reason` is set. "finishReason": "A String", # Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. "groundingMetadata": { # Metadata returned to client when grounding is enabled. # Output only. Metadata specifies sources used to ground generated content. + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html index 56c4db6bc38..1628f239617 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.html @@ -137,7 +137,7 @@

Method Details

The object takes the form of: { # Represents a TuningJob that runs with Google owned models. - "baseModel": "A String", # Model name for tuning, e.g., "gemini-1.0-pro-002". + "baseModel": "A String", # The base model that is being tuned, e.g., "gemini-1.0-pro-002". "createTime": "A String", # Output only. Time when the TuningJob was created. "description": "A String", # Optional. The description of the TuningJob. "endTime": "A String", # Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. @@ -160,11 +160,11 @@

Method Details

"supervisedTuningSpec": { # Tuning Spec for Supervised Tuning. # Tuning Spec for Supervised Fine Tuning. "hyperParameters": { # Hyperparameters for SFT. # Optional. Hyperparameters for SFT. "adapterSize": "A String", # Optional. Adapter size for tuning. - "epochCount": "A String", # Optional. Number of training epoches for this tuning job. - "learningRateMultiplier": 3.14, # Optional. Learning rate multiplier for tuning. + "epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. + "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. }, - "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. - "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. + "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. + "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. }, "tunedModel": { # The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. # Output only. The tuned model resources assiociated with this TuningJob. "endpoint": "A String", # Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. @@ -273,7 +273,7 @@

Method Details

An object of the form: { # Represents a TuningJob that runs with Google owned models. - "baseModel": "A String", # Model name for tuning, e.g., "gemini-1.0-pro-002". + "baseModel": "A String", # The base model that is being tuned, e.g., "gemini-1.0-pro-002". "createTime": "A String", # Output only. Time when the TuningJob was created. "description": "A String", # Optional. The description of the TuningJob. "endTime": "A String", # Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. @@ -296,11 +296,11 @@

Method Details

"supervisedTuningSpec": { # Tuning Spec for Supervised Tuning. # Tuning Spec for Supervised Fine Tuning. "hyperParameters": { # Hyperparameters for SFT. # Optional. Hyperparameters for SFT. "adapterSize": "A String", # Optional. Adapter size for tuning. - "epochCount": "A String", # Optional. Number of training epoches for this tuning job. - "learningRateMultiplier": 3.14, # Optional. Learning rate multiplier for tuning. + "epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. + "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. }, - "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. - "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. + "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. + "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. }, "tunedModel": { # The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. # Output only. The tuned model resources assiociated with this TuningJob. "endpoint": "A String", # Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. @@ -416,7 +416,7 @@

Method Details

An object of the form: { # Represents a TuningJob that runs with Google owned models. - "baseModel": "A String", # Model name for tuning, e.g., "gemini-1.0-pro-002". + "baseModel": "A String", # The base model that is being tuned, e.g., "gemini-1.0-pro-002". "createTime": "A String", # Output only. Time when the TuningJob was created. "description": "A String", # Optional. The description of the TuningJob. "endTime": "A String", # Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. @@ -439,11 +439,11 @@

Method Details

"supervisedTuningSpec": { # Tuning Spec for Supervised Tuning. # Tuning Spec for Supervised Fine Tuning. "hyperParameters": { # Hyperparameters for SFT. # Optional. Hyperparameters for SFT. "adapterSize": "A String", # Optional. Adapter size for tuning. - "epochCount": "A String", # Optional. Number of training epoches for this tuning job. - "learningRateMultiplier": 3.14, # Optional. Learning rate multiplier for tuning. + "epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. + "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. }, - "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. - "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. + "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. + "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. }, "tunedModel": { # The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. # Output only. The tuned model resources assiociated with this TuningJob. "endpoint": "A String", # Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. @@ -565,7 +565,7 @@

Method Details

"nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page. "tuningJobs": [ # List of TuningJobs in the requested page. { # Represents a TuningJob that runs with Google owned models. - "baseModel": "A String", # Model name for tuning, e.g., "gemini-1.0-pro-002". + "baseModel": "A String", # The base model that is being tuned, e.g., "gemini-1.0-pro-002". "createTime": "A String", # Output only. Time when the TuningJob was created. "description": "A String", # Optional. The description of the TuningJob. "endTime": "A String", # Output only. Time when the TuningJob entered any of the following JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. @@ -588,11 +588,11 @@

Method Details

"supervisedTuningSpec": { # Tuning Spec for Supervised Tuning. # Tuning Spec for Supervised Fine Tuning. "hyperParameters": { # Hyperparameters for SFT. # Optional. Hyperparameters for SFT. "adapterSize": "A String", # Optional. Adapter size for tuning. - "epochCount": "A String", # Optional. Number of training epoches for this tuning job. - "learningRateMultiplier": 3.14, # Optional. Learning rate multiplier for tuning. + "epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. + "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. }, - "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. - "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. + "trainingDatasetUri": "A String", # Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file. + "validationDatasetUri": "A String", # Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file. }, "tunedModel": { # The Model Registry Model and Online Prediction Endpoint assiociated with this TuningJob. # Output only. The tuned model resources assiociated with this TuningJob. "endpoint": "A String", # Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. diff --git a/docs/dyn/aiplatform_v1.publishers.models.html b/docs/dyn/aiplatform_v1.publishers.models.html index 7727c7007ee..083fd0a9f82 100644 --- a/docs/dyn/aiplatform_v1.publishers.models.html +++ b/docs/dyn/aiplatform_v1.publishers.models.html @@ -204,6 +204,7 @@

Method Details

"maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). "minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, + "deployTaskName": "A String", # Optional. The name of the deploy task (e.g., "text to image generation"). "largeModelReference": { # Contains information about the Large Model. # Optional. Large model reference. When this is set, model_artifact_spec is not needed. "name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, @@ -217,6 +218,20 @@

Method Details

"A String", ], }, + "fineTune": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Fine tune the PublisherModel with the third-party model tuning UI. + "references": { # Required. + "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. + "resourceName": "A String", # The resource name of the Google Cloud resource. + "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. + }, + }, + "resourceDescription": "A String", # Optional. Description of the resource. + "resourceTitle": "A String", # Optional. Title of the resource. + "resourceUseCase": "A String", # Optional. Use case (CUJ) of the resource. + "title": "A String", # Required. + }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. diff --git a/docs/dyn/aiplatform_v1beta1.html b/docs/dyn/aiplatform_v1beta1.html index 909495b3453..554ada442ab 100644 --- a/docs/dyn/aiplatform_v1beta1.html +++ b/docs/dyn/aiplatform_v1beta1.html @@ -74,6 +74,11 @@

Vertex AI API

Instance Methods

+

+ media() +

+

Returns the media Resource.

+

projects()

diff --git a/docs/dyn/aiplatform_v1beta1.media.html b/docs/dyn/aiplatform_v1beta1.media.html index 86306402205..789f4350106 100644 --- a/docs/dyn/aiplatform_v1beta1.media.html +++ b/docs/dyn/aiplatform_v1beta1.media.html @@ -102,7 +102,7 @@

Method Details

"directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request. }, "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters. - "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now. + "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now. "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. "A String", ], @@ -154,7 +154,7 @@

Method Details

"directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request. }, "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters. - "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now. + "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now. "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.agents.html b/docs/dyn/aiplatform_v1beta1.projects.locations.agents.html new file mode 100644 index 00000000000..35b44aee3a4 --- /dev/null +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.agents.html @@ -0,0 +1,91 @@ + + + +

Vertex AI API . projects . locations . agents

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.agents.operations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.agents.operations.html new file mode 100644 index 00000000000..268e58d96d0 --- /dev/null +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.agents.operations.html @@ -0,0 +1,268 @@ + + + +

Vertex AI API . projects . locations . agents . operations

+

Instance Methods

+

+ cancel(name, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

+ wait(name, timeout=None, x__xgafv=None)

+

Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.

+

Method Details

+
+ cancel(name, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ wait(name, timeout=None, x__xgafv=None) +
Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.
+
+Args:
+  name: string, The name of the operation resource to wait on. (required)
+  timeout: string, The maximum duration to wait before timing out. If left blank, the wait will be at most the time permitted by the underlying HTTP/RPC protocol. If RPC context deadline is also specified, the shorter one will be used.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.apps.html b/docs/dyn/aiplatform_v1beta1.projects.locations.apps.html new file mode 100644 index 00000000000..90afa329019 --- /dev/null +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.apps.html @@ -0,0 +1,91 @@ + + + +

Vertex AI API . projects . locations . apps

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.apps.operations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.apps.operations.html new file mode 100644 index 00000000000..bba2d40043b --- /dev/null +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.apps.operations.html @@ -0,0 +1,268 @@ + + + +

Vertex AI API . projects . locations . apps . operations

+

Instance Methods

+

+ cancel(name, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

+ wait(name, timeout=None, x__xgafv=None)

+

Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.

+

Method Details

+
+ cancel(name, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ wait(name, timeout=None, x__xgafv=None) +
Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.
+
+Args:
+  name: string, The name of the operation resource to wait on. (required)
+  timeout: string, The maximum duration to wait before timing out. If left blank, the wait will be at most the time permitted by the underlying HTTP/RPC protocol. If RPC context deadline is also specified, the shorter one will be used.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html index 5b43994c02c..52783d10e6d 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html @@ -1373,9 +1373,17 @@

Method Details

"datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` }, "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. - "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: `projects/{project}/locations/{location}/ragCorpora/{ragCorpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "ragCorpora": [ # Optional. Deprecated. Please use rag_resources instead. "A String", ], + "ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + { # The definition of the Rag resource. + "ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` + "ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + "A String", + ], + }, + ], "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold. }, @@ -1449,6 +1457,10 @@

Method Details

"retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", ], + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -3032,9 +3044,17 @@

Method Details

"datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` }, "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. - "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: `projects/{project}/locations/{location}/ragCorpora/{ragCorpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "ragCorpora": [ # Optional. Deprecated. Please use rag_resources instead. "A String", ], + "ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + { # The definition of the Rag resource. + "ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` + "ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + "A String", + ], + }, + ], "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold. }, @@ -3108,6 +3128,10 @@

Method Details

"retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", ], + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html index ef81da15c07..8515632ee47 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html @@ -74,11 +74,6 @@

Vertex AI API . projects . locations . extensions

Instance Methods

-

- deployments() -

-

Returns the deployments Resource.

-

operations()

@@ -330,7 +325,8 @@

Method Details

"a_key": "", # Properties of the object. }, "vertexAiSearchRuntimeConfig": { # Runtime configuration for Vertext AI Search extension. - "servingConfigName": "A String", # Required. Vertext AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` + "appId": "A String", # Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + "servingConfigName": "A String", # [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` }, }, "toolUseExamples": [ # Optional. Examples to illustrate the usage of the extension as a tool. @@ -477,7 +473,8 @@

Method Details

"a_key": "", # Properties of the object. }, "vertexAiSearchRuntimeConfig": { # Runtime configuration for Vertext AI Search extension. - "servingConfigName": "A String", # Required. Vertext AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` + "appId": "A String", # Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + "servingConfigName": "A String", # [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` }, }, "toolUseExamples": [ # Optional. Examples to illustrate the usage of the extension as a tool. @@ -663,7 +660,8 @@

Method Details

"a_key": "", # Properties of the object. }, "vertexAiSearchRuntimeConfig": { # Runtime configuration for Vertext AI Search extension. - "servingConfigName": "A String", # Required. Vertext AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` + "appId": "A String", # Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + "servingConfigName": "A String", # [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` }, }, "toolUseExamples": [ # Optional. Examples to illustrate the usage of the extension as a tool. @@ -827,7 +825,8 @@

Method Details

"a_key": "", # Properties of the object. }, "vertexAiSearchRuntimeConfig": { # Runtime configuration for Vertext AI Search extension. - "servingConfigName": "A String", # Required. Vertext AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` + "appId": "A String", # Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + "servingConfigName": "A String", # [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` }, }, "toolUseExamples": [ # Optional. Examples to illustrate the usage of the extension as a tool. @@ -973,7 +972,8 @@

Method Details

"a_key": "", # Properties of the object. }, "vertexAiSearchRuntimeConfig": { # Runtime configuration for Vertext AI Search extension. - "servingConfigName": "A String", # Required. Vertext AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` + "appId": "A String", # Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time. + "servingConfigName": "A String", # [Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` }, }, "toolUseExamples": [ # Optional. Examples to illustrate the usage of the extension as a tool. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.html index 91c68d9004f..760fe375b0d 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureGroups.html @@ -116,7 +116,7 @@

Method Details

Creates a new FeatureGroup in a given project and location.
 
 Args:
-  parent: string, Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}'` (required)
+  parent: string, Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}` (required)
   body: object, The request body.
     The object takes the form of:
 
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.html
index c946676e133..8fe7d5c50a5 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.html
@@ -74,6 +74,16 @@
 
 

Vertex AI API . projects . locations

Instance Methods

+

+ agents() +

+

Returns the agents Resource.

+ +

+ apps() +

+

Returns the apps Resource.

+

batchPredictionJobs()

@@ -279,6 +289,9 @@

Instance Methods

list_next()

Retrieves the next page of results.

+

+ retrieveContexts(parent, body=None, x__xgafv=None)

+

Retrieves relevant contexts for a query.

Method Details

close() @@ -730,4 +743,55 @@

Method Details

+
+ retrieveContexts(parent, body=None, x__xgafv=None) +
Retrieves relevant contexts for a query.
+
+Args:
+  parent: string, Required. The resource name of the Location from which to retrieve RagContexts. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for VertexRagService.RetrieveContexts.
+  "query": { # A query to retrieve relevant contexts. # Required. Single RAG retrieve query.
+    "similarityTopK": 42, # Optional. The number of contexts to retrieve.
+    "text": "A String", # Optional. The query in text format to get relevant contexts.
+  },
+  "vertexRagStore": { # The data source for Vertex RagStore. # The data source for Vertex RagStore.
+    "ragCorpora": [ # Optional. Deprecated. Please use rag_resources to specify the data source.
+      "A String",
+    ],
+    "ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.
+      { # The definition of the Rag resource.
+        "ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`
+        "ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.
+          "A String",
+        ],
+      },
+    ],
+    "vectorDistanceThreshold": 3.14, # Optional. Only return contexts with vector distance smaller than the threshold.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagService.RetrieveContexts.
+  "contexts": { # Relevant contexts for one query. # The contexts of the query.
+    "contexts": [ # All its contexts.
+      { # A context of the query.
+        "distance": 3.14, # The distance between the query vector and the context text vector.
+        "sourceUri": "A String", # For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.
+        "text": "A String", # The text chunk.
+      },
+    ],
+  },
+}
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.indexEndpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.indexEndpoints.html index 0dbb1c6d114..7d496c10ac7 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.indexEndpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.indexEndpoints.html @@ -387,7 +387,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -410,10 +410,21 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, "fractionLeafNodesToSearchOverride": 3.14, # The fraction of the number of leaves to search, set at query time allows user to tune search performance. This value increase result in both search accuracy and latency increase. The value should be between 0.0 and 1.0. If not set or set to 0.0, query uses the default value specified in NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. "neighborCount": 42, # The number of nearest neighbors to be retrieved from database for each query. If not set, will use the default from the service configuration (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config). "perCrowdingAttributeNeighborCount": 42, # Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag. + "rrf": { # Parameters for RRF algorithm that combines search results. # Optional. Represents RRF algorithm that combines search results. + "alpha": 3.14, # Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense. 
+ }, }, ], "returnFullDatapoint": True or False, # If set to true, the full datapoints (including all vector values and restricts) of the nearest neighbors are returned. Note that returning full datapoint will significantly increase the latency and cost of the query. @@ -438,7 +449,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -461,8 +472,17 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, - "distance": 3.14, # The distance between the neighbor and the query vector. + "distance": 3.14, # The distance between the neighbor and the dense embedding query. + "sparseDistance": 3.14, # The distance between the neighbor and the query sparse_embedding. }, ], }, @@ -985,7 +1005,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -1008,6 +1028,14 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, ], } diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.indexes.html b/docs/dyn/aiplatform_v1beta1.projects.locations.indexes.html index 32493828ff3..cd18fa25c2e 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.indexes.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.indexes.html @@ -138,7 +138,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -245,7 +246,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -295,7 +297,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -351,7 +354,8 @@

Method Details

"etag": "A String", # Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "indexStats": { # Stats of the Index. # Output only. Stats of the index resource. "shardsCount": 42, # Output only. The number of shards in the Index. - "vectorsCount": "A String", # Output only. The number of vectors in the Index. + "sparseVectorsCount": "A String", # Output only. The number of sparse vectors in the Index. + "vectorsCount": "A String", # Output only. The number of dense vectors in the Index. }, "indexUpdateMethod": "A String", # Immutable. The update method to use with this Index. If not set, BATCH_UPDATE will be used by default. "labels": { # The labels with user-defined metadata to organize your Indexes. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. @@ -436,7 +440,7 @@

Method Details

"crowdingAttribute": "A String", # The attribute value used for crowding. The maximum number of neighbors to return per crowding attribute value (per_crowding_attribute_num_neighbors) is configured per-query. This field is ignored if per_crowding_attribute_num_neighbors is larger than the total number of neighbors to return for a given query. }, "datapointId": "A String", # Required. Unique identifier of the datapoint. - "featureVector": [ # Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. + "featureVector": [ # Required. Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions]. 3.14, ], "numericRestricts": [ # Optional. List of Restrict of the datapoint, used to perform "restricted searches" where boolean rule are used to filter the subset of the database eligible for matching. This uses numeric comparisons. @@ -459,6 +463,14 @@

Method Details

"namespace": "A String", # The namespace of this restriction. e.g.: color. }, ], + "sparseEmbedding": { # Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions. # Optional. Feature embedding vector for sparse index. + "dimensions": [ # Optional. The list of indexes for the embedding values of the sparse vector. + "A String", + ], + "values": [ # Optional. The list of embedding values of the sparse vector. + 3.14, + ], + }, }, ], "updateMask": "A String", # Optional. Update mask is used to specify the fields to be overwritten in the datapoints by the update. The fields specified in the update_mask are relative to each IndexDatapoint inside datapoints, not the full request. Updatable fields: * Use `all_restricts` to update both restricts and numeric_restricts. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html b/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html index 81c0b1d7e3d..1aa138b3431 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.modelMonitors.html @@ -263,7 +263,6 @@

Method Details

"repeated": True or False, # Describes if the schema field is an array of given data type. }, ], - "instanceType": "A String", # The prediction instance type that the Model accepts when serving. Supported values are: * `object`: Each input is a JSON object format. * `array`: Each input is a JSON array format. "predictionFields": [ # Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. { # Schema field definition. "dataType": "A String", # Supported data types are: `float` `integer` `boolean` `string` `categorical` @@ -602,7 +601,6 @@

Method Details

"repeated": True or False, # Describes if the schema field is an array of given data type. }, ], - "instanceType": "A String", # The prediction instance type that the Model accepts when serving. Supported values are: * `object`: Each input is a JSON object format. * `array`: Each input is a JSON array format. "predictionFields": [ # Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. { # Schema field definition. "dataType": "A String", # Supported data types are: `float` `integer` `boolean` `string` `categorical` @@ -882,7 +880,6 @@

Method Details

"repeated": True or False, # Describes if the schema field is an array of given data type. }, ], - "instanceType": "A String", # The prediction instance type that the Model accepts when serving. Supported values are: * `object`: Each input is a JSON object format. * `array`: Each input is a JSON array format. "predictionFields": [ # Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. { # Schema field definition. "dataType": "A String", # Supported data types are: `float` `integer` `boolean` `string` `categorical` @@ -1168,7 +1165,6 @@

Method Details

"repeated": True or False, # Describes if the schema field is an array of given data type. }, ], - "instanceType": "A String", # The prediction instance type that the Model accepts when serving. Supported values are: * `object`: Each input is a JSON object format. * `array`: Each input is a JSON array format. "predictionFields": [ # Prediction output names of the model. The requirements are the same as the feature_fields. For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed. { # Schema field definition. "dataType": "A String", # Supported data types are: `float` `integer` `boolean` `string` `categorical` diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookExecutionJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookExecutionJobs.html index 45c08b7025d..9fb3986037f 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookExecutionJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookExecutionJobs.html @@ -186,10 +186,30 @@

Method Details

{ # NotebookExecutionJob represents an instance of a notebook execution. "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). + }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. 
+ "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. @@ -244,10 +264,30 @@

Method Details

"notebookExecutionJobs": [ # List of NotebookExecutionJobs in the requested page. { # NotebookExecutionJob represents an instance of a notebook execution. "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). + }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. 
# The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimeTemplates.html b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimeTemplates.html index 15ad3b7e296..e4cb277a091 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimeTemplates.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.notebookRuntimeTemplates.html @@ -143,7 +143,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) @@ -273,7 +273,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) @@ -382,7 +382,7 @@

Method Details

"machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). }, - "name": "A String", # Output only. The resource name of the NotebookRuntimeTemplate. + "name": "A String", # The resource name of the NotebookRuntimeTemplate. "networkSpec": { # Network spec. # Optional. Network spec. "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html index 4b81873b441..441f68ebcf6 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html @@ -382,9 +382,17 @@

Method Details

"datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` }, "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. - "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: `projects/{project}/locations/{location}/ragCorpora/{ragCorpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "ragCorpora": [ # Optional. Deprecated. Please use rag_resources instead. "A String", ], + "ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + { # The definition of the Rag resource. + "ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` + "ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + "A String", + ], + }, + ], "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold. }, @@ -458,6 +466,10 @@

Method Details

"retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", ], + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -970,9 +982,17 @@

Method Details

"datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` }, "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. - "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: `projects/{project}/locations/{location}/ragCorpora/{ragCorpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "ragCorpora": [ # Optional. Deprecated. Please use rag_resources instead. "A String", ], + "ragResources": [ # Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support. + { # The definition of the Rag resource. + "ragCorpus": "A String", # Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` + "ragFileIds": [ # Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field. + "A String", + ], + }, + ], "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. "vectorDistanceThreshold": 3.14, # Optional. Only return results with vector distance smaller than the threshold. }, @@ -1046,6 +1066,10 @@

Method Details

"retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", ], + "searchEntryPoint": { # Google search entry point. # Optional. Google search entry for the following-up web searches. + "renderedContent": "A String", # Optional. Web content snippet that can be embedded in a web page or an app webview. + "sdkBlob": "A String", # Optional. Base64 encoded JSON representing array of tuple. + }, "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html index 98616af8adc..e255cbab287 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html @@ -87,10 +87,174 @@

Instance Methods

close()

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Creates a RagCorpus.

+

+ delete(name, force=None, x__xgafv=None)

+

Deletes a RagCorpus.

+

+ get(name, x__xgafv=None)

+

Gets a RagCorpus.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists RagCorpora in a Location.

+

+ list_next()

+

Retrieves the next page of results.

Method Details

close()
Close httplib2 connections.
+
+ create(parent, body=None, x__xgafv=None) +
Creates a RagCorpus.
+
+Args:
+  parent: string, Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+  "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+  "description": "A String", # Optional. The description of the RagCorpus.
+  "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "name": "A String", # Output only. The resource name of the RagCorpus.
+  "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ delete(name, force=None, x__xgafv=None) +
Deletes a RagCorpus.
+
+Args:
+  name: string, Required. The name of the RagCorpus resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  force: boolean, Optional. If set to true, any RagFiles in this RagCorpus will also be deleted. Otherwise, the request will only work if the RagCorpus has no RagFiles.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets a RagCorpus.
+
+Args:
+  name: string, Required. The name of the RagCorpus resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+  "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+  "description": "A String", # Optional. The description of the RagCorpus.
+  "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "name": "A String", # Output only. The resource name of the RagCorpus.
+  "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists RagCorpora in a Location.
+
+Args:
+  parent: string, Required. The resource name of the Location from which to list the RagCorpora. Format: `projects/{project}/locations/{location}` (required)
+  pageSize: integer, Optional. The standard list page size.
+  pageToken: string, Optional. The standard list page token. Typically obtained via ListRagCorporaResponse.next_page_token of the previous VertexRagDataService.ListRagCorpora call.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagDataService.ListRagCorpora.
+  "nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page.
+  "ragCorpora": [ # List of RagCorpora in the requested page.
+    { # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+      "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+      "description": "A String", # Optional. The description of the RagCorpus.
+      "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+      "name": "A String", # Output only. The resource name of the RagCorpus.
+      "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html index 91817d8cb9e..eadd3888959 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html @@ -82,10 +82,221 @@

Instance Methods

close()

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a RagFile.

+

+ get(name, x__xgafv=None)

+

Gets a RagFile.

+

+ import_(parent, body=None, x__xgafv=None)

+

Import files from Google Cloud Storage or Google Drive into a RagCorpus.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists RagFiles in a RagCorpus.

+

+ list_next()

+

Retrieves the next page of results.

Method Details

close()
Close httplib2 connections.
+
+ delete(name, x__xgafv=None) +
Deletes a RagFile.
+
+Args:
+  name: string, Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets a RagFile.
+
+Args:
+  name: string, Required. The name of the RagFile resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A RagFile contains user data for chunking, embedding and indexing.
+  "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+  "description": "A String", # Optional. The description of the RagFile.
+  "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+  },
+  "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now.
+    "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+      "A String",
+    ],
+  },
+  "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+    "resourceIds": [ # Required. Google Drive resource IDs.
+      { # The type and ID of the Google Drive resource.
+        "resourceId": "A String", # Required. The ID of the Google Drive resource.
+        "resourceType": "A String", # Required. The type of the Google Drive resource.
+      },
+    ],
+  },
+  "name": "A String", # Output only. The resource name of the RagFile.
+  "ragFileType": "A String", # Output only. The type of the RagFile.
+  "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+  "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+}
+
+ +
+ import_(parent, body=None, x__xgafv=None) +
Import files from Google Cloud Storage or Google Drive into a RagCorpus.
+
+Args:
+  parent: string, Required. The name of the RagCorpus resource into which to import files. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for VertexRagDataService.ImportRagFiles.
+  "importRagFilesConfig": { # Config for importing RagFiles. # Required. The config for the RagFiles to be synced and imported into the RagCorpus. VertexRagDataService.ImportRagFiles.
+    "gcsSource": { # The Google Cloud Storage location for the input content. # Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: - `gs://bucket_name/my_directory/object_name/my_file.txt` - `gs://bucket_name/my_directory`
+      "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+        "A String",
+      ],
+    },
+    "googleDriveSource": { # The Google Drive location for the input content. # Google Drive location. Supports importing individual files as well as Google Drive folders.
+      "resourceIds": [ # Required. Google Drive resource IDs.
+        { # The type and ID of the Google Drive resource.
+          "resourceId": "A String", # Required. The ID of the Google Drive resource.
+          "resourceType": "A String", # Required. The type of the Google Drive resource.
+        },
+      ],
+    },
+    "ragFileChunkingConfig": { # Specifies the size and overlap of chunks for RagFiles. # Specifies the size and overlap of chunks after importing RagFiles.
+      "chunkOverlap": 42, # The overlap between chunks.
+      "chunkSize": 42, # The size of the chunks.
+    },
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists RagFiles in a RagCorpus.
+
+Args:
+  parent: string, Required. The resource name of the RagCorpus from which to list the RagFiles. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  pageSize: integer, Optional. The standard list page size.
+  pageToken: string, Optional. The standard list page token. Typically obtained via ListRagFilesResponse.next_page_token of the previous VertexRagDataService.ListRagFiles call.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagDataService.ListRagFiles.
+  "nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page.
+  "ragFiles": [ # List of RagFiles in the requested page.
+    { # A RagFile contains user data for chunking, embedding and indexing.
+      "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+      "description": "A String", # Optional. The description of the RagFile.
+      "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+      },
+      "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+      "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now.
+        "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+          "A String",
+        ],
+      },
+      "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+        "resourceIds": [ # Required. Google Drive resource IDs.
+          { # The type and ID of the Google Drive resource.
+            "resourceId": "A String", # Required. The ID of the Google Drive resource.
+            "resourceType": "A String", # Required. The type of the Google Drive resource.
+          },
+        ],
+      },
+      "name": "A String", # Output only. The resource name of the RagFile.
+      "ragFileType": "A String", # Output only. The type of the RagFile.
+      "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+      "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html index a5d79d21681..caee99f2e90 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html @@ -433,6 +433,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. @@ -992,6 +1046,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. @@ -1593,6 +1701,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. @@ -2166,6 +2328,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. @@ -2743,6 +2959,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. @@ -3303,6 +3573,60 @@

Method Details

"modelMonitoringJobId": "A String", # Optional. The ID to use for the Model Monitoring Job, which will become the final component of the model monitoring job resource name. The maximum length is 63 characters, and valid characters are `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. "parent": "A String", # Required. The parent of the ModelMonitoringJob. Format: `projects/{project}/locations/{location}/modelMoniitors/{model_monitor}` }, + "createNotebookExecutionJobRequest": { # Request message for [NotebookService.CreateNotebookExecutionJob] # Request for NotebookService.CreateNotebookExecutionJob. + "notebookExecutionJob": { # NotebookExecutionJob represents an instance of a notebook execution. # Required. The NotebookExecutionJob to create. + "createTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was created. + "customEnvironmentSpec": { # Compute configuration to use for an execution job. # The custom compute configuration for an execution job. + "machineSpec": { # Specification of a single machine. # The specification of a single machine for the execution job. + "acceleratorCount": 42, # The number of accelerators to attach to the machine. + "acceleratorType": "A String", # Immutable. The type of accelerator(s) that may be attached to the machine as per accelerator_count. + "machineType": "A String", # Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + "tpuTopology": "A String", # Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: "2x2x1"). 
+ }, + "networkSpec": { # Network spec. # The network configuration to use for the execution job. + "enableInternetAccess": True or False, # Whether to enable public internet access. Default false. + "network": "A String", # The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) + "subnetwork": "A String", # The name of the subnet that this instance is in. Format: `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` + }, + "persistentDiskSpec": { # Represents the spec of persistent disk options. # The specification of a persistent disk to attach for the execution job. + "diskSizeGb": "A String", # Size in GB of the disk (default is 100GB). + "diskType": "A String", # Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme Persistent Disk) + }, + }, + "dataformRepositorySource": { # The Dataform Repository containing the input notebook. # The Dataform Repository pointing to a single file notebook repository. + "commitSha": "A String", # The commit SHA to read repository with. If unset, the file will be read at HEAD. + "dataformRepositoryResourceName": "A String", # The resource name of the Dataform Repository. Format: `projects/{project_id}/locations/{location}/repositories/{repository_id}` + }, + "directNotebookSource": { # The content of the input notebook in ipynb format. # The contents of an input notebook file. + "content": "A String", # The base64-encoded contents of the input notebook file. + }, + "displayName": "A String", # The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters. + "executionTimeout": "A String", # Max running time of the execution job in seconds (default 86400s / 24 hrs). 
+ "executionUser": "A String", # The user email to run the execution as. Only supported by Colab runtimes. + "gcsNotebookSource": { # The Cloud Storage uri for the input notebook. # The GCS url pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + "generation": "A String", # The version of the Cloud Storage object to read. If unset, the current version of the object is read. See https://cloud.google.com/storage/docs/metadata#generation-number. + "uri": "A String", # The Cloud Storage uri pointing to the ipynb file. Format: `gs://bucket/notebook_file.ipynb` + }, + "gcsOutputUri": "A String", # The GCS location to upload the result to. Format: `gs://bucket-name` + "jobState": "A String", # Output only. The state of the NotebookExecutionJob. + "name": "A String", # Output only. The resource name of this NotebookExecutionJob. Format: `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` + "notebookRuntimeTemplateResourceName": "A String", # The NotebookRuntimeTemplate to source compute configuration from. + "scheduleResourceName": "A String", # Output only. The Schedule resource name if this job is triggered by one. Format: `projects/{project_id}/locations/{location}/schedules/{schedule_id}` + "serviceAccount": "A String", # The service account to run the execution as. + "status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Populated when the NotebookExecutionJob is completed. When there is an error during notebook execution, the error details are populated. 
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "updateTime": "A String", # Output only. Timestamp when this NotebookExecutionJob was most recently updated. + }, + "notebookExecutionJobId": "A String", # Optional. User specified ID for the NotebookExecutionJob. + "parent": "A String", # Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` + }, "createPipelineJobRequest": { # Request message for PipelineService.CreatePipelineJob. # Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). "parent": "A String", # Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` "pipelineJob": { # An instance of a machine learning PipelineJob. # Required. The PipelineJob to create. diff --git a/docs/dyn/aiplatform_v1beta1.publishers.models.html b/docs/dyn/aiplatform_v1beta1.publishers.models.html index a5233c4bd2e..9e78faa3509 100644 --- a/docs/dyn/aiplatform_v1beta1.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.publishers.models.html @@ -219,6 +219,7 @@

Method Details

"maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). "minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, + "deployTaskName": "A String", # Optional. The name of the deploy task (e.g., "text to image generation"). "largeModelReference": { # Contains information about the Large Model. # Optional. Large model reference. When this is set, model_artifact_spec is not needed. "name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, @@ -232,6 +233,20 @@

Method Details

"A String", ], }, + "fineTune": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Fine tune the PublisherModel with the third-party model tuning UI. + "references": { # Required. + "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. + "resourceName": "A String", # The resource name of the Google Cloud resource. + "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. + }, + }, + "resourceDescription": "A String", # Optional. Description of the resource. + "resourceTitle": "A String", # Optional. Title of the resource. + "resourceUseCase": "A String", # Optional. Use case (CUJ) of the resource. + "title": "A String", # Required. + }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. @@ -515,6 +530,7 @@

Method Details

"maxReplicaCount": 42, # Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). "minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, + "deployTaskName": "A String", # Optional. The name of the deploy task (e.g., "text to image generation"). "largeModelReference": { # Contains information about the Large Model. # Optional. Large model reference. When this is set, model_artifact_spec is not needed. "name": "A String", # Required. The unique name of the large Foundation or pre-built model. Like "chat-bison", "text-bison". Or model name with version ID, like "chat-bison@001", "text-bison@005", etc. }, @@ -528,6 +544,20 @@

Method Details

"A String", ], }, + "fineTune": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Fine tune the PublisherModel with the third-party model tuning UI. + "references": { # Required. + "a_key": { # Reference to a resource. + "description": "A String", # Description of the resource. + "resourceName": "A String", # The resource name of the Google Cloud resource. + "uri": "A String", # The URI of the resource. + "useCase": "A String", # Use case (CUJ) of the resource. + }, + }, + "resourceDescription": "A String", # Optional. Description of the resource. + "resourceTitle": "A String", # Optional. Title of the resource. + "resourceUseCase": "A String", # Optional. Use case (CUJ) of the resource. + "title": "A String", # Required. + }, "openEvaluationPipeline": { # The regional resource name or the URI. Key is region, e.g., us-central1, europe-west2, global, etc.. # Optional. Open evaluation pipeline of the PublisherModel. "references": { # Required. "a_key": { # Reference to a resource. diff --git a/docs/dyn/alloydb_v1.projects.locations.clusters.html b/docs/dyn/alloydb_v1.projects.locations.clusters.html index a516b97818e..8787b522322 100644 --- a/docs/dyn/alloydb_v1.projects.locations.clusters.html +++ b/docs/dyn/alloydb_v1.projects.locations.clusters.html @@ -209,6 +209,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. @@ -376,6 +379,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. @@ -592,6 +598,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. @@ -739,6 +748,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. @@ -895,6 +907,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. @@ -1112,6 +1127,9 @@

Method Details

"labels": { # Labels as key value pairs "a_key": "A String", }, + "maintenanceSchedule": { # MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead. # Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set. + "startTime": "A String", # Output only. The scheduled start time for the maintenance. + }, "maintenanceUpdatePolicy": { # MaintenanceUpdatePolicy defines the policy for system updates. # Optional. The maintenance update policy determines when to allow or deny updates. "maintenanceWindows": [ # Preferred windows to perform maintenance. Currently limited to 1. { # MaintenanceWindow specifies a preferred day and time for maintenance. diff --git a/docs/dyn/artifactregistry_v1.html b/docs/dyn/artifactregistry_v1.html index a1711d3d629..9cd086ebfb3 100644 --- a/docs/dyn/artifactregistry_v1.html +++ b/docs/dyn/artifactregistry_v1.html @@ -74,11 +74,6 @@

Artifact Registry API

Instance Methods

-

- media() -

-

Returns the media Resource.

-

projects()

diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html index 2c58f65d4cc..5761d29efe5 100644 --- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html +++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html @@ -77,6 +77,12 @@

Instance Methods

close()

Close httplib2 connections.

+

+ download(name, x__xgafv=None)

+

Download a file.

+

+ download_media(name, x__xgafv=None)

+

Download a file.

get(name, x__xgafv=None)

Gets a file.

@@ -92,6 +98,41 @@

Method Details

Close httplib2 connections.
+
+ download(name, x__xgafv=None) +
Download a file.
+
+Args:
+  name: string, Required. The name of the file to download. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response to download a file.
+}
+
+ +
+ download_media(name, x__xgafv=None) +
Download a file.
+
+Args:
+  name: string, Required. The name of the file to download. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  The media object as a string.
+
+    
+
+
get(name, x__xgafv=None)
Gets a file.
@@ -115,7 +156,7 @@ 

Method Details

"value": "A String", # The hash value. }, ], - "name": "A String", # The name of the file, for example: "projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt". If the file ID part contains slashes, they are escaped. + "name": "A String", # The name of the file, for example: `projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt`. If the file ID part contains slashes, they are escaped. "owner": "A String", # The name of the Package or Version that owns this file, if any. "sizeBytes": "A String", # The size of the File in bytes. "updateTime": "A String", # Output only. The time when the File was last updated. @@ -151,7 +192,7 @@

Method Details

"value": "A String", # The hash value. }, ], - "name": "A String", # The name of the file, for example: "projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt". If the file ID part contains slashes, they are escaped. + "name": "A String", # The name of the file, for example: `projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt`. If the file ID part contains slashes, they are escaped. "owner": "A String", # The name of the Package or Version that owns this file, if any. "sizeBytes": "A String", # The size of the File in bytes. "updateTime": "A String", # Output only. The time when the File was last updated. diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.html index aba3c5aa8ff..fac86d9b9af 100644 --- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.html +++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.html @@ -223,7 +223,7 @@

Method Details

"versionPolicy": "A String", # Version policy defines the versions that the registry will accept. }, "mode": "A String", # Optional. The mode of the repository. - "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. + "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. "remoteRepositoryConfig": { # Remote repository configuration. # Configuration specific for a Remote Repository. "aptRepository": { # Configuration for an Apt remote repository. # Specific settings for an Apt remote repository. "customRepository": { # Customer-specified publicly available remote repository. # Customer-specified remote repository. @@ -276,10 +276,11 @@

Method Details

}, }, }, + "satisfiesPzi": True or False, # Output only. If set, the repository satisfies physical zone isolation. "satisfiesPzs": True or False, # Output only. If set, the repository satisfies physical zone separation. "sizeBytes": "A String", # Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. "updateTime": "A String", # Output only. The time when the repository was last updated. - "virtualRepositoryConfig": { # Virtual repository configuration. # Configuration specific for a Virtual Repository. + "virtualRepositoryConfig": { # LINT.IfChange Virtual repository configuration. # Configuration specific for a Virtual Repository. "upstreamPolicies": [ # Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. { # Artifact policy configuration for the repository contents. "id": "A String", # The user-provided ID of the upstream policy. @@ -413,7 +414,7 @@

Method Details

"versionPolicy": "A String", # Version policy defines the versions that the registry will accept. }, "mode": "A String", # Optional. The mode of the repository. - "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. + "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. "remoteRepositoryConfig": { # Remote repository configuration. # Configuration specific for a Remote Repository. "aptRepository": { # Configuration for an Apt remote repository. # Specific settings for an Apt remote repository. "customRepository": { # Customer-specified publicly available remote repository. # Customer-specified remote repository. @@ -466,10 +467,11 @@

Method Details

}, }, }, + "satisfiesPzi": True or False, # Output only. If set, the repository satisfies physical zone isolation. "satisfiesPzs": True or False, # Output only. If set, the repository satisfies physical zone separation. "sizeBytes": "A String", # Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. "updateTime": "A String", # Output only. The time when the repository was last updated. - "virtualRepositoryConfig": { # Virtual repository configuration. # Configuration specific for a Virtual Repository. + "virtualRepositoryConfig": { # LINT.IfChange Virtual repository configuration. # Configuration specific for a Virtual Repository. "upstreamPolicies": [ # Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. { # Artifact policy configuration for the repository contents. "id": "A String", # The user-provided ID of the upstream policy. @@ -579,7 +581,7 @@

Method Details

"versionPolicy": "A String", # Version policy defines the versions that the registry will accept. }, "mode": "A String", # Optional. The mode of the repository. - "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. + "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. "remoteRepositoryConfig": { # Remote repository configuration. # Configuration specific for a Remote Repository. "aptRepository": { # Configuration for an Apt remote repository. # Specific settings for an Apt remote repository. "customRepository": { # Customer-specified publicly available remote repository. # Customer-specified remote repository. @@ -632,10 +634,11 @@

Method Details

}, }, }, + "satisfiesPzi": True or False, # Output only. If set, the repository satisfies physical zone isolation. "satisfiesPzs": True or False, # Output only. If set, the repository satisfies physical zone separation. "sizeBytes": "A String", # Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. "updateTime": "A String", # Output only. The time when the repository was last updated. - "virtualRepositoryConfig": { # Virtual repository configuration. # Configuration specific for a Virtual Repository. + "virtualRepositoryConfig": { # LINT.IfChange Virtual repository configuration. # Configuration specific for a Virtual Repository. "upstreamPolicies": [ # Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. { # Artifact policy configuration for the repository contents. "id": "A String", # The user-provided ID of the upstream policy. @@ -668,7 +671,7 @@

Method Details

Updates a repository.
 
 Args:
-  name: string, The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. (required)
+  name: string, The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -716,7 +719,7 @@ 

Method Details

"versionPolicy": "A String", # Version policy defines the versions that the registry will accept. }, "mode": "A String", # Optional. The mode of the repository. - "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. + "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. "remoteRepositoryConfig": { # Remote repository configuration. # Configuration specific for a Remote Repository. "aptRepository": { # Configuration for an Apt remote repository. # Specific settings for an Apt remote repository. "customRepository": { # Customer-specified publicly available remote repository. # Customer-specified remote repository. @@ -769,10 +772,11 @@

Method Details

}, }, }, + "satisfiesPzi": True or False, # Output only. If set, the repository satisfies physical zone isolation. "satisfiesPzs": True or False, # Output only. If set, the repository satisfies physical zone separation. "sizeBytes": "A String", # Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. "updateTime": "A String", # Output only. The time when the repository was last updated. - "virtualRepositoryConfig": { # Virtual repository configuration. # Configuration specific for a Virtual Repository. + "virtualRepositoryConfig": { # LINT.IfChange Virtual repository configuration. # Configuration specific for a Virtual Repository. "upstreamPolicies": [ # Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. { # Artifact policy configuration for the repository contents. "id": "A String", # The user-provided ID of the upstream policy. @@ -836,7 +840,7 @@

Method Details

"versionPolicy": "A String", # Version policy defines the versions that the registry will accept. }, "mode": "A String", # Optional. The mode of the repository. - "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. + "name": "A String", # The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique. "remoteRepositoryConfig": { # Remote repository configuration. # Configuration specific for a Remote Repository. "aptRepository": { # Configuration for an Apt remote repository. # Specific settings for an Apt remote repository. "customRepository": { # Customer-specified publicly available remote repository. # Customer-specified remote repository. @@ -889,10 +893,11 @@

Method Details

}, }, }, + "satisfiesPzi": True or False, # Output only. If set, the repository satisfies physical zone isolation. "satisfiesPzs": True or False, # Output only. If set, the repository satisfies physical zone separation. "sizeBytes": "A String", # Output only. The size, in bytes, of all artifact storage in this repository. Repositories that are generally available or in public preview use this to calculate storage costs. "updateTime": "A String", # Output only. The time when the repository was last updated. - "virtualRepositoryConfig": { # Virtual repository configuration. # Configuration specific for a Virtual Repository. + "virtualRepositoryConfig": { # LINT.IfChange Virtual repository configuration. # Configuration specific for a Virtual Repository. "upstreamPolicies": [ # Policies that configure the upstream artifacts distributed by the Virtual Repository. Upstream policies cannot be set on a standard repository. { # Artifact policy configuration for the repository contents. "id": "A String", # The user-provided ID of the upstream policy. diff --git a/docs/dyn/baremetalsolution_v2.projects.locations.instances.html b/docs/dyn/baremetalsolution_v2.projects.locations.instances.html index 122bc928f1a..41dca123aee 100644 --- a/docs/dyn/baremetalsolution_v2.projects.locations.instances.html +++ b/docs/dyn/baremetalsolution_v2.projects.locations.instances.html @@ -80,9 +80,15 @@

Instance Methods

detachLun(instance, body=None, x__xgafv=None)

Detach LUN from Instance.

+

+ disableHyperthreading(name, body=None, x__xgafv=None)

+

Perform disable hyperthreading operation on a single server.

disableInteractiveSerialConsole(name, body=None, x__xgafv=None)

Disable the interactive serial console feature on an instance.

+

+ enableHyperthreading(name, body=None, x__xgafv=None)

+

Perform enable hyperthreading operation on a single server.

enableInteractiveSerialConsole(name, body=None, x__xgafv=None)

Enable the interactive serial console feature on an instance.

@@ -101,6 +107,9 @@

Instance Methods

patch(name, body=None, updateMask=None, x__xgafv=None)

Update details of a single server.

+

+ reimage(name, body=None, x__xgafv=None)

+

Perform reimage operation on a single server.

rename(name, body=None, x__xgafv=None)

RenameInstance sets a new name for an instance. Use with caution, previous names become immediately invalidated.

@@ -162,6 +171,47 @@

Method Details

}
+
+ disableHyperthreading(name, body=None, x__xgafv=None) +
Perform disable hyperthreading operation on a single server.
+
+Args:
+  name: string, Required. The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Message requesting to perform disable hyperthreading operation on a server.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
disableInteractiveSerialConsole(name, body=None, x__xgafv=None)
Disable the interactive serial console feature on an instance.
@@ -203,6 +253,47 @@ 

Method Details

}
+
+ enableHyperthreading(name, body=None, x__xgafv=None) +
Perform enable hyperthreading operation on a single server.
+
+Args:
+  name: string, Required. The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Message requesting to perform enable hyperthreading operation on a server.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
enableInteractiveSerialConsole(name, body=None, x__xgafv=None)
Enable the interactive serial console feature on an instance.
@@ -821,6 +912,52 @@ 

Method Details

}
+
+ reimage(name, body=None, x__xgafv=None) +
Perform reimage operation on a single server.
+
+Args:
+  name: string, Required. The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Message requesting to perform reimage operation on a server.
+  "kmsKeyVersion": "A String", # Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`.
+  "osImage": "A String", # Required. The OS image code of the image which will be used in the reimage operation.
+  "sshKeys": [ # Optional. List of SSH Keys used during reimaging an instance.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
rename(name, body=None, x__xgafv=None)
RenameInstance sets a new name for an instance. Use with caution, previous names become immediately invalidated.
diff --git a/docs/dyn/batch_v1.projects.locations.jobs.html b/docs/dyn/batch_v1.projects.locations.jobs.html
index 4a5ff22cb66..c6e3f08d565 100644
--- a/docs/dyn/batch_v1.projects.locations.jobs.html
+++ b/docs/dyn/batch_v1.projects.locations.jobs.html
@@ -216,7 +216,7 @@ 

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. @@ -484,7 +484,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. @@ -794,7 +794,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. @@ -1073,7 +1073,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. diff --git a/docs/dyn/batch_v1.projects.locations.jobs.taskGroups.tasks.html b/docs/dyn/batch_v1.projects.locations.jobs.taskGroups.tasks.html index 4ebf8c537f0..629b4d62b3b 100644 --- a/docs/dyn/batch_v1.projects.locations.jobs.taskGroups.tasks.html +++ b/docs/dyn/batch_v1.projects.locations.jobs.taskGroups.tasks.html @@ -115,7 +115,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. @@ -154,7 +154,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. diff --git a/docs/dyn/batch_v1.projects.locations.state.html b/docs/dyn/batch_v1.projects.locations.state.html index 9c71d1a4964..ef6e5ce3641 100644 --- a/docs/dyn/batch_v1.projects.locations.state.html +++ b/docs/dyn/batch_v1.projects.locations.state.html @@ -112,7 +112,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. @@ -314,7 +314,7 @@

Method Details

"description": "A String", # Description of the event. "eventTime": "A String", # The time this event occurred. "taskExecution": { # This Task Execution field includes detail information for task execution procedures, based on StatusEvent types. # Task Execution - "exitCode": 42, # When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success. + "exitCode": 42, # The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. If there are multiple runnables failures, Batch only exposes the first error caught for now. }, "taskState": "A String", # Task State "type": "A String", # Type of the event. diff --git a/docs/dyn/beyondcorp_v1.projects.locations.appConnections.html b/docs/dyn/beyondcorp_v1.projects.locations.appConnections.html index 6a373767e98..656aafc48c1 100644 --- a/docs/dyn/beyondcorp_v1.projects.locations.appConnections.html +++ b/docs/dyn/beyondcorp_v1.projects.locations.appConnections.html @@ -146,6 +146,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -256,6 +258,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -352,6 +356,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -409,6 +415,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -489,6 +497,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. diff --git a/docs/dyn/beyondcorp_v1.projects.locations.appGateways.html b/docs/dyn/beyondcorp_v1.projects.locations.appGateways.html index dd89ec10da9..caca9d73884 100644 --- a/docs/dyn/beyondcorp_v1.projects.locations.appGateways.html +++ b/docs/dyn/beyondcorp_v1.projects.locations.appGateways.html @@ -130,6 +130,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -234,6 +236,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -324,6 +328,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. diff --git a/docs/dyn/beyondcorp_v1alpha.organizations.locations.subscriptions.html b/docs/dyn/beyondcorp_v1alpha.organizations.locations.subscriptions.html index 1534df5fc1a..3db68158d2e 100644 --- a/docs/dyn/beyondcorp_v1alpha.organizations.locations.subscriptions.html +++ b/docs/dyn/beyondcorp_v1alpha.organizations.locations.subscriptions.html @@ -74,6 +74,9 @@

BeyondCorp API . organizations . locations . subscriptions

Instance Methods

+

+ cancel(name, requestId=None, x__xgafv=None)

+

Cancels an existing BeyondCorp Enterprise Subscription in a given organization. Location will always be global as BeyondCorp subscriptions are per organization. Returns the timestamp for when the cancellation will become effective

close()

Close httplib2 connections.

@@ -93,6 +96,26 @@

Instance Methods

patch(name, body=None, requestId=None, updateMask=None, x__xgafv=None)

Updates an existing BeyondCorp Enterprise Subscription in a given organization. Location will always be global as BeyondCorp subscriptions are per organization.

Method Details

+
+ cancel(name, requestId=None, x__xgafv=None) +
Cancels an existing BeyondCorp Enterprise Subscription in a given organization. Location will always be global as BeyondCorp subscriptions are per organization. Returns the timestamp for when the cancellation will become effective
+
+Args:
+  name: string, Required. Name of the resource. (required)
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for BeyondCorp.CancelSubscription
+  "effectiveCancellationTime": "A String", # Time when the cancellation will become effective
+}
+
+
close()
Close httplib2 connections.
@@ -109,6 +132,7 @@

Method Details

{ # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. @@ -129,6 +153,7 @@

Method Details

{ # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. @@ -156,6 +181,7 @@

Method Details

{ # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. @@ -188,6 +214,7 @@

Method Details

"subscriptions": [ # A list of BeyondCorp Subscriptions in the organization. { # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. @@ -226,6 +253,7 @@

Method Details

{ # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. @@ -237,7 +265,7 @@

Method Details

} requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000). - updateMask: string, Required. Field mask is used to specify the fields to be overwritten in the Subscription resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then all mutable fields will be overwritten. Mutable fields: type, state. + updateMask: string, Required. Field mask is used to specify the fields to be overwritten in the Subscription resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. Mutable fields: seat_count. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -248,6 +276,7 @@

Method Details

{ # A BeyondCorp Subscription resource represents BeyondCorp Enterprise Subscription. BeyondCorp Enterprise Subscription enables BeyondCorp Enterprise permium features for an organization. "autoRenewEnabled": True or False, # Output only. Represents that, if subscription will renew or end when the term ends. + "billingAccount": "A String", # Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type. "createTime": "A String", # Output only. Create time of the subscription. "endTime": "A String", # Output only. End time of the subscription. "name": "A String", # Required. Unique resource name of the Subscription. The name is ignored when creating a subscription. diff --git a/docs/dyn/beyondcorp_v1alpha.projects.locations.appConnections.html b/docs/dyn/beyondcorp_v1alpha.projects.locations.appConnections.html index 7e2791b208a..1741223a207 100644 --- a/docs/dyn/beyondcorp_v1alpha.projects.locations.appConnections.html +++ b/docs/dyn/beyondcorp_v1alpha.projects.locations.appConnections.html @@ -146,6 +146,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -256,6 +258,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -352,6 +356,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -409,6 +415,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -489,6 +497,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppConnection. "type": "A String", # Required. The type of network connectivity used by the AppConnection. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. diff --git a/docs/dyn/beyondcorp_v1alpha.projects.locations.appGateways.html b/docs/dyn/beyondcorp_v1alpha.projects.locations.appGateways.html index 82dc7c58f67..67e642acef7 100644 --- a/docs/dyn/beyondcorp_v1alpha.projects.locations.appGateways.html +++ b/docs/dyn/beyondcorp_v1alpha.projects.locations.appGateways.html @@ -130,6 +130,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -234,6 +236,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. @@ -324,6 +328,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway. + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the AppGateway. "type": "A String", # Required. The type of network connectivity used by the AppGateway. "uid": "A String", # Output only. A unique identifier for the instance generated by the system. diff --git a/docs/dyn/bigquery_v2.tables.html b/docs/dyn/bigquery_v2.tables.html index f9ed22cf31a..1f4676168e1 100644 --- a/docs/dyn/bigquery_v2.tables.html +++ b/docs/dyn/bigquery_v2.tables.html @@ -497,7 +497,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -962,7 +962,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -1368,7 +1368,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -1882,7 +1882,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -2289,7 +2289,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -2814,7 +2814,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. @@ -3221,7 +3221,7 @@

Method Details

"message": "A String", # A human-readable description of the error. "reason": "A String", # A short error code that summarizes the error. }, - "replicationIntervalMs": "A String", # Required. Specifies the interval at which the source table is polled for updates. + "replicationIntervalMs": "A String", # Optional. Specifies the interval at which the source table is polled for updates. It's Optional. If not specified, default replication interval would be applied. "replicationStatus": "A String", # Optional. Output only. Replication status of configured replication. "sourceTable": { # Required. Source table reference that is replicated. "datasetId": "A String", # Required. The ID of the dataset containing this table. diff --git a/docs/dyn/bigtableadmin_v2.projects.instances.html b/docs/dyn/bigtableadmin_v2.projects.instances.html index 4c590a60045..75155570333 100644 --- a/docs/dyn/bigtableadmin_v2.projects.instances.html +++ b/docs/dyn/bigtableadmin_v2.projects.instances.html @@ -169,6 +169,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. @@ -245,6 +246,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. @@ -334,6 +336,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. @@ -373,6 +376,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. @@ -539,6 +543,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. @@ -559,6 +564,7 @@

Method Details

"a_key": "A String", }, "name": "A String", # The unique name of the instance. Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`. + "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The current state of the instance. "type": "A String", # The type of the instance. Defaults to `PRODUCTION`. diff --git a/docs/dyn/bigtableadmin_v2.projects.instances.tables.authorizedViews.html b/docs/dyn/bigtableadmin_v2.projects.instances.tables.authorizedViews.html index 235109b1017..51c019a2c3c 100644 --- a/docs/dyn/bigtableadmin_v2.projects.instances.tables.authorizedViews.html +++ b/docs/dyn/bigtableadmin_v2.projects.instances.tables.authorizedViews.html @@ -295,7 +295,7 @@

Method Details

parent: string, Required. The unique name of the table for which AuthorizedViews should be listed. Values are of the form `projects/{project}/instances/{instance}/tables/{table}`. (required) pageSize: integer, Optional. Maximum number of results per page. A page_size of zero lets the server choose the number of items to return. A page_size which is strictly positive will return at most that many items. A negative page_size will cause an error. Following the first request, subsequent paginated calls are not required to pass a page_size. If a page_size is set in subsequent calls, it must match the page_size given in the first request. pageToken: string, Optional. The value of `next_page_token` returned by a previous call. - view: string, Optional. The resource_view to be applied to the returned views' fields. Default to NAME_ONLY. + view: string, Optional. The resource_view to be applied to the returned AuthorizedViews' fields. Default to NAME_ONLY. Allowed values RESPONSE_VIEW_UNSPECIFIED - Uses the default view for each method as documented in the request. NAME_ONLY - Only populates `name`. diff --git a/docs/dyn/cloudbuild_v1.projects.builds.html b/docs/dyn/cloudbuild_v1.projects.builds.html index 4150c17cb18..820ac5238ac 100644 --- a/docs/dyn/cloudbuild_v1.projects.builds.html +++ b/docs/dyn/cloudbuild_v1.projects.builds.html @@ -246,6 +246,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -375,6 +380,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -606,6 +616,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -735,6 +750,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -997,6 +1017,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1126,6 +1151,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1363,6 +1393,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1492,6 +1527,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. diff --git a/docs/dyn/cloudbuild_v1.projects.locations.builds.html b/docs/dyn/cloudbuild_v1.projects.locations.builds.html index a9ce1ba9873..1edafb250b0 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.builds.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.builds.html @@ -245,6 +245,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -374,6 +379,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -605,6 +615,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -734,6 +749,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -996,6 +1016,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1125,6 +1150,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1362,6 +1392,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1491,6 +1526,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. 
diff --git a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html index 491d81af7b3..cd1468444b1 100644 --- a/docs/dyn/cloudbuild_v1.projects.locations.triggers.html +++ b/docs/dyn/cloudbuild_v1.projects.locations.triggers.html @@ -234,6 +234,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -363,6 +368,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -763,6 +773,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -892,6 +907,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1320,6 +1340,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1449,6 +1474,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1861,6 +1891,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1990,6 +2025,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -2407,6 +2447,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -2536,6 +2581,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -2938,6 +2988,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -3067,6 +3122,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. diff --git a/docs/dyn/cloudbuild_v1.projects.triggers.html b/docs/dyn/cloudbuild_v1.projects.triggers.html index c91d64ddb1e..2473ae2222e 100644 --- a/docs/dyn/cloudbuild_v1.projects.triggers.html +++ b/docs/dyn/cloudbuild_v1.projects.triggers.html @@ -234,6 +234,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -363,6 +368,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -763,6 +773,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -892,6 +907,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1320,6 +1340,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1449,6 +1474,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -1861,6 +1891,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -1990,6 +2025,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -2408,6 +2448,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -2537,6 +2582,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. @@ -2937,6 +2987,11 @@

Method Details

"type": "A String", # The name of the failure. }, "finishTime": "A String", # Output only. Time at which execution of the build was finished. The difference between finish_time and start_time is the duration of the build's execution. + "gitConfig": { # GitConfig is a configuration for git operations. # Optional. Configuration for git operations. + "http": { # HttpConfig is a configuration for HTTP related git operations. # Configuration for HTTP related git operations. + "proxySecretVersionName": "A String", # SecretVersion resource of the HTTP proxy URL. The proxy URL should be in format protocol://@]proxyhost[:port]. + }, + }, "id": "A String", # Output only. Unique identifier of the build. "images": [ # A list of images to be pushed upon the successful completion of all build steps. The images are pushed using the builder service account's credentials. The digests of the pushed images will be stored in the `Build` resource's results field. If any of the images fail to be pushed, the build status is marked `FAILURE`. "A String", @@ -3066,6 +3121,11 @@

Method Details

"repository": "A String", # Required. Name of the Google Cloud Build repository, formatted as `projects/*/locations/*/connections/*/repositories/*`. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. }, + "developerConnectConfig": { # This config defines the location of a source through Developer Connect. # If provided, get the source from this Developer Connect config. + "dir": "A String", # Required. Directory, relative to the source root, in which to run the build. + "gitRepositoryLink": "A String", # Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`. + "revision": "A String", # Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. + }, "gitSource": { # Location of the source in any accessible Git repository. # If provided, get the source from this Git repository. "dir": "A String", # Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution. "revision": "A String", # The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch. 
diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html index 48e54f6d950..a68cde72e48 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.html @@ -389,7 +389,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -421,7 +421,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. @@ -744,7 +744,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -776,7 +776,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. @@ -1075,7 +1075,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -1107,7 +1107,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.targets.html b/docs/dyn/clouddeploy_v1.projects.locations.targets.html index 4b969ced2bf..48b7b95bd1f 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.targets.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.targets.html @@ -124,7 +124,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -156,7 +156,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. @@ -267,7 +267,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -299,7 +299,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. @@ -395,7 +395,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -427,7 +427,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. @@ -482,7 +482,7 @@

Method Details

"a_key": "A String", }, "anthosCluster": { # Information specifying an Anthos Cluster. # Optional. Information specifying an Anthos Cluster. - "membership": "A String", # Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + "membership": "A String", # Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. }, "createTime": "A String", # Output only. Time at which the `Target` was created. "customTarget": { # Information specifying a Custom Target. # Optional. Information specifying a Custom Target. @@ -514,7 +514,7 @@

Method Details

}, ], "gke": { # Information specifying a GKE Cluster. # Optional. Information specifying a GKE Cluster. - "cluster": "A String", # Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + "cluster": "A String", # Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. "internalIp": True or False, # Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). }, "labels": { # Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. diff --git a/docs/dyn/cloudkms_v1.folders.html b/docs/dyn/cloudkms_v1.folders.html new file mode 100644 index 00000000000..8788274037b --- /dev/null +++ b/docs/dyn/cloudkms_v1.folders.html @@ -0,0 +1,141 @@ + + + +

Cloud Key Management Service (KMS) API . folders

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ getAutokeyConfig(name, x__xgafv=None)

+

Returns the AutokeyConfig for a folder.

+

+ updateAutokeyConfig(name, body=None, updateMask=None, x__xgafv=None)

+

Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. An empty key project may be provided to clear the configuration.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ getAutokeyConfig(name, x__xgafv=None) +
Returns the AutokeyConfig for a folder.
+
+Args:
+  name: string, Required. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Cloud KMS Autokey configuration for a folder.
+  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision new CryptoKeys. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions).
+  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
+}
+
+ +
+ updateAutokeyConfig(name, body=None, updateMask=None, x__xgafv=None) +
Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. An empty key project may be provided to clear the configuration.
+
+Args:
+  name: string, Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Cloud KMS Autokey configuration for a folder.
+  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision new CryptoKeys. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions).
+  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
+}
+
+  updateMask: string, Required. Masks which fields of the AutokeyConfig to update, e.g. `keyProject`.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Cloud KMS Autokey configuration for a folder.
+  "keyProject": "A String", # Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision new CryptoKeys. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions).
+  "name": "A String", # Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/cloudkms_v1.html b/docs/dyn/cloudkms_v1.html index fafe6fdf9af..56ddcb4f020 100644 --- a/docs/dyn/cloudkms_v1.html +++ b/docs/dyn/cloudkms_v1.html @@ -74,6 +74,11 @@

Cloud Key Management Service (KMS) API

Instance Methods

+

+ folders() +

+

Returns the folders Resource.

+

projects()

diff --git a/docs/dyn/cloudkms_v1.projects.html b/docs/dyn/cloudkms_v1.projects.html index 0aa67171649..e0e0c98a1ca 100644 --- a/docs/dyn/cloudkms_v1.projects.html +++ b/docs/dyn/cloudkms_v1.projects.html @@ -82,10 +82,32 @@

Instance Methods

close()

Close httplib2 connections.

+

+ showEffectiveAutokeyConfig(parent, x__xgafv=None)

+

Returns the effective Cloud KMS Autokey configuration for a given project.

Method Details

close()
Close httplib2 connections.
+
+ showEffectiveAutokeyConfig(parent, x__xgafv=None) +
Returns the effective Cloud KMS Autokey configuration for a given project.
+
+Args:
+  parent: string, Required. Name of the resource project for which to show the effective Cloud KMS Autokey configuration. This may be helpful for interrogating the effect of nested folder configurations on a given resource project. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ShowEffectiveAutokeyConfig.
+  "keyProject": "A String", # Name of the key project configured in the resource project's folder ancestry.
+}
+
+ \ No newline at end of file diff --git a/docs/dyn/cloudkms_v1.projects.locations.html b/docs/dyn/cloudkms_v1.projects.locations.html index cd3bffeee4d..9def016fbb1 100644 --- a/docs/dyn/cloudkms_v1.projects.locations.html +++ b/docs/dyn/cloudkms_v1.projects.locations.html @@ -84,11 +84,21 @@

Instance Methods

Returns the ekmConnections Resource.

+

+ keyHandles() +

+

Returns the keyHandles Resource.

+

keyRings()

Returns the keyRings Resource.

+

+ operations() +

+

Returns the operations Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html b/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html new file mode 100644 index 00000000000..e8ea3fa8307 --- /dev/null +++ b/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html @@ -0,0 +1,187 @@ + + + +

Cloud Key Management Service (KMS) API . projects . locations . keyHandles

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, keyHandleId=None, x__xgafv=None)

+

Creates a new KeyHandle, triggering the provisioning of a new CryptoKey for CMEK use with the given resource type in the configured key project and the same location. GetOperation should be used to resolve the resulting long-running operation and get the resulting KeyHandle and CryptoKey.

+

+ get(name, x__xgafv=None)

+

Returns the KeyHandle.

+

+ list(parent, filter=None, x__xgafv=None)

+

Lists KeyHandles.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, keyHandleId=None, x__xgafv=None) +
Creates a new KeyHandle, triggering the provisioning of a new CryptoKey for CMEK use with the given resource type in the configured key project and the same location. GetOperation should be used to resolve the resulting long-running operation and get the resulting KeyHandle and CryptoKey.
+
+Args:
+  parent: string, Required. Name of the resource project and location to create the KeyHandle in, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.
+  "kmsKey": "A String", # Output only. Name of a CryptoKey that has been provisioned for Customer Managed Encryption Key (CMEK) use in the KeyHandle's project and location for the requested resource type.
+  "name": "A String", # Output only. Identifier. Name of the [KeyHandle] resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
+  "resourceTypeSelector": "A String", # Required. Indicates the resource type that the resulting CryptoKey is meant to protect, e.g. `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource types.
+}
+
+  keyHandleId: string, Optional. Id of the KeyHandle. Must be unique to the resource project and location. If not provided by the caller, a new UUID is used.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Returns the KeyHandle.
+
+Args:
+  name: string, Required. Name of the KeyHandle resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.
+  "kmsKey": "A String", # Output only. Name of a CryptoKey that has been provisioned for Customer Managed Encryption Key (CMEK) use in the KeyHandle's project and location for the requested resource type.
+  "name": "A String", # Output only. Identifier. Name of the [KeyHandle] resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
+  "resourceTypeSelector": "A String", # Required. Indicates the resource type that the resulting CryptoKey is meant to protect, e.g. `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource types.
+}
+
+ +
+ list(parent, filter=None, x__xgafv=None) +
Lists KeyHandles.
+
+Args:
+  parent: string, Required. Name of the resource project and location from which to list KeyHandles, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}`. (required)
+  filter: string, Optional. Filter to apply when listing KeyHandles, e.g. `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for Autokey.ListKeyHandles.
+  "keyHandles": [ # Resulting KeyHandles.
+    { # Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.
+      "kmsKey": "A String", # Output only. Name of a CryptoKey that has been provisioned for Customer Managed Encryption Key (CMEK) use in the KeyHandle's project and location for the requested resource type.
+      "name": "A String", # Output only. Identifier. Name of the [KeyHandle] resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
+      "resourceTypeSelector": "A String", # Required. Indicates the resource type that the resulting CryptoKey is meant to protect, e.g. `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource types.
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/cloudkms_v1.projects.locations.operations.html b/docs/dyn/cloudkms_v1.projects.locations.operations.html new file mode 100644 index 00000000000..436583a9174 --- /dev/null +++ b/docs/dyn/cloudkms_v1.projects.locations.operations.html @@ -0,0 +1,124 @@ + + + +

Cloud Key Management Service (KMS) API . projects . locations . operations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/cloudsupport_v2beta.cases.html b/docs/dyn/cloudsupport_v2beta.cases.html index fbf121c3b80..01ef81f2878 100644 --- a/docs/dyn/cloudsupport_v2beta.cases.html +++ b/docs/dyn/cloudsupport_v2beta.cases.html @@ -591,7 +591,7 @@

Method Details

parent: string, Required. The resource name of the case for which feed items should be listed. (required) orderBy: string, Optional. Field to order feed items by, followed by `asc` or `desc` postfix. The only valid field is `creation_time`. This list is case-insensitive, default sorting order is ascending, and the redundant space characters are insignificant. Example: `creation_time desc` pageSize: integer, Optional. The maximum number of feed items fetched with each request. - pageToken: string, Optional. A token identifying the page of results to return. If unspecified, the first page is retrieved. + pageToken: string, Optional. A token identifying the page of results to return. If unspecified, it retrieves the first page. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -648,9 +648,8 @@

Method Details

"googleSupport": True or False, # Output only. Whether the actor is a Google support actor. "username": "A String", # Output only. The username of the actor. It may look like an email or other format provided by the identity provider. If not provided, it is inferred from the credentials supplied. When a name is provided, a username must also be provided. If the user is a Google Support agent, this will not be set. }, - "bodyContent": { # Used for content on cases/comments/etc. On input either plain_text or rich_text should be supplied but not both. # Output only. The full email message body in both plaintext and richtext. The plaintext field of body_content will be elided in a best-effort attempt to remove extraneous reply threads. - "plainText": "A String", # Content in this field should be rendered and interpreted as-is. If not provided on input, then rich_text must be provided and this field will contain the plain text extracted from the rich_text input. - "richText": "A String", # Content in this field should be rendered and interpreted as HTML. If not provided on input, then plain_text must be provided and this field will contain the escaped plain text content. Only a subset of HTML tags and styles are allowed on input, all other tags will be stripped/sanitized. Output will always contain safe and valid HTML. + "bodyContent": { # Stores text attached to a support object. # Output only. The full email message body. A best-effort attempt is made to remove extraneous reply threads. + "plainText": "A String", # Content in this field should be rendered and interpreted as-is. }, "ccEmailAddresses": [ # Output only. Email addresses CCed on the email. "A String", diff --git a/docs/dyn/compute_beta.healthChecks.html b/docs/dyn/compute_beta.healthChecks.html index 75a0eec4dc2..b526982f464 100644 --- a/docs/dyn/compute_beta.healthChecks.html +++ b/docs/dyn/compute_beta.healthChecks.html @@ -151,7 +151,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -160,7 +160,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -169,7 +169,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -180,7 +180,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -410,7 +410,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -419,7 +419,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -428,7 +428,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -439,7 +439,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -490,7 +490,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -499,7 +499,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -508,7 +508,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -519,7 +519,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -699,7 +699,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -708,7 +708,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -717,7 +717,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -728,7 +728,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -809,7 +809,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -818,7 +818,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -827,7 +827,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -838,7 +838,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -1037,7 +1037,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -1046,7 +1046,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -1055,7 +1055,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -1066,7 +1066,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { diff --git a/docs/dyn/compute_beta.instanceTemplates.html b/docs/dyn/compute_beta.instanceTemplates.html index e2019d795b1..26dbe507138 100644 --- a/docs/dyn/compute_beta.instanceTemplates.html +++ b/docs/dyn/compute_beta.instanceTemplates.html @@ -298,7 +298,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -320,7 +320,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -792,7 +792,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -814,7 +814,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1208,7 +1208,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1230,7 +1230,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1655,7 +1655,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1677,7 +1677,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. diff --git a/docs/dyn/compute_beta.instances.html b/docs/dyn/compute_beta.instances.html index 1c4ec3ee8fa..c7bfb33a111 100644 --- a/docs/dyn/compute_beta.instances.html +++ b/docs/dyn/compute_beta.instances.html @@ -265,7 +265,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -726,7 +726,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -748,7 +748,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1341,7 +1341,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1363,7 +1363,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -2186,7 +2186,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -2208,7 +2208,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -3129,7 +3129,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -3151,7 +3151,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -3616,7 +3616,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -3638,7 +3638,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -7526,7 +7526,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -7548,7 +7548,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -7836,7 +7836,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -8112,7 +8112,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -8134,7 +8134,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. diff --git a/docs/dyn/compute_beta.machineImages.html b/docs/dyn/compute_beta.machineImages.html index 017d4b6f5ac..447b3b57a2b 100644 --- a/docs/dyn/compute_beta.machineImages.html +++ b/docs/dyn/compute_beta.machineImages.html @@ -409,7 +409,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -431,7 +431,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -635,7 +635,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -657,7 +657,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1008,7 +1008,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1030,7 +1030,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1234,7 +1234,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1256,7 +1256,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1634,7 +1634,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1656,7 +1656,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1860,7 +1860,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1882,7 +1882,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. diff --git a/docs/dyn/compute_beta.organizationSecurityPolicies.html b/docs/dyn/compute_beta.organizationSecurityPolicies.html index 55e2c8e78ca..ede5d629215 100644 --- a/docs/dyn/compute_beta.organizationSecurityPolicies.html +++ b/docs/dyn/compute_beta.organizationSecurityPolicies.html @@ -138,8 +138,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. } replaceExistingAssociation: boolean, Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists. @@ -855,8 +862,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1037,6 +1051,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1068,8 +1083,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }
@@ -1300,8 +1322,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1482,6 +1511,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1682,8 +1712,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1864,6 +1901,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1910,8 +1948,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "kind": "compute#organizationSecurityPoliciesListAssociationsResponse", # [Output Only] Type of securityPolicy associations. Always compute#organizationSecurityPoliciesListAssociations for lists of securityPolicy associations. @@ -2113,8 +2158,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -2295,6 +2347,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { diff --git a/docs/dyn/compute_beta.regionHealthChecks.html b/docs/dyn/compute_beta.regionHealthChecks.html index 9cb8bae4fc6..cc9565c33a8 100644 --- a/docs/dyn/compute_beta.regionHealthChecks.html +++ b/docs/dyn/compute_beta.regionHealthChecks.html @@ -265,7 +265,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -274,7 +274,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -283,7 +283,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -294,7 +294,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -346,7 +346,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -355,7 +355,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -364,7 +364,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -375,7 +375,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -556,7 +556,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -565,7 +565,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -574,7 +574,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -585,7 +585,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -667,7 +667,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -676,7 +676,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -685,7 +685,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -696,7 +696,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { @@ -897,7 +897,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP/2 health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpHealthCheck": { @@ -906,7 +906,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTP health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTP health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "httpsHealthCheck": { @@ -915,7 +915,7 @@

Method Details

"portName": "A String", # Not supported. "portSpecification": "A String", # Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports. "proxyHeader": "A String", # Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. - "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. + "requestPath": "A String", # The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986. "response": "A String", # Creates a content-based HTTPS health check. 
In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. @@ -926,7 +926,7 @@

Method Details

"name": "A String", # Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. For example, a name that is 1-63 characters long, matches the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`, and otherwise complies with RFC1035. This regular expression describes a name where the first character is a lowercase letter, and all following characters are a dash, lowercase letter, or digit, except the last character, which isn't a dash. "region": "A String", # [Output Only] Region where the health check resides. Not applicable to global health checks. "selfLink": "A String", # [Output Only] Server-defined URL for the resource. - "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. + "sourceRegions": [ # The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. 
- The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. "A String", ], "sslHealthCheck": { diff --git a/docs/dyn/compute_beta.regionInstanceTemplates.html b/docs/dyn/compute_beta.regionInstanceTemplates.html index f631cbd01d8..8c80ed386ef 100644 --- a/docs/dyn/compute_beta.regionInstanceTemplates.html +++ b/docs/dyn/compute_beta.regionInstanceTemplates.html @@ -408,7 +408,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -430,7 +430,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -722,7 +722,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -744,7 +744,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1170,7 +1170,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -1192,7 +1192,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. diff --git a/docs/dyn/compute_beta.regionInstances.html b/docs/dyn/compute_beta.regionInstances.html index 9bf62f27675..65db51c57fb 100644 --- a/docs/dyn/compute_beta.regionInstances.html +++ b/docs/dyn/compute_beta.regionInstances.html @@ -251,7 +251,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. @@ -273,7 +273,7 @@

Method Details

"name": "A String", # The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6. "natIP": "A String", # Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. "networkTier": "A String", # This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. - "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. + "publicPtrDomainName": "A String", # The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range. "securityPolicy": "A String", # [Output Only] The resource URL for the security policy associated with this access config. "setPublicPtr": True or False, # Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. 
This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. "type": "A String", # The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6. diff --git a/docs/dyn/compute_beta.regionSecurityPolicies.html b/docs/dyn/compute_beta.regionSecurityPolicies.html index 1ecc4981e8c..815b50a3d3d 100644 --- a/docs/dyn/compute_beta.regionSecurityPolicies.html +++ b/docs/dyn/compute_beta.regionSecurityPolicies.html @@ -585,8 +585,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -767,6 +774,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1011,8 +1019,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1193,6 +1208,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1394,8 +1410,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1576,6 +1599,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1676,8 +1700,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1858,6 +1889,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { diff --git a/docs/dyn/compute_beta.securityPolicies.html b/docs/dyn/compute_beta.securityPolicies.html index 1d0f5ffb5ff..5e1d39a0ba3 100644 --- a/docs/dyn/compute_beta.securityPolicies.html +++ b/docs/dyn/compute_beta.securityPolicies.html @@ -480,8 +480,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -662,6 +669,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -911,8 +919,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1093,6 +1108,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1335,8 +1351,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1517,6 +1540,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -1717,8 +1741,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -1899,6 +1930,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { @@ -2039,8 +2071,15 @@

Method Details

{ "attachmentId": "A String", # The resource that the security policy is attached to. "displayName": "A String", # [Output Only] The display name of the security policy of the association. + "excludedFolders": [ # A list of folders to exclude from the security policy. + "A String", + ], + "excludedProjects": [ # A list of projects to exclude from the security policy. + "A String", + ], "name": "A String", # The name for an association. "securityPolicyId": "A String", # [Output Only] The security policy ID of the association. + "shortName": "A String", # [Output Only] The short name of the security policy of the association. }, ], "creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. @@ -2221,6 +2260,7 @@

Method Details

], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "shortName": "A String", # User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. "type": "A String", # The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time. 
"userDefinedFields": [ # Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" base: IPV4 offset: 6 size: 2 mask: "0x1fff" { diff --git a/docs/dyn/compute_beta.snapshotSettings.html b/docs/dyn/compute_beta.snapshotSettings.html index 27a2b4ea505..8aeedceb396 100644 --- a/docs/dyn/compute_beta.snapshotSettings.html +++ b/docs/dyn/compute_beta.snapshotSettings.html @@ -105,9 +105,9 @@

Method Details

{ "storageLocation": { # Policy of which storage location is going to be resolved, and additional data that particularizes how the policy is going to be carried out. - "locations": { # When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are GCS bucket locations. + "locations": { # When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are Cloud Storage bucket locations. Only one location can be specified. "a_key": { # A structure for specifying storage locations. - "name": "A String", # Name of the location. It should be one of the GCS buckets. + "name": "A String", # Name of the location. It should be one of the Cloud Storage buckets. Only one location can be specified. }, }, "policy": "A String", # The chosen location policy. @@ -126,9 +126,9 @@

Method Details

{ "storageLocation": { # Policy of which storage location is going to be resolved, and additional data that particularizes how the policy is going to be carried out. - "locations": { # When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are GCS bucket locations. + "locations": { # When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are Cloud Storage bucket locations. Only one location can be specified. "a_key": { # A structure for specifying storage locations. - "name": "A String", # Name of the location. It should be one of the GCS buckets. + "name": "A String", # Name of the location. It should be one of the Cloud Storage buckets. Only one location can be specified. }, }, "policy": "A String", # The chosen location policy. diff --git a/docs/dyn/config_v1.projects.locations.deployments.html b/docs/dyn/config_v1.projects.locations.deployments.html index 44212776b39..3c7aab9d3ab 100644 --- a/docs/dyn/config_v1.projects.locations.deployments.html +++ b/docs/dyn/config_v1.projects.locations.deployments.html @@ -143,7 +143,7 @@

Method Details

The object takes the form of: { # A Deployment is a group of resources and configs managed and provisioned by Infra Manager. - "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identify deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. "a_key": "A String", }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs and artifacts in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty. Default bucket format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` - The field cannot be updated, including changing its presence @@ -371,7 +371,7 @@

Method Details

An object of the form: { # A Deployment is a group of resources and configs managed and provisioned by Infra Manager. - "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identify deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. "a_key": "A String", }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs and artifacts in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty. Default bucket format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` - The field cannot be updated, including changing its presence @@ -532,7 +532,7 @@

Method Details

{ "deployments": [ # List of Deployments. { # A Deployment is a group of resources and configs managed and provisioned by Infra Manager. - "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identify deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. "a_key": "A String", }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs and artifacts in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty. Default bucket format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` - The field cannot be updated, including changing its presence @@ -669,7 +669,7 @@

Method Details

The object takes the form of: { # A Deployment is a group of resources and configs managed and provisioned by Infra Manager. - "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identify deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. "a_key": "A String", }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs and artifacts in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty. Default bucket format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` - The field cannot be updated, including changing its presence diff --git a/docs/dyn/config_v1.projects.locations.previews.html b/docs/dyn/config_v1.projects.locations.previews.html index 8ac2d5256c7..176959165a6 100644 --- a/docs/dyn/config_v1.projects.locations.previews.html +++ b/docs/dyn/config_v1.projects.locations.previews.html @@ -111,6 +111,9 @@

Method Details

The object takes the form of: { # A preview represents a set of actions Infra Manager would perform to move the resources towards the desired state as specified in the configuration. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy preview during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "a_key": "A String", + }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs, artifacts, and in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty Default Bucket Format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` If omitted and deployment resource ref provided has artifacts_gcs_bucket defined, that artifact bucket is used. "build": "A String", # Output only. Cloud Build instance UUID associated with this preview. "createTime": "A String", # Output only. Time the preview was created. @@ -167,6 +170,8 @@

Method Details

"resourceAddress": "A String", # Address of the resource associated with the error, e.g. `google_compute_network.vpc_network`. }, ], + "tfVersion": "A String", # Output only. The current Terraform version set on the preview. It is in the format of "Major.Minor.Patch", for example, "1.3.10". + "tfVersionConstraint": "A String", # Optional. The user-specified Terraform version constraint. Example: "=1.3.10". "workerPool": "A String", # Optional. The user-specified Worker Pool resource in which the Cloud Build job will execute. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} If this field is unspecified, the default Cloud Build worker pool will be used. If omitted and deployment resource ref provided has worker_pool defined, that worker pool is used. } @@ -280,6 +285,9 @@

Method Details

An object of the form: { # A preview represents a set of actions Infra Manager would perform to move the resources towards the desired state as specified in the configuration. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy preview during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "a_key": "A String", + }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs, artifacts, and in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty Default Bucket Format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` If omitted and deployment resource ref provided has artifacts_gcs_bucket defined, that artifact bucket is used. "build": "A String", # Output only. Cloud Build instance UUID associated with this preview. "createTime": "A String", # Output only. Time the preview was created. @@ -336,6 +344,8 @@

Method Details

"resourceAddress": "A String", # Address of the resource associated with the error, e.g. `google_compute_network.vpc_network`. }, ], + "tfVersion": "A String", # Output only. The current Terraform version set on the preview. It is in the format of "Major.Minor.Patch", for example, "1.3.10". + "tfVersionConstraint": "A String", # Optional. The user-specified Terraform version constraint. Example: "=1.3.10". "workerPool": "A String", # Optional. The user-specified Worker Pool resource in which the Cloud Build job will execute. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} If this field is unspecified, the default Cloud Build worker pool will be used. If omitted and deployment resource ref provided has worker_pool defined, that worker pool is used. } @@ -362,6 +372,9 @@

Method Details

"nextPageToken": "A String", # Token to be supplied to the next ListPreviews request via `page_token` to obtain the next set of results. "previews": [ # List of Previewss. { # A preview represents a set of actions Infra Manager would perform to move the resources towards the desired state as specified in the configuration. + "annotations": { # Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy preview during automation. See https://google.aip.dev/148#annotations for details on format and size limitations. + "a_key": "A String", + }, "artifactsGcsBucket": "A String", # Optional. User-defined location of Cloud Build logs, artifacts, and in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty Default Bucket Format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` If omitted and deployment resource ref provided has artifacts_gcs_bucket defined, that artifact bucket is used. "build": "A String", # Output only. Cloud Build instance UUID associated with this preview. "createTime": "A String", # Output only. Time the preview was created. @@ -418,6 +431,8 @@

Method Details

"resourceAddress": "A String", # Address of the resource associated with the error, e.g. `google_compute_network.vpc_network`. }, ], + "tfVersion": "A String", # Output only. The current Terraform version set on the preview. It is in the format of "Major.Minor.Patch", for example, "1.3.10". + "tfVersionConstraint": "A String", # Optional. The user-specified Terraform version constraint. Example: "=1.3.10". "workerPool": "A String", # Optional. The user-specified Worker Pool resource in which the Cloud Build job will execute. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} If this field is unspecified, the default Cloud Build worker pool will be used. If omitted and deployment resource ref provided has worker_pool defined, that worker pool is used. }, ], diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html index 27a175ddefd..c2535519cc4 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.html @@ -163,7 +163,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -186,7 +186,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -354,7 +354,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -478,7 +478,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, ], "nextPageToken": "A String", # Set if there may be more results than fit in this response. @@ -519,7 +519,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -542,7 +542,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -710,7 +710,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -834,7 +834,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } location: string, The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. @@ -859,7 +859,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -882,7 +882,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1050,7 +1050,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -1174,7 +1174,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -1206,7 +1206,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -1229,7 +1229,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1397,7 +1397,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -1521,7 +1521,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -1611,7 +1611,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -1634,7 +1634,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1802,7 +1802,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -1926,7 +1926,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, ], "nextPageToken": "A String", # Set if there may be more results than fit in this response. @@ -2008,7 +2008,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -2031,7 +2031,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -2199,7 +2199,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -2323,7 +2323,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } location: string, The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. @@ -2342,7 +2342,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -2365,7 +2365,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -2533,7 +2533,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -2657,7 +2657,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.workItems.html b/docs/dyn/dataflow_v1b3.projects.jobs.workItems.html index 2aa4943b216..e69819595be 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.workItems.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.workItems.html @@ -426,6 +426,16 @@

Method Details

"commitStreamChunkSizeBytes": "A String", # Chunk size for commit streams from the harness to windmill. "getDataStreamChunkSizeBytes": "A String", # Chunk size for get data streams from the harness to windmill. "maxWorkItemCommitBytes": "A String", # Maximum size for work item commit supported windmill storage layer. + "operationalLimits": { # Operational limits imposed on streaming jobs by the backend. # Operational limits for the streaming job. Can be used by the worker to validate outputs sent to the backend. + "maxBagElementBytes": "A String", # The maximum size for an element in bag state. + "maxGlobalDataBytes": "A String", # The maximum size for an element in global data. + "maxKeyBytes": "A String", # The maximum size allowed for a key. + "maxProductionOutputBytes": "A String", # The maximum size for a single output element. + "maxSortedListElementBytes": "A String", # The maximum size for an element in sorted list state. + "maxSourceStateBytes": "A String", # The maximum size for a source state update. + "maxTagBytes": "A String", # The maximum size for a state tag. + "maxValueBytes": "A String", # The maximum size for a value state field. + }, "streamingComputationConfigs": [ # Set of computation configuration information. { # Configuration information for a single streaming computation. "computationId": "A String", # Unique identifier for this computation. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html index c72c7d5932b..be9b0459f0c 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html @@ -238,7 +238,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -261,7 +261,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -429,7 +429,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -553,7 +553,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, } diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html index a58e0b55631..af680632230 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html @@ -151,7 +151,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -174,7 +174,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -342,7 +342,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -466,7 +466,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } replaceJobId: string, Deprecated. This field is now in the Job message. @@ -490,7 +490,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -513,7 +513,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -681,7 +681,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -805,7 +805,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -837,7 +837,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -860,7 +860,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1028,7 +1028,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -1152,7 +1152,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -1350,7 +1350,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -1373,7 +1373,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1541,7 +1541,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -1665,7 +1665,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, ], "nextPageToken": "A String", # Set if there may be more results than fit in this response. @@ -1749,7 +1749,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -1772,7 +1772,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -1940,7 +1940,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -2064,7 +2064,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } updateMask: string, The list of fields to update relative to Job. If empty, only RequestedJobState will be considered for update. If the FieldMask is not empty and RequestedJobState is none/empty, The fields specified in the update mask will be the only ones considered for update. If both RequestedJobState and update_mask are specified, an error will be returned as we cannot update both state and mask. @@ -2082,7 +2082,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -2105,7 +2105,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -2273,7 +2273,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -2397,7 +2397,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.workItems.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.workItems.html index fbc863419a8..512c7fdb2b9 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.workItems.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.workItems.html @@ -427,6 +427,16 @@

Method Details

"commitStreamChunkSizeBytes": "A String", # Chunk size for commit streams from the harness to windmill. "getDataStreamChunkSizeBytes": "A String", # Chunk size for get data streams from the harness to windmill. "maxWorkItemCommitBytes": "A String", # Maximum size for work item commit supported windmill storage layer. + "operationalLimits": { # Operational limits imposed on streaming jobs by the backend. # Operational limits for the streaming job. Can be used by the worker to validate outputs sent to the backend. + "maxBagElementBytes": "A String", # The maximum size for an element in bag state. + "maxGlobalDataBytes": "A String", # The maximum size for an element in global data. + "maxKeyBytes": "A String", # The maximum size allowed for a key. + "maxProductionOutputBytes": "A String", # The maximum size for a single output element. + "maxSortedListElementBytes": "A String", # The maximum size for an element in sorted list state. + "maxSourceStateBytes": "A String", # The maximum size for a source state update. + "maxTagBytes": "A String", # The maximum size for a state tag. + "maxValueBytes": "A String", # The maximum size for a value state field. + }, "streamingComputationConfigs": [ # Set of computation configuration information. { # Configuration information for a single streaming computation. "computationId": "A String", # Unique identifier for this computation. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.templates.html b/docs/dyn/dataflow_v1b3.projects.locations.templates.html index ed3f4223cf0..a0f54616cd0 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.templates.html @@ -149,7 +149,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -172,7 +172,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -340,7 +340,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -464,7 +464,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -637,7 +637,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -660,7 +660,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -828,7 +828,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -952,7 +952,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, } diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html index 3216530b8a4..a93864d3361 100644 --- a/docs/dyn/dataflow_v1b3.projects.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.templates.html @@ -148,7 +148,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -171,7 +171,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -339,7 +339,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -463,7 +463,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. } @@ -636,7 +636,7 @@

Method Details

"createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot. "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field might be mutated by the Dataflow service; callers cannot mutate it. "currentStateTime": "A String", # The timestamp associated with the current state. - "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job. + "environment": { # Describes the environment in which a Dataflow Job runs. # Optional. The environment for the job. "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset} "debugOptions": { # Describes any options that have an effect on the debugging of pipelines. # Any debugging options to be supplied to the job. @@ -659,7 +659,7 @@

Method Details

}, "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account. "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY - "serviceOptions": [ # The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). + "serviceOptions": [ # Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). "A String", ], "shuffleMode": "A String", # Output only. The shuffle mode used for the job. @@ -827,7 +827,7 @@

Method Details

"a_key": "A String", }, "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job. - "name": "A String", # The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` + "name": "A String", # Optional. The user-specified Dataflow job name. Only one active job with a given name can exist in a project within one region at any given time. Jobs in different regions can have the same name. If a caller attempts to create a job with the same name as an active job that already exists, the attempt returns the existing job. The name must match the regular expression `[a-z]([-a-z0-9]{0,1022}[a-z0-9])?` "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL. "displayData": [ # Pipeline level display data. { # Data provided with a pipeline or transform to provide descriptive info. @@ -951,7 +951,7 @@

Method Details

"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job. "a_key": "A String", }, - "type": "A String", # The type of Dataflow job. + "type": "A String", # Optional. The type of Dataflow job. }, } diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html index cc1d9ee5f2a..52bb3f143e7 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.compilationResults.html @@ -138,6 +138,9 @@

Method Details

"stack": "A String", # Output only. The error's full stack trace. }, ], + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "dataformCoreVersion": "A String", # Output only. The version of `@dataform/core` that was used for compilation. "gitCommitish": "A String", # Immutable. Git commit/tag/branch name at which the repository should be compiled. Must exist in the remote repository. Examples: - a commit SHA: `12ade345` - a tag: `tag1` - a branch name: `branch1` "name": "A String", # Output only. The compilation result's name. @@ -182,6 +185,9 @@

Method Details

"stack": "A String", # Output only. The error's full stack trace. }, ], + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "dataformCoreVersion": "A String", # Output only. The version of `@dataform/core` that was used for compilation. "gitCommitish": "A String", # Immutable. Git commit/tag/branch name at which the repository should be compiled. Must exist in the remote repository. Examples: - a commit SHA: `12ade345` - a tag: `tag1` - a branch name: `branch1` "name": "A String", # Output only. The compilation result's name. @@ -233,6 +239,9 @@

Method Details

"stack": "A String", # Output only. The error's full stack trace. }, ], + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "dataformCoreVersion": "A String", # Output only. The version of `@dataform/core` that was used for compilation. "gitCommitish": "A String", # Immutable. Git commit/tag/branch name at which the repository should be compiled. Must exist in the remote repository. Examples: - a commit SHA: `12ade345` - a tag: `tag1` - a branch name: `branch1` "name": "A String", # Output only. The compilation result's name. @@ -290,6 +299,9 @@

Method Details

"stack": "A String", # Output only. The error's full stack trace. }, ], + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "dataformCoreVersion": "A String", # Output only. The version of `@dataform/core` that was used for compilation. "gitCommitish": "A String", # Immutable. Git commit/tag/branch name at which the repository should be compiled. Must exist in the remote repository. Examples: - a commit SHA: `12ade345` - a tag: `tag1` - a branch name: `branch1` "name": "A String", # Output only. The compilation result's name. diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.html index 8c624c8a8c1..dcbcb019845 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.html @@ -196,7 +196,8 @@

Method Details

Returns: An object of the form: - { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } + { # `CommitRepositoryChanges` response message. + "commitSha": "A String", # The commit SHA of the current commit. } @@ -230,6 +231,9 @@

Method Details

{ # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -241,6 +245,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, @@ -266,6 +271,9 @@

Method Details

{ # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -277,6 +285,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, @@ -394,6 +403,9 @@

Method Details

{ # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -405,6 +417,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, @@ -478,6 +491,9 @@

Method Details

"repositories": [ # List of repositories. { # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -489,6 +505,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, @@ -534,6 +551,9 @@

Method Details

{ # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -545,6 +565,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, @@ -570,6 +591,9 @@

Method Details

{ # Represents a Dataform Git repository. "createTime": "A String", # Output only. The timestamp of when the repository was created. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Repository is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "displayName": "A String", # Optional. The repository's user-friendly name. "gitRemoteSettings": { # Controls Git remote configuration for a repository. # Optional. If set, configures this repository to be linked to a Git remote. "authenticationTokenSecretVersion": "A String", # Optional. The name of the Secret Manager secret version to use as an authentication token for Git operations. Must be in the format `projects/*/secrets/*/versions/*`. @@ -581,6 +605,7 @@

Method Details

"tokenStatus": "A String", # Output only. Deprecated: The field does not contain any token status information. Instead use https://cloud.google.com/dataform/reference/rest/v1beta1/projects.locations.repositories/computeAccessTokenStatus "url": "A String", # Required. The Git remote's URL. }, + "kmsKeyName": "A String", # Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. It is not possible to add or update the encryption key after the repository is created. Example: `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` "labels": { # Optional. Repository user labels. "a_key": "A String", }, diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workflowInvocations.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workflowInvocations.html index 93560505739..66f0f0ef4df 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workflowInvocations.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workflowInvocations.html @@ -142,6 +142,9 @@

Method Details

{ # Represents a single invocation of a compilation result. "compilationResult": "A String", # Immutable. The name of the compilation result to use for this invocation. Must be in the format `projects/*/locations/*/repositories/*/compilationResults/*`. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "invocationConfig": { # Includes various configuration options for a workflow invocation. If both `included_targets` and `included_tags` are unset, all actions will be included. # Immutable. If left unset, a default InvocationConfig will be used. "fullyRefreshIncrementalTablesEnabled": True or False, # Optional. When set to true, any incremental tables will be fully refreshed. "includedTags": [ # Optional. The set of tags to include. @@ -178,6 +181,9 @@

Method Details

{ # Represents a single invocation of a compilation result. "compilationResult": "A String", # Immutable. The name of the compilation result to use for this invocation. Must be in the format `projects/*/locations/*/repositories/*/compilationResults/*`. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "invocationConfig": { # Includes various configuration options for a workflow invocation. If both `included_targets` and `included_tags` are unset, all actions will be included. # Immutable. If left unset, a default InvocationConfig will be used. "fullyRefreshIncrementalTablesEnabled": True or False, # Optional. When set to true, any incremental tables will be fully refreshed. "includedTags": [ # Optional. The set of tags to include. @@ -239,6 +245,9 @@

Method Details

{ # Represents a single invocation of a compilation result. "compilationResult": "A String", # Immutable. The name of the compilation result to use for this invocation. Must be in the format `projects/*/locations/*/repositories/*/compilationResults/*`. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "invocationConfig": { # Includes various configuration options for a workflow invocation. If both `included_targets` and `included_tags` are unset, all actions will be included. # Immutable. If left unset, a default InvocationConfig will be used. "fullyRefreshIncrementalTablesEnabled": True or False, # Optional. When set to true, any incremental tables will be fully refreshed. "includedTags": [ # Optional. The set of tags to include. @@ -292,6 +301,9 @@

Method Details

"workflowInvocations": [ # List of workflow invocations. { # Represents a single invocation of a compilation result. "compilationResult": "A String", # Immutable. The name of the compilation result to use for this invocation. Must be in the format `projects/*/locations/*/repositories/*/compilationResults/*`. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. Only set if the repository has a KMS Key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "invocationConfig": { # Includes various configuration options for a workflow invocation. If both `included_targets` and `included_tags` are unset, all actions will be included. # Immutable. If left unset, a default InvocationConfig will be used. "fullyRefreshIncrementalTablesEnabled": True or False, # Optional. When set to true, any incremental tables will be fully refreshed. "includedTags": [ # Optional. The set of tags to include. diff --git a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html index 269f2e5f805..82972dcdced 100644 --- a/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html +++ b/docs/dyn/dataform_v1beta1.projects.locations.repositories.workspaces.html @@ -206,6 +206,9 @@

Method Details

The object takes the form of: { # Represents a Dataform Git workspace. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Workspace is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "name": "A String", # Identifier. The workspace's name. } @@ -219,6 +222,9 @@

Method Details

An object of the form: { # Represents a Dataform Git workspace. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Workspace is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "name": "A String", # Identifier. The workspace's name. } @@ -321,6 +327,9 @@

Method Details

An object of the form: { # Represents a Dataform Git workspace. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Workspace is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "name": "A String", # Identifier. The workspace's name. } @@ -409,6 +418,9 @@

Method Details

], "workspaces": [ # List of workspaces. { # Represents a Dataform Git workspace. + "dataEncryptionState": { # Describes encryption state of a resource. # Output only. A data encryption state of a Git repository if this Workspace is protected by a KMS key. + "kmsKeyVersionName": "A String", # The KMS key version name with which data of a resource is encrypted. + }, "name": "A String", # Identifier. The workspace's name. }, ], diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html index 4a155283e2c..ac15b767e6f 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html @@ -386,7 +386,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html index 44c8bd98132..5f016c9f872 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.servingConfigs.html @@ -319,7 +319,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.html index ba0bfa32208..7b4ed01f0b2 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.html @@ -264,6 +264,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html index 2688f518f27..22cd1d37c7a 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html @@ -128,6 +128,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -193,6 +194,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -290,6 +292,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -329,6 +332,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -375,6 +379,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html index 77a1cd367e3..a63ed21f8fb 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.servingConfigs.html @@ -319,7 +319,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html index c8291797d49..997b192db37 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html @@ -386,7 +386,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html index 3e3dbff0cc3..d0caf7c823f 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.servingConfigs.html @@ -319,7 +319,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.siteSearchEngine.targetSites.html index ad25408c724..b2597810314 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.siteSearchEngine.targetSites.html @@ -123,6 +123,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -188,6 +189,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -285,6 +287,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -324,6 +327,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -370,6 +374,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1alpha.html b/docs/dyn/discoveryengine_v1alpha.html index 92bf4fe48f6..bd5353c9646 100644 --- a/docs/dyn/discoveryengine_v1alpha.html +++ b/docs/dyn/discoveryengine_v1alpha.html @@ -74,11 +74,6 @@

Discovery Engine API

Instance Methods

-

- locations() -

-

Returns the locations Resource.

-

projects()

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html index 3d0ddca5ead..4378ecc406a 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html @@ -277,7 +277,8 @@

Method Details

Allowed values PROCESSED_DOCUMENT_TYPE_UNSPECIFIED - Default value. PARSED_DOCUMENT - Available for all data store parsing configs. - CHUNKED_DOCUMENT - Only available if ChunkingConfig is enabeld on the data store. + CHUNKED_DOCUMENT - Only available if ChunkingConfig is enabled on the data store. + PNG_CONVERTED_DOCUMENT - Returns the converted PNG Image bytes if available. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html index 13c37761c5b..8807442302d 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html @@ -209,7 +209,6 @@

Method Details

"userPseudoId": "A String", # A unique identifier for tracking users. }, "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the summary response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) - "name": "A String", # Required. The resource name of the Conversation to get. Format: `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/{conversation_id}`. Use `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/-` to activate auto session mode, which automatically creates a new conversation inside a ConverseConversation session. "query": { # Defines text input. # Required. Current user input. "context": { # Defines context of the conversation # Conversation context of the input. "activeDocument": "A String", # The current active document the user opened. It contains the document resource reference. 
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html new file mode 100644 index 00000000000..36ba18b1a73 --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html @@ -0,0 +1,117 @@ + + + +

Discovery Engine API . projects . locations . collections . dataStores . customModels

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ list(dataStore, x__xgafv=None)

+

Gets a list of all the custom models.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ list(dataStore, x__xgafv=None) +
Gets a list of all the custom models.
+
+Args:
+  dataStore: string, Required. The resource name of the parent Data Store, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store`. This field is used to identify the data store where to fetch the models from. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchTuningService.ListCustomModels method.
+  "models": [ # List of custom tuning models.
+    { # Metadata that describes a custom tuned model.
+      "createTime": "A String", # Timestamp the Model was created at.
+      "displayName": "A String", # The display name of the model.
+      "modelState": "A String", # The state that the model is in (e.g.`TRAINING` or `TRAINING_FAILED`).
+      "modelVersion": "A String",
+      "name": "A String", # Required. The fully qualified resource name of the model. Format: `projects/{project_number}/locations/{location}/collections/{collection}/dataStores/{data_store}/customTuningModels/{custom_tuning_model}` model must be an alpha-numerical string with limit of 40 characters.
+      "trainingStartTime": "A String", # Timestamp the model training was initiated.
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html index 8d5fa971ae6..a5edbe330f6 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html @@ -84,6 +84,11 @@

Instance Methods

Returns the conversations Resource.

+

+ customModels() +

+

Returns the customModels Resource.

+

models()

@@ -517,7 +522,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. @@ -817,6 +822,7 @@

Method Details

"testDataPath": "A String", # Cloud Storage test data. Same format as train_data_path. If not provided, a random 80/20 train/test split will be performed on train_data_path. "trainDataPath": "A String", # Cloud Storage training data path whose format should be `gs:///`. The file should be in tsv format. Each line should have the doc_id and query_id and score (number). For search-tuning model, it should have the query-id corpus-id score as tsv file header. The score should be a number in `[0, inf+)`. The larger the number is, the more relevant the pair is. Example: * `query-id\tcorpus-id\tscore` * `query1\tdoc1\t1` }, + "modelId": "A String", # If not provided, a UUID will be generated. "modelType": "A String", # Model to be trained. Supported values are: * **search-tuning**: Fine tuning the search system based on data provided. } diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html index 847f8d652f4..d444890bf34 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html @@ -1027,7 +1027,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 @@ -1040,7 +1040,6 @@

Method Details

}, "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joint by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: `relevance_score`: pre-defined keywords, used for measure relevance between query and document. `embedding_field_path`: the document embedding field used with query embedding vector. `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search. - "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect. "mode": "A String", # The mode under which spell correction should take effect to replace the original search query. Default to Mode.AUTO. 
}, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.html index 6592c4f6579..92717248ea3 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.html @@ -264,6 +264,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html index 0dcbee7461e..1245dc68b56 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html @@ -128,6 +128,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -193,6 +194,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -290,6 +292,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -329,6 +332,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -375,6 +379,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html index 19e616127ea..baafcd199a7 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html @@ -209,7 +209,6 @@

Method Details

"userPseudoId": "A String", # A unique identifier for tracking users. }, "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the summary response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) - "name": "A String", # Required. The resource name of the Conversation to get. Format: `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/{conversation_id}`. Use `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/-` to activate auto session mode, which automatically creates a new conversation inside a ConverseConversation session. "query": { # Defines text input. # Required. Current user input. "context": { # Defines context of the conversation # Conversation context of the input. "activeDocument": "A String", # The current active document the user opened. It contains the document resource reference. 
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html index 1e7bb952e50..5bed845e857 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html @@ -1027,7 +1027,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 @@ -1040,7 +1040,6 @@

Method Details

}, "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joint by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: `relevance_score`: pre-defined keywords, used for measure relevance between query and document. `embedding_field_path`: the document embedding field used with query embedding vector. `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search. - "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect. "mode": "A String", # The mode under which spell correction should take effect to replace the original search query. Default to Mode.AUTO. 
}, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html index 67f21605a1a..076655f524a 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html @@ -277,7 +277,8 @@

Method Details

Allowed values PROCESSED_DOCUMENT_TYPE_UNSPECIFIED - Default value. PARSED_DOCUMENT - Available for all data store parsing configs. - CHUNKED_DOCUMENT - Only available if ChunkingConfig is enabeld on the data store. + CHUNKED_DOCUMENT - Only available if ChunkingConfig is enabled on the data store. + PNG_CONVERTED_DOCUMENT - Returns the converted PNG Image bytes if available. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html index 4167bc5e16e..050be5ceba0 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html @@ -209,7 +209,6 @@

Method Details

"userPseudoId": "A String", # A unique identifier for tracking users. }, "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the summary response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customer might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) - "name": "A String", # Required. The resource name of the Conversation to get. Format: `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/{conversation_id}`. Use `projects/{project_number}/locations/{location_id}/collections/{collection}/dataStores/{data_store_id}/conversations/-` to activate auto session mode, which automatically creates a new conversation inside a ConverseConversation session. "query": { # Defines text input. # Required. Current user input. "context": { # Defines context of the conversation # Conversation context of the input. "activeDocument": "A String", # The current active document the user opened. It contains the document resource reference. 
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html index 730fcf01442..8660b130ee8 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html @@ -514,7 +514,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html index b90e3f5e430..a906fff1fb7 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html @@ -1027,7 +1027,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 @@ -1040,7 +1040,6 @@

Method Details

}, "rankingExpression": "A String", # The ranking expression controls the customized ranking on retrieval documents. This overrides ServingConfig.ranking_expression. The ranking expression is a single function or multiple functions that are joint by "+". * ranking_expression = function, { " + ", function }; Supported functions: * double * relevance_score * double * dotProduct(embedding_field_path) Function variables: `relevance_score`: pre-defined keywords, used for measure relevance between query and document. `embedding_field_path`: the document embedding field used with query embedding vector. `dotProduct`: embedding function between embedding_field_path and query embedding vector. Example ranking expression: If document has an embedding field doc_embedding, the ranking expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. "safeSearch": True or False, # Whether to turn on safe search. This is only supported for website search. - "servingConfig": "A String", # Required. The resource name of the Search serving config, such as `projects/*/locations/global/collections/default_collection/engines/*/servingConfigs/default_serving_config`, or `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/servingConfigs/default_serving_config`. This field is used to identify the serving configuration name, set of models used to make the search. "spellCorrectionSpec": { # The specification for query spell correction. # The spell correction specification that specifies the mode under which spell correction takes effect. "mode": "A String", # The mode under which spell correction should take effect to replace the original search query. Default to Mode.AUTO. 
}, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.siteSearchEngine.targetSites.html index 20c4da7a4f6..793fd564dd2 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.siteSearchEngine.targetSites.html @@ -123,6 +123,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -188,6 +189,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -285,6 +287,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -324,6 +327,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -370,6 +374,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.groundingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.groundingConfigs.html index 7a97e3ab16e..c900e5e3af1 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.groundingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.groundingConfigs.html @@ -91,7 +91,7 @@

Method Details

The object takes the form of: { # Request message for GroundedGenerationService.CheckGrounding method. - "answerCandidate": "A String", # Answer candidate to check. + "answerCandidate": "A String", # Answer candidate to check. Can have a maximum length of 1024 characters. "facts": [ # List of facts for the grounding check. We support up to 200 facts. { # Grounding Fact. "attributes": { # Attributes associated with the fact. Common attributes include `source` (indicating where the fact was sourced from), `author` (indicating the author of the fact), and so on. @@ -103,6 +103,9 @@

Method Details

"groundingSpec": { # Specification for the grounding check. # Configuration of the grounding check. "citationThreshold": 3.14, # The threshold (in [0,1]) used for determining whether a fact must be cited for a claim in the answer candidate. Choosing a higher threshold will lead to fewer but very strong citations, while choosing a lower threshold may lead to more but somewhat weaker citations. If unset, the threshold will default to 0.6. }, + "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details. + "a_key": "A String", + }, } x__xgafv: string, V1 error format. @@ -117,6 +120,7 @@

Method Details

"citedChunks": [ # List of facts cited across all claims in the answer candidate. These are derived from the facts supplied in the request. { # Fact Chunk. "chunkText": "A String", # Text content of the fact chunk. Can be at most 10K characters long. + "index": 42, # The index of this chunk. Currently, only used for the streaming mode. "source": "A String", # Source from which this fact chunk was retrieved. If it was retrieved from the GroundingFacts provided in the request then this field will contain the index of the specific fact from which this chunk was retrieved. "sourceMetadata": { # More fine-grained information for the source reference. "a_key": "A String", @@ -130,6 +134,7 @@

Method Details

], "claimText": "A String", # Text for the claim in the answer candidate. Always provided regardless of whether citations or anti-citations are found. "endPos": 42, # Position indicating the end of the claim in the answer candidate, exclusive. + "groundingCheckRequired": True or False, # Indicates that this claim required grounding check. When the system decided this claim doesn't require attribution/grounding check, this field will be set to false. In that case, no grounding check was done for the claim and therefore citation_indices, and anti_citation_indices should not be returned. "startPos": 42, # Position indicating the start of the claim in the answer candidate, measured in bytes. }, ], diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html new file mode 100644 index 00000000000..aeb15e1d133 --- /dev/null +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html @@ -0,0 +1,117 @@ + + + +

Discovery Engine API . projects . locations . collections . dataStores . customModels

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ list(dataStore, x__xgafv=None)

+

Gets a list of all the custom models.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ list(dataStore, x__xgafv=None) +
Gets a list of all the custom models.
+
+Args:
+  dataStore: string, Required. The resource name of the parent Data Store, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store`. This field is used to identify the data store where to fetch the models from. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for SearchTuningService.ListCustomModels method.
+  "models": [ # List of custom tuning models.
+    { # Metadata that describes a custom tuned model.
+      "createTime": "A String", # Timestamp the Model was created at.
+      "displayName": "A String", # The display name of the model.
+      "modelState": "A String", # The state that the model is in (e.g.`TRAINING` or `TRAINING_FAILED`).
+      "modelVersion": "A String",
+      "name": "A String", # Required. The fully qualified resource name of the model. Format: `projects/{project_number}/locations/{location}/collections/{collection}/dataStores/{data_store}/customTuningModels/{custom_tuning_model}` model must be an alpha-numerical string with limit of 40 characters.
+      "trainingStartTime": "A String", # Timestamp the model training was initiated.
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html index 511990bb332..b5cb670ef33 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html @@ -84,6 +84,11 @@

Instance Methods

Returns the conversations Resource.

+

+ customModels() +

+

Returns the customModels Resource.

+

models()

@@ -394,7 +399,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. @@ -595,6 +600,7 @@

Method Details

"testDataPath": "A String", # Cloud Storage test data. Same format as train_data_path. If not provided, a random 80/20 train/test split will be performed on train_data_path. "trainDataPath": "A String", # Cloud Storage training data path whose format should be `gs:///`. The file should be in tsv format. Each line should have the doc_id and query_id and score (number). For search-tuning model, it should have the query-id corpus-id score as tsv file header. The score should be a number in `[0, inf+)`. The larger the number is, the more relevant the pair is. Example: * `query-id\tcorpus-id\tscore` * `query1\tdoc1\t1` }, + "modelId": "A String", # If not provided, a UUID will be generated. "modelType": "A String", # Model to be trained. Supported values are: * **search-tuning**: Fine tuning the search system based on data provided. } diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html index cd7121993c3..72fcdd8b925 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html @@ -951,7 +951,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.html index 411118809a5..0082c890d86 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.html @@ -264,6 +264,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html index 3df7790e90a..929fdd94c58 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.siteSearchEngine.targetSites.html @@ -128,6 +128,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -193,6 +194,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -290,6 +292,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -329,6 +332,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -375,6 +379,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html index fdb4ece233d..070f2b5dbd3 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html @@ -951,7 +951,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html index 8ad9f0a2445..d4daaa4e433 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html @@ -391,7 +391,7 @@

Method Details

Args: parent: string, Required. The parent branch resource name, such as `projects/{project}/locations/{location}/collections/{collection_id}`. If the caller does not have permission to list DataStores under this location, regardless of whether or not this data store exists, a PERMISSION_DENIED error is returned. (required) - filter: string, Filter by solution type. For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' + filter: string, Filter by solution type . For example: filter = 'solution_type:SOLUTION_TYPE_SEARCH' pageSize: integer, Maximum number of DataStores to return. If unspecified, defaults to 10. The maximum allowed value is 50. Values above 50 will be coerced to 50. If this field is negative, an INVALID_ARGUMENT is returned. pageToken: string, A page token ListDataStoresResponse.next_page_token, received from a previous DataStoreService.ListDataStores call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to DataStoreService.ListDataStores must match the call that provided the page token. Otherwise, an INVALID_ARGUMENT error is returned. x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html index 6f931e88cdb..a292d90d8ff 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html @@ -951,7 +951,7 @@

Method Details

"imageBytes": "A String", # Base64 encoded image bytes. Supported image formats: JPEG, PNG, and BMP. }, "offset": 42, # A 0-indexed integer that specifies the current offset (that is, starting result location, amongst the Documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. If this field is negative, an `INVALID_ARGUMENT` is returned. - "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. + "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering for retail search, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "pageSize": 42, # Maximum number of Documents to return. The maximum allowed value depends on the data type. Values above the maximum value are coerced to the maximum value. * Websites with basic indexing: Default `10`, Maximum `25`. * Websites with advanced indexing: Default `25`, Maximum `50`. * Other: Default `50`, Maximum `100`. If this field is negative, an `INVALID_ARGUMENT` is returned. "pageToken": "A String", # A page token received from a previous SearchService.Search call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to SearchService.Search must match the call that provided the page token. Otherwise, an `INVALID_ARGUMENT` error is returned. "params": { # Additional search parameters. 
For public website search only, supported values are: * `user_country_code`: string. Default empty. If set to non-empty, results are restricted or boosted based on the location provided. Example: user_country_code: "au" For available codes see [Country Codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) * `search_type`: double. Default empty. Enables non-webpage searching depending on the value. The only valid non-default value is 1, which enables image searching. Example: search_type: 1 diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.siteSearchEngine.targetSites.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.siteSearchEngine.targetSites.html index 99924e385b0..1611420ac32 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.siteSearchEngine.targetSites.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.siteSearchEngine.targetSites.html @@ -123,6 +123,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -188,6 +189,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -285,6 +287,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -324,6 +327,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. @@ -370,6 +374,7 @@

Method Details

"indexingStatus": "A String", # Output only. Indexing status. "name": "A String", # Output only. The fully qualified resource name of the target site. `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/siteSearchEngine/targetSites/{target_site}` The `target_site_id` is system-generated. "providedUriPattern": "A String", # Required. Input only. The user provided URI pattern from which the `generated_uri_pattern` is generated. + "rootDomainUri": "A String", # Output only. Root domain of the provided_uri_pattern. "siteVerificationInfo": { # Verification information for target sites in advanced site search. # Output only. Site ownership and validity verification status. "siteVerificationState": "A String", # Site verification state indicating the ownership and validity. "verifyTime": "A String", # Latest site verification time. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.groundingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.groundingConfigs.html index 1508677ad11..644310dc38d 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.groundingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.groundingConfigs.html @@ -91,7 +91,7 @@

Method Details

The object takes the form of: { # Request message for GroundedGenerationService.CheckGrounding method. - "answerCandidate": "A String", # Answer candidate to check. + "answerCandidate": "A String", # Answer candidate to check. Can have a maximum length of 1024 characters. "facts": [ # List of facts for the grounding check. We support up to 200 facts. { # Grounding Fact. "attributes": { # Attributes associated with the fact. Common attributes include `source` (indicating where the fact was sourced from), `author` (indicating the author of the fact), and so on. @@ -103,6 +103,9 @@

Method Details

"groundingSpec": { # Specification for the grounding check. # Configuration of the grounding check. "citationThreshold": 3.14, # The threshold (in [0,1]) used for determining whether a fact must be cited for a claim in the answer candidate. Choosing a higher threshold will lead to fewer but very strong citations, while choosing a lower threshold may lead to more but somewhat weaker citations. If unset, the threshold will default to 0.6. }, + "userLabels": { # The user labels applied to a resource must meet the following requirements: * Each resource can have multiple labels, up to a maximum of 64. * Each label must be a key-value pair. * Keys have a minimum length of 1 character and a maximum length of 63 characters and cannot be empty. Values can be empty and have a maximum length of 63 characters. * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. All characters must use UTF-8 encoding, and international characters are allowed. * The key portion of a label must be unique. However, you can use the same key with multiple resources. * Keys must start with a lowercase letter or international character. See [Google Cloud Document](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more details. + "a_key": "A String", + }, } x__xgafv: string, V1 error format. @@ -117,6 +120,7 @@

Method Details

"citedChunks": [ # List of facts cited across all claims in the answer candidate. These are derived from the facts supplied in the request. { # Fact Chunk. "chunkText": "A String", # Text content of the fact chunk. Can be at most 10K characters long. + "index": 42, # The index of this chunk. Currently, only used for the streaming mode. "source": "A String", # Source from which this fact chunk was retrieved. If it was retrieved from the GroundingFacts provided in the request then this field will contain the index of the specific fact from which this chunk was retrieved. "sourceMetadata": { # More fine-grained information for the source reference. "a_key": "A String", @@ -130,6 +134,7 @@

Method Details

], "claimText": "A String", # Text for the claim in the answer candidate. Always provided regardless of whether citations or anti-citations are found. "endPos": 42, # Position indicating the end of the claim in the answer candidate, exclusive. + "groundingCheckRequired": True or False, # Indicates that this claim required grounding check. When the system decided this claim doesn't require attribution/grounding check, this field will be set to false. In that case, no grounding check was done for the claim and therefore citation_indices, and anti_citation_indices should not be returned. "startPos": 42, # Position indicating the start of the claim in the answer candidate, measured in bytes. }, ], diff --git a/docs/dyn/dlp_v2.infoTypes.html b/docs/dyn/dlp_v2.infoTypes.html index 3c996a0e3a2..c426ffab48b 100644 --- a/docs/dyn/dlp_v2.infoTypes.html +++ b/docs/dyn/dlp_v2.infoTypes.html @@ -107,7 +107,7 @@

Method Details

"infoTypes": [ # Set of sensitive infoTypes. { # InfoType description. "categories": [ # The category of the infoType. - { # Classification of infoTypes to organize them according to geographic location, industry, and data type. NEXT_ID: 47 + { # Classification of infoTypes to organize them according to geographic location, industry, and data type. NEXT_ID: 48 "industryCategory": "A String", # The group of relevant businesses where this infoType is commonly used "locationCategory": "A String", # The region or country that issued the ID or document represented by the infoType. "typeCategory": "A String", # The class of identifiers where this infoType belongs diff --git a/docs/dyn/dlp_v2.locations.infoTypes.html b/docs/dyn/dlp_v2.locations.infoTypes.html index 1ffe6bc3d8b..817571915af 100644 --- a/docs/dyn/dlp_v2.locations.infoTypes.html +++ b/docs/dyn/dlp_v2.locations.infoTypes.html @@ -107,7 +107,7 @@

Method Details

"infoTypes": [ # Set of sensitive infoTypes. { # InfoType description. "categories": [ # The category of the infoType. - { # Classification of infoTypes to organize them according to geographic location, industry, and data type. NEXT_ID: 47 + { # Classification of infoTypes to organize them according to geographic location, industry, and data type. NEXT_ID: 48 "industryCategory": "A String", # The group of relevant businesses where this infoType is commonly used "locationCategory": "A String", # The region or country that issued the ID or document represented by the infoType. "typeCategory": "A String", # The class of identifiers where this infoType belongs diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html index bed60cf8d4b..9754b56420e 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.dataset.html @@ -287,6 +287,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.html index 07e6b9fea13..65a6ba8f621 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.html @@ -581,6 +581,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, @@ -1519,6 +1522,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, @@ -2553,6 +2559,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html index 3f7f2dbe3e8..4692c55b026 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.humanReviewConfig.html @@ -124,6 +124,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, @@ -1102,6 +1105,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, diff --git a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html index 05e68050c07..199eea3c613 100644 --- a/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html +++ b/docs/dyn/documentai_v1beta3.projects.locations.processors.processorVersions.html @@ -722,6 +722,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, @@ -1660,6 +1663,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, @@ -2694,6 +2700,9 @@

Method Details

"pageEnd": 42, # Page where chunk ends in the document. "pageStart": 42, # Page where chunk starts in the document. }, + "sourceBlockIds": [ # DO NOT USE. List of all parsed documents layout source blocks used to generate the chunk. + "A String", + ], }, ], }, diff --git a/docs/dyn/doubleclickbidmanager_v2.queries.html b/docs/dyn/doubleclickbidmanager_v2.queries.html index 84e59a98eac..05ae787bf5e 100644 --- a/docs/dyn/doubleclickbidmanager_v2.queries.html +++ b/docs/dyn/doubleclickbidmanager_v2.queries.html @@ -151,48 +151,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, @@ -258,48 +216,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, @@ -385,48 +301,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, @@ -504,48 +378,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, @@ -655,48 +487,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, diff --git a/docs/dyn/doubleclickbidmanager_v2.queries.reports.html b/docs/dyn/doubleclickbidmanager_v2.queries.reports.html index 0db3487f66a..e996a6a1f0b 100644 --- a/docs/dyn/doubleclickbidmanager_v2.queries.reports.html +++ b/docs/dyn/doubleclickbidmanager_v2.queries.reports.html @@ -145,48 +145,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, @@ -251,48 +209,6 @@

Method Details

], "options": { # Additional query options. # Additional query options. "includeOnlyTargetedUserLists": True or False, # Set to true and filter your report by `FILTER_INSERTION_ORDER` or `FILTER_LINE_ITEM` to include data for audience lists specifically targeted by those items. - "pathQueryOptions": { # Path Query Options for Report Options. # Options that contain Path Filters and Custom Channel Groupings. This field is deprecated and will sunset on **May 1, 2024**. After sunset, requests using this field will return an error. - "channelGrouping": { # A channel grouping defines a set of rules that can be used to categorize events in a path report. # Custom Channel Groupings. - "fallbackName": "A String", # The name to apply to an event that does not match any of the rules in the channel grouping. - "name": "A String", # Channel Grouping name. - "rules": [ # Rules within Channel Grouping. There is a limit of 100 rules that can be set per channel grouping. - { # A Rule defines a name, and a boolean expression in [conjunctive normal form] (http://mathworld.wolfram.com/ConjunctiveNormalForm.html){.external} that can be applied to a path event to determine if that name should be applied. - "disjunctiveMatchStatements": [ # DisjunctiveMatchStatements within a Rule. DisjunctiveMatchStatement OR's all contained filters. - { # DisjunctiveMatchStatement that OR's all contained filters. - "eventFilters": [ # Filters. There is a limit of 100 filters that can be set per disjunctive match statement. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - }, - ], - "name": "A String", # Rule name. - }, - ], - }, - "pathFilters": [ # Path Filters. 
There is a limit of 100 path filters that can be set per report. - { # Path filters specify which paths to include in a report. A path is the result of combining DV360 events based on User ID to create a workflow of users' actions. When a path filter is set, the resulting report will only include paths that match the specified event at the specified position. All other paths will be excluded. - "eventFilters": [ # Filter on an event to be applied to some part of the path. - { # Defines the type of filter to be applied to the path, a DV360 event dimension filter. - "dimensionFilter": { # Dimension filter on path events. # Filter on a dimension. - "filter": "A String", # Dimension the filter is applied to. - "match": "A String", # Match logic of the filter. - "values": [ # Values to filter on. - "A String", - ], - }, - }, - ], - "pathMatchPosition": "A String", # The position of the path the filter should match to (first, last, or any event in path). - }, - ], - }, }, "type": "A String", # The type of the report. The type of the report will dictate what dimesions, filters, and metrics can be used. }, diff --git a/docs/dyn/firebaseappcheck_v1.projects.apps.debugTokens.html b/docs/dyn/firebaseappcheck_v1.projects.apps.debugTokens.html index c751d9033f8..9c3ffaa3e69 100644 --- a/docs/dyn/firebaseappcheck_v1.projects.apps.debugTokens.html +++ b/docs/dyn/firebaseappcheck_v1.projects.apps.debugTokens.html @@ -114,6 +114,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } x__xgafv: string, V1 error format. @@ -128,6 +129,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } @@ -167,6 +169,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } @@ -192,6 +195,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. }, ], "nextPageToken": "A String", # If the result list is too large to fit in a single response, then a token is returned. If the string is empty or omitted, then this response is the last page of results. This token can be used in a subsequent call to ListDebugTokens to find the next group of DebugTokens. Page tokens are short-lived and should not be persisted. @@ -225,6 +229,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } updateMask: string, Required. A comma-separated list of names of fields in the DebugToken to update. Example: `display_name`. @@ -240,6 +245,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.debugTokens.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.debugTokens.html index 42aca07b45d..984005db698 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.debugTokens.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.debugTokens.html @@ -114,6 +114,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } x__xgafv: string, V1 error format. @@ -128,6 +129,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } @@ -167,6 +169,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } @@ -192,6 +195,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. }, ], "nextPageToken": "A String", # If the result list is too large to fit in a single response, then a token is returned. If the string is empty or omitted, then this response is the last page of results. This token can be used in a subsequent call to ListDebugTokens to find the next group of DebugTokens. Page tokens are short-lived and should not be persisted. @@ -225,6 +229,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } updateMask: string, Required. A comma-separated list of names of fields in the DebugToken to update. Example: `display_name`. @@ -240,6 +245,7 @@

Method Details

"displayName": "A String", # Required. A human readable display name used to identify this debug token. "name": "A String", # Required. The relative resource name of the debug token, in the format: ``` projects/{project_number}/apps/{app_id}/debugTokens/{debug_token_id} ``` "token": "A String", # Required. Input only. Immutable. The secret token itself. Must be provided during creation, and must be a UUID4, case insensitive. This field is immutable once set, and cannot be provided during an UpdateDebugToken request. You can, however, delete this debug token using DeleteDebugToken to revoke it. For security reasons, this field will never be populated in any response. + "updateTime": "A String", # Output only. Timestamp when this debug token was most recently updated. } diff --git a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html index 7c6b5321b21..4a1d35c7e52 100644 --- a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html +++ b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html @@ -206,6 +206,7 @@

Method Details

"maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. "presencePenalty": 3.14, # Optional. Positive penalties. "responseMimeType": "A String", # Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. + "responseStyle": "A String", # Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -507,6 +508,7 @@

Method Details

"maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. "presencePenalty": 3.14, # Optional. Positive penalties. "responseMimeType": "A String", # Optional. Output response mimetype of the generated candidate text. Supported mimetype: - `text/plain`: (default) Text output. - `application/json`: JSON response in the candidates. The model needs to be prompted to output the appropriate response type, otherwise the behavior is undefined. This is a preview feature. + "responseStyle": "A String", # Optional. Control Three levels of creativity in the model output. Default: RESPONSE_STYLE_BALANCED "stopSequences": [ # Optional. Stop sequences. "A String", ], diff --git a/docs/dyn/gkehub_v1.projects.locations.features.html b/docs/dyn/gkehub_v1.projects.locations.features.html index 2cde4961bd6..834b94ccf7a 100644 --- a/docs/dyn/gkehub_v1.projects.locations.features.html +++ b/docs/dyn/gkehub_v1.projects.locations.features.html @@ -248,6 +248,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -442,6 +445,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -755,6 +761,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -799,6 +808,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -1171,6 +1181,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1365,6 +1378,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1678,6 +1694,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -1722,6 +1741,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -2082,6 +2102,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2276,6 +2299,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2589,6 +2615,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -2633,6 +2662,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -2949,6 +2979,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3143,6 +3176,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3456,6 +3492,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -3500,6 +3539,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.features.html b/docs/dyn/gkehub_v1alpha.projects.locations.features.html index 658444b9740..99d11c5a4ee 100644 --- a/docs/dyn/gkehub_v1alpha.projects.locations.features.html +++ b/docs/dyn/gkehub_v1alpha.projects.locations.features.html @@ -251,6 +251,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -458,6 +461,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -789,6 +795,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -1336,6 +1345,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1543,6 +1555,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1874,6 +1889,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -2409,6 +2427,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2616,6 +2637,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2947,6 +2971,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -3438,6 +3465,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3645,6 +3675,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3976,6 +4009,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, diff --git a/docs/dyn/gkehub_v1beta.projects.locations.features.html b/docs/dyn/gkehub_v1beta.projects.locations.features.html index 18195c8c213..d724f1f72a9 100644 --- a/docs/dyn/gkehub_v1beta.projects.locations.features.html +++ b/docs/dyn/gkehub_v1beta.projects.locations.features.html @@ -251,6 +251,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -457,6 +460,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -779,6 +785,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -827,6 +836,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -1210,6 +1220,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1416,6 +1429,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -1738,6 +1754,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -1786,6 +1805,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -2157,6 +2177,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2363,6 +2386,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -2685,6 +2711,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -2733,6 +2762,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. @@ -3060,6 +3090,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3266,6 +3299,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "mesh": { # **Service Mesh**: Spec for a single Membership for the servicemesh feature # Anthos Service Mesh-specific spec "controlPlane": "A String", # Deprecated: use `management` instead Enables automatic control plane management. @@ -3588,6 +3624,9 @@

Method Details

}, }, ], + "identityServiceOptions": { # Holds non-protocol-related configuration options. # Optional. non-protocol-related configuration options. + "sessionDuration": "A String", # Optional. Determines the lifespan of STS tokens issued by Anthos Identity Service. + }, }, "state": "A String", # Deployment state on this member }, @@ -3636,6 +3675,7 @@

Method Details

"details": "A String", # Human-readable explanation of code. }, ], + "implementation": "A String", # Output only. Implementation of managed control plane. "state": "A String", # LifecycleState of control plane management. }, "dataPlaneManagement": { # Status of data plane management. Only reported per-member. # Output only. Status of data plane management. diff --git a/docs/dyn/iap_v1.v1.html b/docs/dyn/iap_v1.v1.html index 75b79255f88..f1672166717 100644 --- a/docs/dyn/iap_v1.v1.html +++ b/docs/dyn/iap_v1.v1.html @@ -175,6 +175,9 @@

Method Details

"A String", ], }, + "identitySources": [ # Optional. Identity sources that IAP can use to authenticate the end user. Only one identity source can be configured. + "A String", + ], "oauthSettings": { # Configuration for OAuth login&consent flow behavior as well as for OAuth Credentials. # Settings to configure IAP's OAuth behavior. "loginHint": "A String", # Domain hint to send as hd=? parameter in OAuth request flow. Enables redirect to primary IDP by skipping Google's login screen. https://developers.google.com/identity/protocols/OpenIDConnect#hd-param Note: IAP does not verify that the id token's hd claim matches this value since access behavior is managed by IAM policies. "programmaticClients": [ # List of client ids allowed to use IAP programmatically. @@ -206,6 +209,16 @@

Method Details

"method": "A String", # Reauth method requested. "policyType": "A String", # How IAP determines the effective policy in cases of hierarchical policies. Policies are merged from higher in the hierarchy to lower in the hierarchy. }, + "workforceIdentitySettings": { # WorkforceIdentitySettings allows customers to configure workforce pools and OAuth 2.0 settings to gate their applications using a third-party IdP with access control. # Optional. Settings to configure the workforce identity federation, including workforce pools and OAuth 2.0 settings. + "oauth2": { # The OAuth 2.0 Settings # OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity federation services. + "clientId": "A String", # The OAuth 2.0 client ID registered in the workforce identity federation OAuth 2.0 Server. + "clientSecret": "A String", # Input only. The OAuth 2.0 client secret created while registering the client ID. + "clientSecretSha256": "A String", # Output only. SHA256 hash value for the client secret. This field is returned by IAP when the settings are retrieved. + }, + "workforcePools": [ # The workforce pool resources. Only one workforce pool is accepted. + "A String", + ], + }, }, "applicationSettings": { # Wrapper over application specific settings for IAP. # Top level wrapper for all application related settings in IAP "accessDeniedPageSettings": { # Custom content configuration for access denied page. IAP allows customers to define a custom URI to use as the error page when access is denied to users. If IAP prevents access to this page, the default IAP error page will be displayed instead. # Customization for Access Denied page. @@ -343,6 +356,9 @@

Method Details

"A String", ], }, + "identitySources": [ # Optional. Identity sources that IAP can use to authenticate the end user. Only one identity source can be configured. + "A String", + ], "oauthSettings": { # Configuration for OAuth login&consent flow behavior as well as for OAuth Credentials. # Settings to configure IAP's OAuth behavior. "loginHint": "A String", # Domain hint to send as hd=? parameter in OAuth request flow. Enables redirect to primary IDP by skipping Google's login screen. https://developers.google.com/identity/protocols/OpenIDConnect#hd-param Note: IAP does not verify that the id token's hd claim matches this value since access behavior is managed by IAM policies. "programmaticClients": [ # List of client ids allowed to use IAP programmatically. @@ -374,6 +390,16 @@

Method Details

"method": "A String", # Reauth method requested. "policyType": "A String", # How IAP determines the effective policy in cases of hierarchical policies. Policies are merged from higher in the hierarchy to lower in the hierarchy. }, + "workforceIdentitySettings": { # WorkforceIdentitySettings allows customers to configure workforce pools and OAuth 2.0 settings to gate their applications using a third-party IdP with access control. # Optional. Settings to configure the workforce identity federation, including workforce pools and OAuth 2.0 settings. + "oauth2": { # The OAuth 2.0 Settings # OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity federation services. + "clientId": "A String", # The OAuth 2.0 client ID registered in the workforce identity federation OAuth 2.0 Server. + "clientSecret": "A String", # Input only. The OAuth 2.0 client secret created while registering the client ID. + "clientSecretSha256": "A String", # Output only. SHA256 hash value for the client secret. This field is returned by IAP when the settings are retrieved. + }, + "workforcePools": [ # The workforce pool resources. Only one workforce pool is accepted. + "A String", + ], + }, }, "applicationSettings": { # Wrapper over application specific settings for IAP. # Top level wrapper for all application related settings in IAP "accessDeniedPageSettings": { # Custom content configuration for access denied page. IAP allows customers to define a custom URI to use as the error page when access is denied to users. If IAP prevents access to this page, the default IAP error page will be displayed instead. # Customization for Access Denied page. @@ -422,6 +448,9 @@

Method Details

"A String", ], }, + "identitySources": [ # Optional. Identity sources that IAP can use to authenticate the end user. Only one identity source can be configured. + "A String", + ], "oauthSettings": { # Configuration for OAuth login&consent flow behavior as well as for OAuth Credentials. # Settings to configure IAP's OAuth behavior. "loginHint": "A String", # Domain hint to send as hd=? parameter in OAuth request flow. Enables redirect to primary IDP by skipping Google's login screen. https://developers.google.com/identity/protocols/OpenIDConnect#hd-param Note: IAP does not verify that the id token's hd claim matches this value since access behavior is managed by IAM policies. "programmaticClients": [ # List of client ids allowed to use IAP programmatically. @@ -453,6 +482,16 @@

Method Details

"method": "A String", # Reauth method requested. "policyType": "A String", # How IAP determines the effective policy in cases of hierarchical policies. Policies are merged from higher in the hierarchy to lower in the hierarchy. }, + "workforceIdentitySettings": { # WorkforceIdentitySettings allows customers to configure workforce pools and OAuth 2.0 settings to gate their applications using a third-party IdP with access control. # Optional. Settings to configure the workforce identity federation, including workforce pools and OAuth 2.0 settings. + "oauth2": { # The OAuth 2.0 Settings # OAuth 2.0 settings for IAP to perform OIDC flow with workforce identity federation services. + "clientId": "A String", # The OAuth 2.0 client ID registered in the workforce identity federation OAuth 2.0 Server. + "clientSecret": "A String", # Input only. The OAuth 2.0 client secret created while registering the client ID. + "clientSecretSha256": "A String", # Output only. SHA256 hash value for the client secret. This field is returned by IAP when the settings are retrieved. + }, + "workforcePools": [ # The workforce pool resources. Only one workforce pool is accepted. + "A String", + ], + }, }, "applicationSettings": { # Wrapper over application specific settings for IAP. # Top level wrapper for all application related settings in IAP "accessDeniedPageSettings": { # Custom content configuration for access denied page. IAP allows customers to define a custom URI to use as the error page when access is denied to users. If IAP prevents access to this page, the default IAP error page will be displayed instead. # Customization for Access Denied page. 
diff --git a/docs/dyn/index.md b/docs/dyn/index.md index 540bc635469..9a3b8634398 100644 --- a/docs/dyn/index.md +++ b/docs/dyn/index.md @@ -559,7 +559,6 @@ ## eventarc * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/eventarc_v1.html) -* [v1beta1](http://googleapis.github.io/google-api-python-client/docs/dyn/eventarc_v1beta1.html) ## factchecktools diff --git a/docs/dyn/logging_v2.entries.html b/docs/dyn/logging_v2.entries.html index ca63915a9da..bbcfc439ddf 100644 --- a/docs/dyn/logging_v2.entries.html +++ b/docs/dyn/logging_v2.entries.html @@ -150,8 +150,8 @@

Method Details

The object takes the form of: { # The parameters to ListLogEntries. - "filter": "A String", # Optional. A filter that chooses which log entries to return. For more information, see Logging query language (https://cloud.google.com/logging/docs/view/logging-query-language).Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of a filter is 20,000 characters. - "orderBy": "A String", # Optional. How the results should be sorted. Presently, the only permitted values are "timestamp asc" (default) and "timestamp desc". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their insert_id values. + "filter": "A String", # Optional. A filter that chooses which log entries to return. For more information, see Logging query language (https://cloud.google.com/logging/docs/view/logging-query-language).Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of a filter is 20,000 characters.To make queries faster, you can make the filter more selective by using restrictions on indexed fields (https://cloud.google.com/logging/docs/view/logging-query-language#indexed-fields) as well as limit the time range of the query by adding range restrictions on the timestamp field. + "orderBy": "A String", # Optional. How the results should be sorted. Presently, the only permitted values are "timestamp asc" (default) and "timestamp desc". 
The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their insert_id values.We recommend setting the order_by field to "timestamp desc" when listing recently ingested log entries. If not set, the default value of "timestamp asc" may take a long time to fetch matching logs that are only recently ingested. "pageSize": 42, # Optional. The maximum number of results to return from this request. Default is 50. If the value is negative, the request is rejected.The presence of next_page_token in the response indicates that more results might be available. "pageToken": "A String", # Optional. If present, then retrieve the next batch of results from the preceding call to this method. page_token must be the value of next_page_token from the previous response. The values of other method parameters should be identical to those in the previous call. "projectIds": [ # Optional. Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: "my-project-1A". diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html index 72079d08c5d..b5a700187ca 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html @@ -228,15 +228,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -289,13 +322,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -407,7 +460,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -751,15 +804,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -812,13 +898,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -930,7 +1036,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -1306,15 +1412,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -1367,13 +1506,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -1485,7 +1644,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -1842,15 +2001,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -1903,13 +2095,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -2021,7 +2233,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -2380,15 +2592,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -2441,13 +2686,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -2559,7 +2824,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -2898,15 +3163,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -2959,13 +3257,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -3077,7 +3395,7 @@

Method Details

}, "name": "A String", # Output only. The full name of the asset. "performanceData": { # Performance data for an asset. # Output only. Performance data for the asset. - "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 420 days. Aggregations are sorted from oldest to most recent. + "dailyResourceUsageAggregations": [ # Daily resource usage aggregations. Contains all of the data available for an asset, up to the last 40 days. Aggregations are sorted from oldest to most recent. { # Usage data aggregation for a single day. "cpu": { # Statistical aggregation of CPU usage. # CPU usage. "utilizationPercentage": { # Statistical aggregation of samples for a single resource usage. # CPU utilization percentage. @@ -3415,15 +3733,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -3476,13 +3827,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html index 36c23696646..6c22ddb926f 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html @@ -127,15 +127,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -188,13 +221,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], @@ -519,15 +572,48 @@

Method Details

"generatedId": "A String", # The database deployment generated ID. "manualUniqueId": "A String", # A manual unique ID set by the user. "mysql": { # Specific details for a Mysql database deployment. # Details of a MYSQL database deployment. - "plugins": [ # Optional. List of Mysql plugins. - { # Mysql plugin. + "plugins": [ # Optional. List of MySql plugins. + { # MySql plugin. "enabled": True or False, # Required. The plugin is active. "plugin": "A String", # Required. The plugin name. "version": "A String", # Required. The plugin version. }, ], + "properties": [ # Optional. List of MySql properties. + { # MySql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "resourceGroupsCount": 42, # Optional. Number of resource groups. + "variables": [ # Optional. List of MySql variables. + { # MySql variable. + "category": "A String", # Required. The variable category. + "value": "A String", # Required. The variable value. + "variable": "A String", # Required. The variable name. + }, + ], }, "postgresql": { # Specific details for a PostgreSQL database deployment. # Details of a PostgreSQL database deployment. + "properties": [ # Optional. List of PostgreSql properties. + { # PostgreSql property. + "enabled": True or False, # Required. The property is enabled. + "numericValue": "A String", # Required. The property numeric value. + "property": "A String", # Required. The property name. + }, + ], + "settings": [ # Optional. List of PostgreSql settings. + { # PostgreSql setting. + "boolValue": True or False, # Required. The setting boolean value. + "intValue": "A String", # Required. The setting int value. + "realValue": 3.14, # Required. The setting real value. + "setting": "A String", # Required. The setting name. + "source": "A String", # Required. The setting source. + "stringValue": "A String", # Required. 
The setting string value. Notice that enum values are stored as strings. + "unit": "A String", # Optional. The setting unit. + }, + ], }, "sqlServer": { # Specific details for a Microsoft SQL Server database deployment. # Details of a Microsoft SQL Server database deployment. "features": [ # Optional. List of SQL Server features. @@ -580,13 +666,33 @@

Method Details

}, "schemas": [ # The database schemas. { # Details of a database schema. + "mysql": { # Specific details for a Mysql database. # Details of a Mysql schema. + "storageEngines": [ # Optional. Mysql storage engine tables. + { # Mysql storage engine tables. + "encryptedTableCount": 42, # Optional. The number of encrypted tables. + "engine": "A String", # Required. The storage engine. + "tableCount": 42, # Optional. The number of tables. + }, + ], + }, "objects": [ # List of details of objects by category. { # Details of a group of database objects. "category": "A String", # The category of the objects. "count": "A String", # The number of objects. }, ], + "postgresql": { # Specific details for a PostgreSql schema. # Details of a PostgreSql schema. + "foreignTablesCount": 42, # Optional. PostgreSql foreign tables. + "postgresqlExtensions": [ # Optional. PostgreSql extensions. + { # PostgreSql extension. + "extension": "A String", # Required. The extension name. + "version": "A String", # Required. The extension version. + }, + ], + }, "schemaName": "A String", # The name of the schema. + "sqlServer": { # Specific details for a SqlServer database. # Details of a SqlServer schema. + }, "tablesSizeBytes": "A String", # The total size of tables in bytes. }, ], diff --git a/docs/dyn/monitoring_v1.projects.dashboards.html b/docs/dyn/monitoring_v1.projects.dashboards.html index 9fd4de1ab79..fc4f252a404 100644 --- a/docs/dyn/monitoring_v1.projects.dashboards.html +++ b/docs/dyn/monitoring_v1.projects.dashboards.html @@ -682,6 +682,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. @@ -2982,6 +2983,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. @@ -5306,6 +5308,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. @@ -7616,6 +7619,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. @@ -9934,6 +9938,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. @@ -12234,6 +12239,7 @@

Method Details

}, "dashboardFilters": [ # Filters to reduce the amount of data charted based on the filter criteria. { # A filter to reduce the amount of data charted in relevant widgets. + "applyToNewWidgets": True or False, # Whether to apply this filter to new widgets by default "filterType": "A String", # The specified filter type "labelKey": "A String", # Required. The key for the label "stringValue": "A String", # A variable-length string value. diff --git a/docs/dyn/monitoring_v3.projects.uptimeCheckConfigs.html b/docs/dyn/monitoring_v3.projects.uptimeCheckConfigs.html index 22f68431cef..c6a988e8bf7 100644 --- a/docs/dyn/monitoring_v3.projects.uptimeCheckConfigs.html +++ b/docs/dyn/monitoring_v3.projects.uptimeCheckConfigs.html @@ -147,7 +147,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. @@ -247,7 +247,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. @@ -372,7 +372,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. @@ -486,7 +486,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. @@ -604,7 +604,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. @@ -705,7 +705,7 @@

Method Details

}, "port": 42, # Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL. "requestMethod": "A String", # The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then request_method defaults to GET. - "serviceAgentAuthentication": { # Contains information needed for generating an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect). The OIDC token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. + "serviceAgentAuthentication": { # Contains information needed for generating either an OpenID Connect token (https://developers.google.com/identity/protocols/OpenIDConnect) or OAuth token (https://developers.google.com/identity/protocols/oauth2). The token will be generated for the Monitoring service agent service account. # If specified, Uptime will generate and attach an OIDC JWT token for the Monitoring service agent service account as an Authorization header in the HTTP request when probing. "type": "A String", # Type of authentication. }, "useSsl": True or False, # If true, use HTTPS instead of HTTP to run the check. diff --git a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html index 16e2e3f8ff1..7a4e3309c90 100644 --- a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html +++ b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html @@ -492,6 +492,11 @@

Method Details

"reason": "A String", # Explains why a file is non compliant for a CIS check. }, ], + "version": { # Describes the CIS benchmark version that is applicable to a given OS and os version. # The OS and config version the benchmark was run on. + "benchmarkDocument": "A String", # The name of the document that defines this benchmark, e.g. "CIS Container-Optimized OS". + "cpeUri": "A String", # The CPE URI (https://cpe.mitre.org/specification/) this benchmark is applicable to. + "version": "A String", # The version of the benchmark. This is set to the version of the OS-specific CIS document the benchmark is defined in. + }, }, "createTime": "A String", # Output only. The time this occurrence was created. "deployment": { # The period during which some deployable was active in a runtime. # Describes the deployment of an artifact on a runtime. diff --git a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html index 8434c1466f4..0b8e033cdeb 100644 --- a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html +++ b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html @@ -492,6 +492,11 @@

Method Details

"reason": "A String", # Explains why a file is non compliant for a CIS check. }, ], + "version": { # Describes the CIS benchmark version that is applicable to a given OS and os version. # The OS and config version the benchmark was run on. + "benchmarkDocument": "A String", # The name of the document that defines this benchmark, e.g. "CIS Container-Optimized OS". + "cpeUri": "A String", # The CPE URI (https://cpe.mitre.org/specification/) this benchmark is applicable to. + "version": "A String", # The version of the benchmark. This is set to the version of the OS-specific CIS document the benchmark is defined in. + }, }, "createTime": "A String", # Output only. The time this occurrence was created. "deployment": { # The period during which some deployable was active in a runtime. # Describes the deployment of an artifact on a runtime. diff --git a/docs/dyn/privateca_v1.projects.locations.caPools.certificateAuthorities.html b/docs/dyn/privateca_v1.projects.locations.caPools.certificateAuthorities.html index 926cbd5bb1f..e3137b717ca 100644 --- a/docs/dyn/privateca_v1.projects.locations.caPools.certificateAuthorities.html +++ b/docs/dyn/privateca_v1.projects.locations.caPools.certificateAuthorities.html @@ -478,6 +478,8 @@

Method Details

"pemCaCertificates": [ # Output only. This CertificateAuthority's certificate chain, including the current CertificateAuthority's certificate. Ordered such that the root issuer is the final element (consistent with RFC 5246). For a self-signed CA, this will only list the current CertificateAuthority's certificate. "A String", ], + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The State for this CertificateAuthority. "subordinateConfig": { # Describes a subordinate CA's issuers. This is either a resource name to a known issuing CertificateAuthority, or a PEM issuer certificate chain. # Optional. If this is a subordinate CertificateAuthority, this field will be set with the subordinate configuration, which describes its issuers. This may be updated, but this CertificateAuthority must continue to validate. "certificateAuthority": "A String", # Required. This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`. @@ -977,6 +979,8 @@

Method Details

"pemCaCertificates": [ # Output only. This CertificateAuthority's certificate chain, including the current CertificateAuthority's certificate. Ordered such that the root issuer is the final element (consistent with RFC 5246). For a self-signed CA, this will only list the current CertificateAuthority's certificate. "A String", ], + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The State for this CertificateAuthority. "subordinateConfig": { # Describes a subordinate CA's issuers. This is either a resource name to a known issuing CertificateAuthority, or a PEM issuer certificate chain. # Optional. If this is a subordinate CertificateAuthority, this field will be set with the subordinate configuration, which describes its issuers. This may be updated, but this CertificateAuthority must continue to validate. "certificateAuthority": "A String", # Required. This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`. @@ -1309,6 +1313,8 @@

Method Details

"pemCaCertificates": [ # Output only. This CertificateAuthority's certificate chain, including the current CertificateAuthority's certificate. Ordered such that the root issuer is the final element (consistent with RFC 5246). For a self-signed CA, this will only list the current CertificateAuthority's certificate. "A String", ], + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The State for this CertificateAuthority. "subordinateConfig": { # Describes a subordinate CA's issuers. This is either a resource name to a known issuing CertificateAuthority, or a PEM issuer certificate chain. # Optional. If this is a subordinate CertificateAuthority, this field will be set with the subordinate configuration, which describes its issuers. This may be updated, but this CertificateAuthority must continue to validate. "certificateAuthority": "A String", # Required. This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`. @@ -1650,6 +1656,8 @@

Method Details

"pemCaCertificates": [ # Output only. This CertificateAuthority's certificate chain, including the current CertificateAuthority's certificate. Ordered such that the root issuer is the final element (consistent with RFC 5246). For a self-signed CA, this will only list the current CertificateAuthority's certificate. "A String", ], + "satisfiesPzi": True or False, # Output only. Reserved for future use. + "satisfiesPzs": True or False, # Output only. Reserved for future use. "state": "A String", # Output only. The State for this CertificateAuthority. "subordinateConfig": { # Describes a subordinate CA's issuers. This is either a resource name to a known issuing CertificateAuthority, or a PEM issuer certificate chain. # Optional. If this is a subordinate CertificateAuthority, this field will be set with the subordinate configuration, which describes its issuers. This may be updated, but this CertificateAuthority must continue to validate. "certificateAuthority": "A String", # Required. This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`. diff --git a/docs/dyn/recommender_v1.billingAccounts.locations.insightTypes.html b/docs/dyn/recommender_v1.billingAccounts.locations.insightTypes.html index 975cb530acf..6b1cd86bccc 100644 --- a/docs/dyn/recommender_v1.billingAccounts.locations.insightTypes.html +++ b/docs/dyn/recommender_v1.billingAccounts.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1.billingAccounts.locations.recommenders.html b/docs/dyn/recommender_v1.billingAccounts.locations.recommenders.html index c7bbe5d3c45..6c989c13af9 100644 --- a/docs/dyn/recommender_v1.billingAccounts.locations.recommenders.html +++ b/docs/dyn/recommender_v1.billingAccounts.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/recommender_v1.organizations.locations.insightTypes.html b/docs/dyn/recommender_v1.organizations.locations.insightTypes.html index 61ae02d0e07..c4bf601e717 100644 --- a/docs/dyn/recommender_v1.organizations.locations.insightTypes.html +++ b/docs/dyn/recommender_v1.organizations.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
@@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1.organizations.locations.recommenders.html b/docs/dyn/recommender_v1.organizations.locations.recommenders.html index d68cf15c91b..80b34eed06f 100644 --- a/docs/dyn/recommender_v1.organizations.locations.recommenders.html +++ b/docs/dyn/recommender_v1.organizations.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/recommender_v1.projects.locations.insightTypes.html b/docs/dyn/recommender_v1.projects.locations.insightTypes.html index 340c7f844b5..be7341bd604 100644 --- a/docs/dyn/recommender_v1.projects.locations.insightTypes.html +++ b/docs/dyn/recommender_v1.projects.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
@@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1.projects.locations.recommenders.html b/docs/dyn/recommender_v1.projects.locations.recommenders.html index 954122e3f99..b87ff9b18dc 100644 --- a/docs/dyn/recommender_v1.projects.locations.recommenders.html +++ b/docs/dyn/recommender_v1.projects.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/recommender_v1beta1.billingAccounts.locations.insightTypes.html b/docs/dyn/recommender_v1beta1.billingAccounts.locations.insightTypes.html index fffb51cb9d3..edc0624c485 100644 --- a/docs/dyn/recommender_v1beta1.billingAccounts.locations.insightTypes.html +++ b/docs/dyn/recommender_v1beta1.billingAccounts.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
@@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1beta1.billingAccounts.locations.recommenders.html b/docs/dyn/recommender_v1beta1.billingAccounts.locations.recommenders.html index 55cf61c9f6a..1323595cf33 100644 --- a/docs/dyn/recommender_v1beta1.billingAccounts.locations.recommenders.html +++ b/docs/dyn/recommender_v1beta1.billingAccounts.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/recommender_v1beta1.organizations.locations.insightTypes.html b/docs/dyn/recommender_v1beta1.organizations.locations.insightTypes.html index 74e8bde7602..29a5c5c8c2d 100644 --- a/docs/dyn/recommender_v1beta1.organizations.locations.insightTypes.html +++ b/docs/dyn/recommender_v1beta1.organizations.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
@@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1beta1.organizations.locations.recommenders.html b/docs/dyn/recommender_v1beta1.organizations.locations.recommenders.html index 74406304e95..c627fd39f77 100644 --- a/docs/dyn/recommender_v1beta1.organizations.locations.recommenders.html +++ b/docs/dyn/recommender_v1beta1.organizations.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/recommender_v1beta1.projects.locations.insightTypes.html b/docs/dyn/recommender_v1beta1.projects.locations.insightTypes.html index 223b326ccad..a995110739c 100644 --- a/docs/dyn/recommender_v1beta1.projects.locations.insightTypes.html +++ b/docs/dyn/recommender_v1beta1.projects.locations.insightTypes.html @@ -119,7 +119,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
@@ -130,7 +130,7 @@

Method Details

Updates an InsightTypeConfig change. This will create a new revision of the config.
 
 Args:
-  name: string, Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
+  name: string, Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -145,7 +145,7 @@ 

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. } @@ -171,7 +171,7 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "name": "A String", # Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config + "name": "A String", # Identifier. Name of insight type config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/insightTypes/[INSIGHT_TYPE_ID]/config "revisionId": "A String", # Output only. Immutable. The revision ID of the config. A new revision is committed whenever the config is changed in any way. The format is an 8-character hexadecimal string. "updateTime": "A String", # Last time when the config was updated. }
diff --git a/docs/dyn/recommender_v1beta1.projects.locations.recommenders.html b/docs/dyn/recommender_v1beta1.projects.locations.recommenders.html index df1cbc92093..0dbbfd62552 100644 --- a/docs/dyn/recommender_v1beta1.projects.locations.recommenders.html +++ b/docs/dyn/recommender_v1beta1.projects.locations.recommenders.html @@ -114,7 +114,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -130,7 +130,7 @@

Method Details

Updates a Recommender Config. This will create a new revision of the config.
 
 Args:
-  name: string, Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
+  name: string, Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -140,7 +140,7 @@ 

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. @@ -166,7 +166,7 @@

Method Details

}, "displayName": "A String", # A user-settable field to provide a human-readable name to be used in user interfaces. "etag": "A String", # Fingerprint of the RecommenderConfig. Provides optimistic locking when updating. - "name": "A String", # Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config + "name": "A String", # Identifier. Name of recommender config. Eg, projects/[PROJECT_NUMBER]/locations/[LOCATION]/recommenders/[RECOMMENDER_ID]/config "recommenderGenerationConfig": { # A Configuration to customize the generation of recommendations. Eg, customizing the lookback period considered when generating a recommendation. # RecommenderGenerationConfig which configures the Generation of recommendations for this recommender. "params": { # Parameters for this RecommenderGenerationConfig. These configs can be used by or are applied to all subtypes. "a_key": "", # Properties of the object. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.html b/docs/dyn/retail_v2.projects.locations.catalogs.html index 2c389b998bc..1bd75c83db9 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.html @@ -123,7 +123,7 @@

Instance Methods

close()

Close httplib2 connections.

- completeQuery(catalog, dataset=None, deviceType=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)

+ completeQuery(catalog, dataset=None, deviceType=None, enableAttributeSuggestions=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)

Completes the specified prefix with keyword suggestions. This feature is only available for users who have Retail Search enabled. Enable Retail Search on Cloud Console before using this feature.

exportAnalyticsMetrics(catalog, body=None, x__xgafv=None)

@@ -162,13 +162,14 @@

Method Details

- completeQuery(catalog, dataset=None, deviceType=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None) + completeQuery(catalog, dataset=None, deviceType=None, enableAttributeSuggestions=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)
Completes the specified prefix with keyword suggestions. This feature is only available for users who have Retail Search enabled. Enable Retail Search on Cloud Console before using this feature.
 
 Args:
   catalog: string, Required. Catalog for which the completion is performed. Full resource name of catalog, such as `projects/*/locations/global/catalogs/default_catalog`. (required)
   dataset: string, Determines which dataset to use for fetching completion. "user-data" will use the imported dataset through CompletionService.ImportCompletionData. "cloud-retail" will use the dataset generated by cloud retail based on user events. If leave empty, it will use the "user-data". Current supported values: * user-data * cloud-retail: This option requires enabling auto-learning function first. See [guidelines](https://cloud.google.com/retail/docs/completion-overview#generated-completion-dataset).
   deviceType: string, The device type context for completion suggestions. We recommend that you leave this field empty. It can apply different suggestions on different device types, e.g. `DESKTOP`, `MOBILE`. If it is empty, the suggestions are across all device types. Supported formats: * `UNKNOWN_DEVICE_TYPE` * `DESKTOP` * `MOBILE` * A customized string starts with `OTHER_`, e.g. `OTHER_IPHONE`.
+  enableAttributeSuggestions: boolean, If true, attribute suggestions are enabled and provided in response. This field is only available for "cloud-retail" dataset.
   entity: string, The entity for customers who run multiple entities, domains, sites, or regions, for example, `Google US`, `Google Ads`, `Waymo`, `google.com`, `youtube.com`, etc. If this is set, it must be an exact match with UserEvent.entity to get per-entity autocomplete results.
   languageCodes: string, Note that this field applies for `user-data` dataset only. For requests with `cloud-retail` dataset, setting this field has no effect. The language filters applied to the output suggestions. If set, it should contain the language of the query. If not set, suggestions are returned without considering language restrictions. This is the BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Tags for Identifying Languages](https://tools.ietf.org/html/bcp47). The maximum number of language codes is 3. (repeated)
   maxSuggestions: integer, Completion max suggestions. If left unset or set to 0, then will fallback to the configured value CompletionConfig.max_suggestions. The maximum allowed max suggestions is 20. If it is set higher, it will be capped by 20.
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.html
index 0276f3a9950..3e6973fd0b4 100644
--- a/docs/dyn/retail_v2beta.projects.locations.catalogs.html
+++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.html
@@ -123,7 +123,7 @@ 

Instance Methods

close()

Close httplib2 connections.

- completeQuery(catalog, dataset=None, deviceType=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)

+ completeQuery(catalog, dataset=None, deviceType=None, enableAttributeSuggestions=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)

Completes the specified prefix with keyword suggestions. This feature is only available for users who have Retail Search enabled. Enable Retail Search on Cloud Console before using this feature.

exportAnalyticsMetrics(catalog, body=None, x__xgafv=None)

@@ -162,13 +162,14 @@

Method Details

- completeQuery(catalog, dataset=None, deviceType=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None) + completeQuery(catalog, dataset=None, deviceType=None, enableAttributeSuggestions=None, entity=None, languageCodes=None, maxSuggestions=None, query=None, visitorId=None, x__xgafv=None)
Completes the specified prefix with keyword suggestions. This feature is only available for users who have Retail Search enabled. Enable Retail Search on Cloud Console before using this feature.
 
 Args:
   catalog: string, Required. Catalog for which the completion is performed. Full resource name of catalog, such as `projects/*/locations/global/catalogs/default_catalog`. (required)
   dataset: string, Determines which dataset to use for fetching completion. "user-data" will use the imported dataset through CompletionService.ImportCompletionData. "cloud-retail" will use the dataset generated by cloud retail based on user events. If leave empty, it will use the "user-data". Current supported values: * user-data * cloud-retail: This option requires enabling auto-learning function first. See [guidelines](https://cloud.google.com/retail/docs/completion-overview#generated-completion-dataset).
   deviceType: string, The device type context for completion suggestions. We recommend that you leave this field empty. It can apply different suggestions on different device types, e.g. `DESKTOP`, `MOBILE`. If it is empty, the suggestions are across all device types. Supported formats: * `UNKNOWN_DEVICE_TYPE` * `DESKTOP` * `MOBILE` * A customized string starts with `OTHER_`, e.g. `OTHER_IPHONE`.
+  enableAttributeSuggestions: boolean, If true, attribute suggestions are enabled and provided in response. This field is only available for "cloud-retail" dataset.
   entity: string, The entity for customers who run multiple entities, domains, sites, or regions, for example, `Google US`, `Google Ads`, `Waymo`, `google.com`, `youtube.com`, etc. If this is set, it must be an exact match with UserEvent.entity to get per-entity autocomplete results.
   languageCodes: string, Note that this field applies for `user-data` dataset only. For requests with `cloud-retail` dataset, setting this field has no effect. The language filters applied to the output suggestions. If set, it should contain the language of the query. If not set, suggestions are returned without considering language restrictions. This is the BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see [Tags for Identifying Languages](https://tools.ietf.org/html/bcp47). The maximum number of language codes is 3. (repeated)
   maxSuggestions: integer, Completion max suggestions. If left unset or set to 0, then will fallback to the configured value CompletionConfig.max_suggestions. The maximum allowed max suggestions is 20. If it is set higher, it will be capped by 20.
diff --git a/docs/dyn/run_v1.namespaces.configurations.html b/docs/dyn/run_v1.namespaces.configurations.html
index c92b9ebc985..55e698e4900 100644
--- a/docs/dyn/run_v1.namespaces.configurations.html
+++ b/docs/dyn/run_v1.namespaces.configurations.html
@@ -361,6 +361,9 @@ 

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -711,6 +714,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v1.namespaces.revisions.html b/docs/dyn/run_v1.namespaces.revisions.html index 840b19e4fe7..6c8c93fa796 100644 --- a/docs/dyn/run_v1.namespaces.revisions.html +++ b/docs/dyn/run_v1.namespaces.revisions.html @@ -375,6 +375,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -691,6 +694,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v1.namespaces.services.html b/docs/dyn/run_v1.namespaces.services.html index 69866cc3cd9..768b35ef3bf 100644 --- a/docs/dyn/run_v1.namespaces.services.html +++ b/docs/dyn/run_v1.namespaces.services.html @@ -365,6 +365,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -723,6 +726,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1132,6 +1138,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1506,6 +1515,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1876,6 +1888,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -2234,6 +2249,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v1.projects.locations.configurations.html b/docs/dyn/run_v1.projects.locations.configurations.html index 27055dd079f..3ea5fd3c54e 100644 --- a/docs/dyn/run_v1.projects.locations.configurations.html +++ b/docs/dyn/run_v1.projects.locations.configurations.html @@ -361,6 +361,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -711,6 +714,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v1.projects.locations.revisions.html b/docs/dyn/run_v1.projects.locations.revisions.html index 9d4baf439fd..417d2d4bf8d 100644 --- a/docs/dyn/run_v1.projects.locations.revisions.html +++ b/docs/dyn/run_v1.projects.locations.revisions.html @@ -375,6 +375,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -691,6 +694,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v1.projects.locations.services.html b/docs/dyn/run_v1.projects.locations.services.html index 99eb815a05c..45707a7eb29 100644 --- a/docs/dyn/run_v1.projects.locations.services.html +++ b/docs/dyn/run_v1.projects.locations.services.html @@ -374,6 +374,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -732,6 +735,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1141,6 +1147,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1563,6 +1572,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -1933,6 +1945,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ @@ -2291,6 +2306,9 @@

Method Details

"name": "A String", # Name of the referent. }, ], + "nodeSelector": { # Optional. The Node Selector configuration. Map of selector key to a value which matches a node. + "a_key": "A String", + }, "serviceAccountName": "A String", # Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. "timeoutSeconds": 42, # TimeoutSeconds holds the max duration the instance is allowed for responding to a request. Cloud Run: defaults to 300 seconds (5 minutes). Maximum allowed value is 3600 seconds (1 hour). "volumes": [ diff --git a/docs/dyn/run_v2.projects.locations.services.html b/docs/dyn/run_v2.projects.locations.services.html index b90005da7dc..baa19aa4888 100644 --- a/docs/dyn/run_v2.projects.locations.services.html +++ b/docs/dyn/run_v2.projects.locations.services.html @@ -277,6 +277,9 @@

Method Details

"a_key": "A String", }, "maxInstanceRequestConcurrency": 42, # Optional. Sets the maximum number of requests that each serving instance can receive. + "nodeSelector": { # Hardware constraints configuration. # Optional. The node selector for the revision template. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "revision": "A String", # Optional. The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. "scaling": { # Settings for revision-level scaling settings. # Optional. Scaling settings for this Revision. "maxInstanceCount": 42, # Optional. Maximum number of serving instances that this resource should have. @@ -600,6 +603,9 @@

Method Details

"a_key": "A String", }, "maxInstanceRequestConcurrency": 42, # Optional. Sets the maximum number of requests that each serving instance can receive. + "nodeSelector": { # Hardware constraints configuration. # Optional. The node selector for the revision template. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "revision": "A String", # Optional. The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. "scaling": { # Settings for revision-level scaling settings. # Optional. Scaling settings for this Revision. "maxInstanceCount": 42, # Optional. Maximum number of serving instances that this resource should have. @@ -910,6 +916,9 @@

Method Details

"a_key": "A String", }, "maxInstanceRequestConcurrency": 42, # Optional. Sets the maximum number of requests that each serving instance can receive. + "nodeSelector": { # Hardware constraints configuration. # Optional. The node selector for the revision template. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "revision": "A String", # Optional. The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. "scaling": { # Settings for revision-level scaling settings. # Optional. Scaling settings for this Revision. "maxInstanceCount": 42, # Optional. Maximum number of serving instances that this resource should have. @@ -1177,6 +1186,9 @@

Method Details

"a_key": "A String", }, "maxInstanceRequestConcurrency": 42, # Optional. Sets the maximum number of requests that each serving instance can receive. + "nodeSelector": { # Hardware constraints configuration. # Optional. The node selector for the revision template. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "revision": "A String", # Optional. The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. "scaling": { # Settings for revision-level scaling settings. # Optional. Scaling settings for this Revision. "maxInstanceCount": 42, # Optional. Maximum number of serving instances that this resource should have. diff --git a/docs/dyn/run_v2.projects.locations.services.revisions.html b/docs/dyn/run_v2.projects.locations.services.revisions.html index 815f8a92ac6..06f4cf63232 100644 --- a/docs/dyn/run_v2.projects.locations.services.revisions.html +++ b/docs/dyn/run_v2.projects.locations.services.revisions.html @@ -310,6 +310,9 @@

Method Details

"logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. + "nodeSelector": { # Hardware constraints configuration. # The node selector for the revision. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "observedGeneration": "A String", # Output only. The generation of this Revision currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. "reconciling": True or False, # Output only. Indicates whether the resource's reconciliation is still in progress. See comments in `Service.reconciling` for additional information on reconciliation process in Cloud Run. "satisfiesPzs": True or False, # Output only. Reserved for future use. @@ -521,6 +524,9 @@

Method Details

"logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. + "nodeSelector": { # Hardware constraints configuration. # The node selector for the revision. + "accelerator": "A String", # Required. GPU accelerator type to attach to an instance. + }, "observedGeneration": "A String", # Output only. The generation of this Revision currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. "reconciling": True or False, # Output only. Indicates whether the resource's reconciliation is still in progress. See comments in `Service.reconciling` for additional information on reconciliation process in Cloud Run. "satisfiesPzs": True or False, # Output only. Reserved for future use. diff --git a/docs/dyn/servicemanagement_v1.services.configs.html b/docs/dyn/servicemanagement_v1.services.configs.html index 0efb2e7d763..606a6f77bb1 100644 --- a/docs/dyn/servicemanagement_v1.services.configs.html +++ b/docs/dyn/servicemanagement_v1.services.configs.html @@ -292,7 +292,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -907,7 +907,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -1534,7 +1534,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -2161,7 +2161,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. 
diff --git a/docs/dyn/servicemanagement_v1.services.html b/docs/dyn/servicemanagement_v1.services.html index 8dc7e8d7fbe..7c06886a2fb 100644 --- a/docs/dyn/servicemanagement_v1.services.html +++ b/docs/dyn/servicemanagement_v1.services.html @@ -487,7 +487,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. If this is empty, then an endpoint with the same name as the service is automatically generated to service all defined APIs. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. 
diff --git a/docs/dyn/serviceusage_v1.services.html b/docs/dyn/serviceusage_v1.services.html index b325fbf54fb..800009e2442 100644 --- a/docs/dyn/serviceusage_v1.services.html +++ b/docs/dyn/serviceusage_v1.services.html @@ -272,7 +272,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. Contains only the names and aliases of the endpoints. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -579,7 +579,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. Contains only the names and aliases of the endpoints. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -801,7 +801,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. Contains only the names and aliases of the endpoints. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. 
diff --git a/docs/dyn/serviceusage_v1beta1.services.html b/docs/dyn/serviceusage_v1beta1.services.html index b55fcededd2..a1e199964e9 100644 --- a/docs/dyn/serviceusage_v1beta1.services.html +++ b/docs/dyn/serviceusage_v1beta1.services.html @@ -396,7 +396,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. Contains only the names and aliases of the endpoints. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. @@ -618,7 +618,7 @@

Method Details

}, "endpoints": [ # Configuration for network endpoints. Contains only the names and aliases of the endpoints. { # `Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true - "aliases": [ # Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on. + "aliases": [ # Aliases for this endpoint, these will be served by the same UrlMap as the parent endpoint, and will be provisioned in the GCP stack for the Regional Endpoints. "A String", ], "allowCors": True or False, # Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed. 
diff --git a/docs/dyn/sheets_v4.spreadsheets.html b/docs/dyn/sheets_v4.spreadsheets.html index 36223339fb9..f89d1079fc8 100644 --- a/docs/dyn/sheets_v4.spreadsheets.html +++ b/docs/dyn/sheets_v4.spreadsheets.html @@ -3322,6 +3322,33 @@

Method Details

"startIndex": 42, # The start (inclusive) of the span, or not set if unbounded. }, }, + "cancelDataSourceRefresh": { # Cancels one or multiple refreshes of data source objects in the spreadsheet by the specified references. # Cancels refreshes of one or multiple data sources and associated dbobjects. + "dataSourceId": "A String", # Reference to a DataSource. If specified, cancels all associated data source object refreshes for this data source. + "isAll": True or False, # Cancels all existing data source object refreshes for all data sources in the spreadsheet. + "references": { # A list of references to data source objects. # References to data source objects whose refreshes are to be cancelled. + "references": [ # The references. + { # Reference to a data source object. + "chartId": 42, # References to a data source chart. + "dataSourceFormulaCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a cell containing DataSourceFormula. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "dataSourcePivotTableAnchorCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a data source PivotTable anchored at the cell. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "dataSourceTableAnchorCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a DataSourceTable anchored at the cell. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "sheetId": "A String", # References to a DATA_SOURCE sheet. + }, + ], + }, + }, "clearBasicFilter": { # Clears the basic filter, if any exists on the sheet. # Clears the basic filter on a sheet. 
"sheetId": 42, # The sheet ID on which the basic filter should be cleared. }, @@ -10370,6 +10397,35 @@

Method Details

}, }, }, + "cancelDataSourceRefresh": { # The response from cancelling one or multiple data source object refreshes. # A reply from cancelling data source object refreshes. + "statuses": [ # The cancellation statuses of refreshes of all data source objects specified in the request. If is_all is specified, the field contains only those in failure status. Refreshing and canceling refresh the same data source object is also not allowed in the same `batchUpdate`. + { # The status of cancelling a single data source object refresh. + "reference": { # Reference to a data source object. # Reference to the data source object whose refresh is being cancelled. + "chartId": 42, # References to a data source chart. + "dataSourceFormulaCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a cell containing DataSourceFormula. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "dataSourcePivotTableAnchorCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a data source PivotTable anchored at the cell. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "dataSourceTableAnchorCell": { # A coordinate in a sheet. All indexes are zero-based. # References to a DataSourceTable anchored at the cell. + "columnIndex": 42, # The column index of the coordinate. + "rowIndex": 42, # The row index of the coordinate. + "sheetId": 42, # The sheet this coordinate is on. + }, + "sheetId": "A String", # References to a DATA_SOURCE sheet. + }, + "refreshCancellationStatus": { # The status of a refresh cancellation. You can send cancel request to explicitly cancel one or multiple data source object refreshes. # The cancellation status. + "errorCode": "A String", # The error code. 
+ "state": "A String", # The state of a call to cancel a refresh in Sheets. + }, + }, + ], + }, "createDeveloperMetadata": { # The response from creating developer metadata. # A reply from creating a developer metadata entry. "developerMetadata": { # Developer metadata associated with a location or object in a spreadsheet. Developer metadata may be used to associate arbitrary data with various parts of a spreadsheet and will remain associated at those locations as they move around and the spreadsheet is edited. For example, if developer metadata is associated with row 5 and another row is then subsequently inserted above row 5, that original metadata will still be associated with the row it was first associated with (what is now row 6). If the associated object is deleted its metadata is deleted too. # The developer metadata that was created. "location": { # A location where metadata may be associated in a spreadsheet. # The location where the metadata is associated. diff --git a/docs/dyn/solar_v1.dataLayers.html b/docs/dyn/solar_v1.dataLayers.html index eb66b1f2f13..4bdb0782116 100644 --- a/docs/dyn/solar_v1.dataLayers.html +++ b/docs/dyn/solar_v1.dataLayers.html @@ -78,7 +78,7 @@

Instance Methods

close()

Close httplib2 connections.

- get(location_latitude=None, location_longitude=None, pixelSizeMeters=None, radiusMeters=None, requiredQuality=None, view=None, x__xgafv=None)

+ get(exactQualityRequired=None, location_latitude=None, location_longitude=None, pixelSizeMeters=None, radiusMeters=None, requiredQuality=None, view=None, x__xgafv=None)

Gets solar information for a region surrounding a location. Returns an error with code `NOT_FOUND` if the location is outside the coverage area.

Method Details

@@ -87,10 +87,11 @@

Method Details

- get(location_latitude=None, location_longitude=None, pixelSizeMeters=None, radiusMeters=None, requiredQuality=None, view=None, x__xgafv=None) + get(exactQualityRequired=None, location_latitude=None, location_longitude=None, pixelSizeMeters=None, radiusMeters=None, requiredQuality=None, view=None, x__xgafv=None)
Gets solar information for a region surrounding a location. Returns an error with code `NOT_FOUND` if the location is outside the coverage area.
 
 Args:
+  exactQualityRequired: boolean, Optional. Whether to require exact quality of the imagery. If set to false, the `required_quality` field is interpreted as the minimum required quality, such that HIGH quality imagery may be returned when `required_quality` is set to MEDIUM. If set to true, `required_quality` is interpreted as the exact required quality and only `MEDIUM` quality imagery is returned if `required_quality` is set to `MEDIUM`.
   location_latitude: number, The latitude in degrees. It must be in the range [-90.0, +90.0].
   location_longitude: number, The longitude in degrees. It must be in the range [-180.0, +180.0].
   pixelSizeMeters: number, Optional. The minimum scale, in meters per pixel, of the data to return. Values of 0.1 (the default, if this field is not set explicitly), 0.25, 0.5, and 1.0 are supported. Imagery components whose normal resolution is less than `pixel_size_meters` will be returned at the resolution specified by `pixel_size_meters`; imagery components whose normal resolution is equal to or greater than `pixel_size_meters` will be returned at that normal resolution.
diff --git a/docs/dyn/spanner_v1.projects.instances.backups.html b/docs/dyn/spanner_v1.projects.instances.backups.html
index bf6915a9163..7151539183c 100644
--- a/docs/dyn/spanner_v1.projects.instances.backups.html
+++ b/docs/dyn/spanner_v1.projects.instances.backups.html
@@ -86,7 +86,7 @@ 

Instance Methods

copy(parent, body=None, x__xgafv=None)

Starts copying a Cloud Spanner Backup. The returned backup long-running operation will have a name of the format `projects//instances//backups//operations/` and can be used to track copying of the backup. The operation is associated with the destination backup. The metadata field type is CopyBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the copying and delete the destination backup. Concurrent CopyBackup requests can run on the same source backup.

- create(parent, backupId=None, body=None, encryptionConfig_encryptionType=None, encryptionConfig_kmsKeyName=None, x__xgafv=None)

+ create(parent, backupId=None, body=None, encryptionConfig_encryptionType=None, encryptionConfig_kmsKeyName=None, encryptionConfig_kmsKeyNames=None, x__xgafv=None)

Starts creating a new Cloud Spanner Backup. The returned backup long-running operation will have a name of the format `projects//instances//backups//operations/` and can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup. There can be only one pending backup creation per database. Backup creation of different databases can run concurrently.

delete(name, x__xgafv=None)

@@ -132,6 +132,9 @@

Method Details

"encryptionConfig": { # Encryption configuration for the copied backup. # Optional. The encryption configuration used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the source backup by default, namely encryption_type = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. "encryptionType": "A String", # Required. The encryption type of the backup. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. Kms keys specified can be in any order. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, "expireTime": "A String", # Required. The expiration time of the backup in microsecond granularity. The expiration time must be at least 6 hours and at most 366 days from the `create_time` of the source backup. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "sourceBackup": "A String", # Required. The source backup to be copied. 
The source backup needs to be in READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the form: `projects//instances//backups/`. @@ -167,7 +170,7 @@

Method Details

- create(parent, backupId=None, body=None, encryptionConfig_encryptionType=None, encryptionConfig_kmsKeyName=None, x__xgafv=None) + create(parent, backupId=None, body=None, encryptionConfig_encryptionType=None, encryptionConfig_kmsKeyName=None, encryptionConfig_kmsKeyNames=None, x__xgafv=None)
Starts creating a new Cloud Spanner Backup. The returned backup long-running operation will have a name of the format `projects//instances//backups//operations/` and can be used to track creation of the backup. The metadata field type is CreateBackupMetadata. The response field type is Backup, if successful. Cancelling the returned operation will stop the creation and delete the backup. There can be only one pending backup creation per database. Backup creation of different databases can run concurrently.
 
 Args:
@@ -192,6 +195,21 @@ 

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + { # Encryption information for a Cloud Spanner database or backup. + "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "encryptionType": "A String", # Output only. The type of encryption. + "kmsKeyVersion": "A String", # Output only. 
A Cloud KMS key version that is being used to protect the database or backup. + }, + ], "expireTime": "A String", # Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. @@ -214,6 +232,7 @@

Method Details

GOOGLE_DEFAULT_ENCRYPTION - Use Google default encryption. CUSTOMER_MANAGED_ENCRYPTION - Use customer managed encryption. If specified, `kms_key_name` must contain a valid Cloud KMS key. encryptionConfig_kmsKeyName: string, Optional. The Cloud KMS key that will be used to protect the backup. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + encryptionConfig_kmsKeyNames: string, Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the backup's instance configuration. Some examples: * For single region instance configs, specify a single regional location KMS key. * For multi-regional instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For an instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. (repeated) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -292,6 +311,21 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + { # Encryption information for a Cloud Spanner database or backup. + "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "encryptionType": "A String", # Output only. The type of encryption. + "kmsKeyVersion": "A String", # Output only. 
A Cloud KMS key version that is being used to protect the database or backup. + }, + ], "expireTime": "A String", # Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. @@ -386,6 +420,21 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + { # Encryption information for a Cloud Spanner database or backup. + "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "encryptionType": "A String", # Output only. The type of encryption. + "kmsKeyVersion": "A String", # Output only. 
A Cloud KMS key version that is being used to protect the database or backup. + }, + ], "expireTime": "A String", # Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. @@ -444,6 +493,21 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + { # Encryption information for a Cloud Spanner database or backup. + "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "encryptionType": "A String", # Output only. The type of encryption. + "kmsKeyVersion": "A String", # Output only. 
A Cloud KMS key version that is being used to protect the database or backup. + }, + ], "expireTime": "A String", # Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. @@ -484,6 +548,21 @@

Method Details

"encryptionType": "A String", # Output only. The type of encryption. "kmsKeyVersion": "A String", # Output only. A Cloud KMS key version that is being used to protect the database or backup. }, + "encryptionInformation": [ # Output only. The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status' field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. + { # Encryption information for a Cloud Spanner database or backup. + "encryptionStatus": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. If present, the status of a recent encrypt/decrypt call on underlying data for this database or backup. Regardless of status, data is always encrypted at rest. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "encryptionType": "A String", # Output only. The type of encryption. + "kmsKeyVersion": "A String", # Output only. 
A Cloud KMS key version that is being used to protect the database or backup. + }, + ], "expireTime": "A String", # Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. diff --git a/docs/dyn/spanner_v1.projects.instances.databases.html b/docs/dyn/spanner_v1.projects.instances.databases.html index ce89f2422cb..bd44b4bb0f4 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.html @@ -151,6 +151,9 @@

Method Details

"databaseDialect": "A String", # Optional. The dialect of the Cloud Spanner Database. "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Optional. The encryption configuration for the database. If this field is not specified, Cloud Spanner will encrypt/decrypt all data at rest using Google default encryption. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, "extraStatements": [ # Optional. A list of DDL statements to run inside the newly created database. Statements can create tables, indexes, etc. These statements execute atomically with the creation of the database: if there is an error in any statement, the database is not created. "A String", @@ -227,6 +230,9 @@

Method Details

"enableDropProtection": True or False, # Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. 
This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. { # Encryption information for a Cloud Spanner database or backup. @@ -555,6 +561,9 @@

Method Details

"enableDropProtection": True or False, # Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. 
This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. { # Encryption information for a Cloud Spanner database or backup. @@ -621,6 +630,9 @@

Method Details

"enableDropProtection": True or False, # Whether drop protection is enabled for this database. Defaults to false, if not set. For more details, please see how to [prevent accidental database deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). "encryptionConfig": { # Encryption configuration for a Cloud Spanner database. # Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty. "kmsKeyName": "A String", # The Cloud KMS key to be used for encrypting and decrypting the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, "encryptionInfo": [ # Output only. For databases that are using customer managed encryption, this field contains the encryption information for the database, such as all Cloud KMS key versions that are in use. The `encryption_status' field inside of each `EncryptionInfo` is not populated. For databases that are using Google default or other types of encryption, this field is empty. 
This field is propagated lazily from the backend. There might be a delay from when a key version is being used and when it appears in this field. { # Encryption information for a Cloud Spanner database or backup. @@ -697,6 +709,9 @@

Method Details

"encryptionConfig": { # Encryption configuration for the restored database. # Optional. An encryption configuration describing the encryption type and key resources in Cloud KMS used to encrypt/decrypt the database to restore to. If this field is not specified, the restored database will use the same encryption configuration as the backup by default, namely encryption_type = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. "encryptionType": "A String", # Required. The encryption type of the restored database. "kmsKeyName": "A String", # Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored database. This field should be set only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects//locations//keyRings//cryptoKeys/`. + "kmsKeyNames": [ # Optional. Specifies the KMS configuration for the one or more keys used to encrypt the database. Values are of the form `projects//locations//keyRings//cryptoKeys/`. The keys referenced by kms_key_names must fully cover all regions of the database instance configuration. Some examples: * For single region database instance configs, specify a single regional location KMS key. * For multi-regional database instance configs of type GOOGLE_MANAGED, either specify a multi-regional location KMS key or multiple regional location KMS keys that cover all regions in the instance config. * For a database instance config of type USER_MANAGED, please specify only regional location KMS keys to cover each region in the instance config. Multi-regional location KMS keys are not supported for USER_MANAGED instance configs. + "A String", + ], }, } diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html index a41548b7a39..5f68b933c71 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html @@ -308,7 +308,7 @@

Method Details

The object takes the form of: { # The request for BeginTransaction. - "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. 
The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. 
The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. 
See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. 
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. + "options": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. 
Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. 
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. 
The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. 
Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Required. Options for the new transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. 
* Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -442,7 +442,7 @@

Method Details

"transactionTag": "A String", # A tag used for statistics collection about this transaction. Both request_tag and transaction_tag can be specified for a read or query that belongs to a transaction. The value of transaction_tag should be the same for all requests belonging to the same transaction. If this request doesn't belong to any transaction, transaction_tag will be ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a transaction_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters will be removed from the string. }, "returnCommitStats": True or False, # If `true`, then statistics related to the transaction will be included in the CommitResponse. Default value is `false`. - "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). 
Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. 
Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. 
Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. 
Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. 
As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. 
The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. 
Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. 
+ "singleUseTransaction": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. 
As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. 
The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. 
Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. 
This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. 
When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -578,7 +578,7 @@

Method Details

}, ], "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Required. The transaction to use. Must be a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. 
Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. 
It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. 
Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. 
This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. 
Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. 
Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. 
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. 
To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. 
It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -595,7 +595,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying Change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying Change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -775,7 +775,7 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. 
If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying Change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. 
Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. 
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. 
When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -792,7 +792,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -959,7 +959,7 @@

Method Details

"seqno": "A String", # A per-transaction sequence number used to identify this request. This field makes each request idempotent such that if the request is received multiple times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. If a request arrives for the first time with an out-of-order sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. "sql": "A String", # Required. The SQL string. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. For queries, if none is provided, the default is a temporary read-only transaction with strong concurrency. Standard DML statements require a read-write transaction. To protect against replays, single-use transactions are not supported. The caller must either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing Partitioned DML transaction ID. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. 
If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. 
Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. 
Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. 
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. 
When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -976,7 +976,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1188,7 +1188,7 @@

Method Details

}, "sql": "A String", # Required. The query request to generate partitions for. The request will fail if the query is not root partitionable. For a query to be root partitionable, it needs to satisfy a few conditions. For example, if the query execution plan contains a distributed union operator, then it must be the first operator in the plan. For more information about other conditions, see [Read data in parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel). The query request must not contain DML commands, such as INSERT, UPDATE, or DELETE. Use ExecuteStreamingSql with a PartitionedDml transaction for large, partition-friendly DML operations. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. 
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. 
If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. 
In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. 
The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. 
Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). 
After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. 
Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). 
Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. 
Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. 
These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. 
Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. 
Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1205,7 +1205,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1289,7 +1289,7 @@

Method Details

}, "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # Read only snapshot transactions are supported, read/write and single use transactions are not. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. 
Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. 
Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. 
They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. 
They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. 
Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. 
This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. 
- The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. 
Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. 
Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. 
Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. 
Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. 
Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. 
Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. 
To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. 
It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1306,7 +1306,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is a good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1414,7 +1414,7 @@

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. 
Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. 
Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. 
Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. 
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. 
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. 
Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. 
Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. 
If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. 
Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. 
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1431,7 +1431,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1627,7 +1627,7 @@

Method Details

"resumeToken": "A String", # If this request is resuming a previously interrupted read, `resume_token` should be copied from the last PartialResultSet yielded before the interruption. Doing this enables the new read to resume where the last read left off. The rest of the request parameters must exactly match the request that yielded this token. "table": "A String", # Required. The name of the table in the database to be read. "transaction": { # This message is used to select the transaction in which a Read or ExecuteSql call runs. See TransactionOptions for more information about transactions. # The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. - "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. 
Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. 
Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. 
Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. 
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. 
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. 
For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. + "begin": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. 
These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. 
Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. 
Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. 
If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. 
Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. 
You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. 
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. 
- If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Begin a new transaction and execute this read or SQL query in it. The transaction ID of the new transaction is returned in ResultSetMetadata.transaction, which is a Transaction. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, @@ -1644,7 +1644,7 @@

Method Details

}, }, "id": "A String", # Execute the read or SQL query in a previously-started transaction. - "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. 
In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. 
To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). 
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. 
As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. 
This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. 
Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. 
It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. # Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. + "singleUse": { # Transactions: Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction. Transaction modes: Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry. 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. 
Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed. Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see TransactionOptions.ReadOnly.strong for more details. 3. Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed. For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed. Transactions may only read-write data in a single database. They may, however, read-write data in different tables within that database. Locking read-write transactions: Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by Commit or Rollback. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it. 
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by Commit. At any time before Commit, the client can send a Rollback request to abort the transaction. Semantics: Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. Retrying aborted transactions: When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous. Note that the lock priority is preserved per session (not per transaction). Lock priority is set by the first read or write in the first attempt of a read-write transaction. If the application starts a new session to retry the whole transaction, the transaction loses its original lock priority. Moreover, the lock priority is only preserved if the transaction fails with an `ABORTED` error. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying. 
Idle transactions: A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error `ABORTED`. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, `SELECT 1`) prevents the transaction from becoming idle. Snapshot read-only transactions: Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions. Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice. Snapshot read-only transactions do not need to call Commit or Rollback (and in fact are not permitted to do so). To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp. The types of timestamp bound are: - Strong (the default). - Bounded staleness. - Exact staleness. If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica. Each type of timestamp bound is discussed in detail below. 
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp. Queries on change streams (see below for more details) must also specify the strong read timestamp bound. See TransactionOptions.ReadOnly.strong. Exact staleness: These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results. See TransactionOptions.ReadOnly.read_timestamp and TransactionOptions.ReadOnly.exact_staleness. Bounded staleness: Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking. 
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results. Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp. As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica. Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions. See TransactionOptions.ReadOnly.max_staleness and TransactionOptions.ReadOnly.min_read_timestamp. Old read timestamps and garbage collection: Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamp become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error `FAILED_PRECONDITION`. You can configure and extend the `VERSION_RETENTION_PERIOD` of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past. Querying change Streams: A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database. 
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_. All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period is accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries. In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the Transaction message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries. Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs. Partitioned DML transactions: Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another. To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time. 
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions. - The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table. - The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows. - Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement is applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as `UPDATE table SET column = column + 1` as it could be run multiple times against some rows. - The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that statement was never executed against other rows. - Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql. - If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all. Given the above, Partitioned DML is good fit for large, database-wide, operations that are idempotent, such as deleting old rows from a very large table. 
# Execute the read or SQL query in a temporary transaction. This is the most efficient way to execute a transaction that consists of a single SQL query. "excludeTxnFromChangeStreams": True or False, # When `exclude_txn_from_change_streams` is set to `true`: * Modifications from this transaction will not be recorded in change streams with DDL option `allow_txn_exclusion=true` that are tracking columns modified by these transactions. * Modifications from this transaction will be recorded in change streams with DDL option `allow_txn_exclusion=false or not set` that are tracking columns modified by these transactions. When `exclude_txn_from_change_streams` is set to `false` or not set, Modifications from this transaction will be recorded in all change streams that are tracking columns modified by these transactions. `exclude_txn_from_change_streams` may only be specified for read-write or partitioned-dml transactions, otherwise the API will return an `INVALID_ARGUMENT` error. "partitionedDml": { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction. Authorization to begin a Partitioned DML transaction requires `spanner.databases.beginPartitionedDmlTransaction` permission on the `session` resource. }, diff --git a/docs/dyn/sqladmin_v1.instances.html b/docs/dyn/sqladmin_v1.instances.html index 17f4c5d6f39..66d7ea216d0 100644 --- a/docs/dyn/sqladmin_v1.instances.html +++ b/docs/dyn/sqladmin_v1.instances.html @@ -124,7 +124,7 @@

Instance Methods

Partially updates settings of a Cloud SQL instance by merging the request with the current configuration. This method supports patch semantics.

promoteReplica(project, instance, failover=None, x__xgafv=None)

-

Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.

+

Promotes the read replica instance to be an independent Cloud SQL primary instance. Using this operation might cause your instance to restart.

reencrypt(project, instance, body=None, x__xgafv=None)

Reencrypt CMEK instance with latest key version.

@@ -151,7 +151,7 @@

Instance Methods

Stops the replication in the read replica instance.

switchover(project, instance, dbTimeout=None, x__xgafv=None)

-

Switches over from the primary instance to the replica instance.

+

Switches over from the primary instance to the designated DR replica instance.

truncateLog(project, instance, body=None, x__xgafv=None)

Truncate MySQL general and slow query log tables MySQL only.

@@ -1209,13 +1209,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. - "entitled": True or False, # Output only. Whether gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. + "entitled": True or False, # Output only. Whether Gemini is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -1275,9 +1275,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # Optional. The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. # Optional. A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -1327,7 +1327,7 @@

Method Details

"replicationLogArchivingEnabled": True or False, # Reserved for future use. "startTime": "A String", # Start time for the daily backup configuration in UTC timezone in the 24 hour format - `HH:MM`. "transactionLogRetentionDays": 42, # The number of days of transaction logs we retain for point in time restore, from 1-7. - "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs for the database for point-in-time recovery. + "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs used to perform point-in-time recovery (PITR) for the database. }, "collation": "A String", # The name of server Instance collation. "connectorEnforcement": "A String", # Specifies if connections must use Cloud SQL connectors. Option values include the following: `NOT_REQUIRED` (Cloud SQL instances can be connected without Cloud SQL Connectors) and `REQUIRED` (Only allow connections that use Cloud SQL Connectors). Note that using REQUIRED disables all existing authorized networks. If this field is not specified when creating a new instance, NOT_REQUIRED is used. If this field is not specified when patching or updating an existing instance, it is left unchanged in the instance. @@ -1381,7 +1381,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accepts only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1629,13 +1629,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. - "entitled": True or False, # Output only. Whether gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. + "entitled": True or False, # Output only. Whether Gemini is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -1695,9 +1695,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # Optional. The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. # Optional. A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -1747,7 +1747,7 @@

Method Details

"replicationLogArchivingEnabled": True or False, # Reserved for future use. "startTime": "A String", # Start time for the daily backup configuration in UTC timezone in the 24 hour format - `HH:MM`. "transactionLogRetentionDays": 42, # The number of days of transaction logs we retain for point in time restore, from 1-7. - "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs for the database for point-in-time recovery. + "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs used to perform point-in-time recovery (PITR) for the database. }, "collation": "A String", # The name of server Instance collation. "connectorEnforcement": "A String", # Specifies if connections must use Cloud SQL connectors. Option values include the following: `NOT_REQUIRED` (Cloud SQL instances can be connected without Cloud SQL Connectors) and `REQUIRED` (Only allow connections that use Cloud SQL Connectors). Note that using REQUIRED disables all existing authorized networks. If this field is not specified when creating a new instance, NOT_REQUIRED is used. If this field is not specified when patching or updating an existing instance, it is left unchanged in the instance. @@ -1801,7 +1801,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accepts only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2011,13 +2011,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. - "entitled": True or False, # Output only. Whether gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. + "entitled": True or False, # Output only. Whether Gemini is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -2077,9 +2077,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # Optional. The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. # Optional. A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -2129,7 +2129,7 @@

Method Details

"replicationLogArchivingEnabled": True or False, # Reserved for future use. "startTime": "A String", # Start time for the daily backup configuration in UTC timezone in the 24 hour format - `HH:MM`. "transactionLogRetentionDays": 42, # The number of days of transaction logs we retain for point in time restore, from 1-7. - "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs for the database for point-in-time recovery. + "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs used to perform point-in-time recovery (PITR) for the database. }, "collation": "A String", # The name of server Instance collation. "connectorEnforcement": "A String", # Specifies if connections must use Cloud SQL connectors. Option values include the following: `NOT_REQUIRED` (Cloud SQL instances can be connected without Cloud SQL Connectors) and `REQUIRED` (Only allow connections that use Cloud SQL Connectors). Note that using REQUIRED disables all existing authorized networks. If this field is not specified when creating a new instance, NOT_REQUIRED is used. If this field is not specified when patching or updating an existing instance, it is left unchanged in the instance. @@ -2183,7 +2183,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accepts only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2327,13 +2327,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. - "entitled": True or False, # Output only. Whether gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. + "entitled": True or False, # Output only. Whether Gemini is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -2393,9 +2393,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # Optional. The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. # Optional. A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -2445,7 +2445,7 @@

Method Details

"replicationLogArchivingEnabled": True or False, # Reserved for future use. "startTime": "A String", # Start time for the daily backup configuration in UTC timezone in the 24 hour format - `HH:MM`. "transactionLogRetentionDays": 42, # The number of days of transaction logs we retain for point in time restore, from 1-7. - "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs for the database for point-in-time recovery. + "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs used to perform point-in-time recovery (PITR) for the database. }, "collation": "A String", # The name of server Instance collation. "connectorEnforcement": "A String", # Specifies if connections must use Cloud SQL connectors. Option values include the following: `NOT_REQUIRED` (Cloud SQL instances can be connected without Cloud SQL Connectors) and `REQUIRED` (Only allow connections that use Cloud SQL Connectors). Note that using REQUIRED disables all existing authorized networks. If this field is not specified when creating a new instance, NOT_REQUIRED is used. If this field is not specified when patching or updating an existing instance, it is left unchanged in the instance. @@ -2499,7 +2499,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2667,12 +2667,12 @@

Method Details

promoteReplica(project, instance, failover=None, x__xgafv=None) -
Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.
+  
Promotes the read replica instance to be an independent Cloud SQL primary instance. Using this operation might cause your instance to restart.
 
 Args:
   project: string, ID of the project that contains the read replica. (required)
   instance: string, Cloud SQL read replica instance name. (required)
-  failover: boolean, Set to true if the promote operation should attempt to re-add the original primary as a replica when it comes back online. Otherwise, if this value is false or not set, the original primary will be a standalone instance.
+  failover: boolean, Set to true to invoke a replica failover to the designated DR replica. As part of replica failover, the promote operation attempts to add the original primary instance as a replica of the promoted DR replica when the original primary instance comes back online. If set to false or not specified, then the original primary instance becomes an independent Cloud SQL primary instance. Only applicable to MySQL.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -3712,7 +3712,7 @@ 

Method Details

switchover(project, instance, dbTimeout=None, x__xgafv=None) -
Switches over from the primary instance to the replica instance.
+  
Switches over from the primary instance to the designated DR replica instance.
 
 Args:
   project: string, ID of the project that contains the replica. (required)
@@ -4004,13 +4004,13 @@ 

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. - "entitled": True or False, # Output only. Whether gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. + "entitled": True or False, # Output only. Whether Gemini is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -4070,9 +4070,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # Optional. The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. # Optional. A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -4122,7 +4122,7 @@

Method Details

"replicationLogArchivingEnabled": True or False, # Reserved for future use. "startTime": "A String", # Start time for the daily backup configuration in UTC timezone in the 24 hour format - `HH:MM`. "transactionLogRetentionDays": 42, # The number of days of transaction logs we retain for point in time restore, from 1-7. - "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs for the database for point-in-time recovery. + "transactionalLogStorageState": "A String", # Output only. This value contains the storage location of transactional logs used to perform point-in-time recovery (PITR) for the database. }, "collation": "A String", # The name of server Instance collation. "connectorEnforcement": "A String", # Specifies if connections must use Cloud SQL connectors. Option values include the following: `NOT_REQUIRED` (Cloud SQL instances can be connected without Cloud SQL Connectors) and `REQUIRED` (Only allow connections that use Cloud SQL Connectors). Note that using REQUIRED disables all existing authorized networks. If this field is not specified when creating a new instance, NOT_REQUIRED is used. If this field is not specified when patching or updating an existing instance, it is left unchanged in the instance. @@ -4176,7 +4176,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. diff --git a/docs/dyn/sqladmin_v1.projects.instances.html b/docs/dyn/sqladmin_v1.projects.instances.html index 88ad0dcdd1c..d831b9dc3c8 100644 --- a/docs/dyn/sqladmin_v1.projects.instances.html +++ b/docs/dyn/sqladmin_v1.projects.instances.html @@ -553,7 +553,7 @@

Method Details

The object takes the form of: { # Instance start external sync request. - "migrationType": "A String", # Optional. MigrationType decides if the migration is a physical file based migration or logical migration. + "migrationType": "A String", # Optional. MigrationType configures the migration to use physical files or logical dump files. If not set, then the logical dump file configuration is used. Valid values are `LOGICAL` or `PHYSICAL`. Only applicable to MySQL. "mysqlSyncConfig": { # MySQL-specific external server sync settings. # MySQL-specific settings for start external sync. "initialSyncFlags": [ # Flags to use for the initial dump. { # Initial sync flags for certain Cloud SQL APIs. Currently used for the MySQL external server initial dump. @@ -695,7 +695,7 @@

Method Details

The object takes the form of: { # Instance verify external sync settings request. - "migrationType": "A String", # Optional. MigrationType decides if the migration is a physical file based migration or logical migration + "migrationType": "A String", # Optional. MigrationType configures the migration to use physical files or logical dump files. If not set, then the logical dump file configuration is used. Valid values are `LOGICAL` or `PHYSICAL`. Only applicable to MySQL. "mysqlSyncConfig": { # MySQL-specific external server sync settings. # Optional. MySQL-specific settings for start external sync. "initialSyncFlags": [ # Flags to use for the initial dump. { # Initial sync flags for certain Cloud SQL APIs. Currently used for the MySQL external server initial dump. @@ -705,7 +705,7 @@

Method Details

], }, "syncMode": "A String", # External sync mode - "syncParallelLevel": "A String", # Optional. Parallel level for initial data sync. Currently only applicable for PostgreSQL. + "syncParallelLevel": "A String", # Optional. Parallel level for initial data sync. Only applicable for PostgreSQL. "verifyConnectionOnly": True or False, # Flag to enable verifying connection only "verifyReplicationOnly": True or False, # Optional. Flag to verify settings required by replication setup only } diff --git a/docs/dyn/sqladmin_v1beta4.instances.html b/docs/dyn/sqladmin_v1beta4.instances.html index 38c231b7964..3937119f9e2 100644 --- a/docs/dyn/sqladmin_v1beta4.instances.html +++ b/docs/dyn/sqladmin_v1beta4.instances.html @@ -124,7 +124,7 @@

Instance Methods

Partially updates settings of a Cloud SQL instance by merging the request with the current configuration. This method supports patch semantics.

promoteReplica(project, instance, failover=None, x__xgafv=None)

-

Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.

+

Promotes the read replica instance to be an independent Cloud SQL primary instance. Using this operation might cause your instance to restart.

reencrypt(project, instance, body=None, x__xgafv=None)

Reencrypt CMEK instance with latest key version.

@@ -151,7 +151,7 @@

Instance Methods

Stops the replication in the read replica instance.

switchover(project, instance, dbTimeout=None, x__xgafv=None)

-

Switches over from the primary instance to a replica instance.

+

Switches over from the primary instance to the designated DR replica instance.

truncateLog(project, instance, body=None, x__xgafv=None)

Truncate MySQL general and slow query log tables MySQL only.

@@ -1209,13 +1209,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini instance configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. "entitled": True or False, # Output only. Whether Gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -1275,9 +1275,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. Only applicable to MySQL. # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -1381,7 +1381,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1629,13 +1629,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini instance configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. "entitled": True or False, # Output only. Whether Gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -1695,9 +1695,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. Only applicable to MySQL. # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -1801,7 +1801,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2011,13 +2011,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini instance configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. "entitled": True or False, # Output only. Whether Gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -2077,9 +2077,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. Only applicable to MySQL. # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -2183,7 +2183,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2327,13 +2327,13 @@

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini instance configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. "entitled": True or False, # Output only. Whether Gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -2393,9 +2393,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. Only applicable to MySQL. # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -2499,7 +2499,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2667,12 +2667,12 @@

Method Details

promoteReplica(project, instance, failover=None, x__xgafv=None) -
Promotes the read replica instance to be a stand-alone Cloud SQL instance. Using this operation might cause your instance to restart.
+  
Promotes the read replica instance to be an independent Cloud SQL primary instance. Using this operation might cause your instance to restart.
 
 Args:
   project: string, ID of the project that contains the read replica. (required)
   instance: string, Cloud SQL read replica instance name. (required)
-  failover: boolean, Set to true if the promote operation should attempt to re-add the original primary as a replica when it comes back online. Otherwise, if this value is false or not set, the original primary will be a standalone instance.
+  failover: boolean, Set to true to invoke a replica failover to the designated DR replica. As part of replica failover, the promote operation attempts to add the original primary instance as a replica of the promoted DR replica when the original primary instance comes back online. If set to false or not specified, then the original primary instance becomes an independent Cloud SQL primary instance. Only applicable to MySQL.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -3712,7 +3712,7 @@ 

Method Details

switchover(project, instance, dbTimeout=None, x__xgafv=None) -
Switches over from the primary instance to a replica instance.
+  
Switches over from the primary instance to the designated DR replica instance.
 
 Args:
   project: string, ID of the project that contains the replica. (required)
@@ -4004,13 +4004,13 @@ 

Method Details

"name": "A String", # The name of the failover replica. If specified at instance creation, a failover replica is created for the instance. The name doesn't include the project ID. }, "gceZone": "A String", # The Compute Engine zone that the instance is currently serving from. This value could be different from the zone that was specified when the instance was created if the instance has failed over to its secondary zone. WARNING: Changing this might restart the instance. - "geminiConfig": { # Gemini configuration. # Gemini instance configuration. - "activeQueryEnabled": True or False, # Output only. Whether active query is enabled. + "geminiConfig": { # Gemini instance configuration. # Gemini instance configuration. + "activeQueryEnabled": True or False, # Output only. Whether the active query is enabled. "entitled": True or False, # Output only. Whether Gemini is enabled. - "flagRecommenderEnabled": True or False, # Output only. Whether flag recommender is enabled. - "googleVacuumMgmtEnabled": True or False, # Output only. Whether vacuum management is enabled. - "indexAdvisorEnabled": True or False, # Output only. Whether index advisor is enabled. - "oomSessionCancelEnabled": True or False, # Output only. Whether oom session cancel is enabled. + "flagRecommenderEnabled": True or False, # Output only. Whether the flag recommender is enabled. + "googleVacuumMgmtEnabled": True or False, # Output only. Whether the vacuum management is enabled. + "indexAdvisorEnabled": True or False, # Output only. Whether the index advisor is enabled. + "oomSessionCancelEnabled": True or False, # Output only. Whether canceling the out-of-memory (OOM) session is enabled. }, "instanceType": "A String", # The instance type. "ipAddresses": [ # The assigned IP addresses for the instance. @@ -4070,9 +4070,9 @@

Method Details

"replicaNames": [ # The replicas of the instance. "A String", ], - "replicationCluster": { # Primary-DR replica pair # The pair of a primary instance and disaster recovery (DR) replica. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. - "drReplica": True or False, # Output only. read-only field that indicates if the replica is a dr_replica; not set for a primary. - "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Users can set this field to set a designated DR replica for a primary. Removing this field removes the DR replica. + "replicationCluster": { # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance has regional failure. Only applicable to MySQL. # A primary instance and disaster recovery (DR) replica pair. A DR replica is a cross-region replica that you designate for failover in the event that the primary instance experiences regional failure. Only applicable to MySQL. + "drReplica": True or False, # Output only. Read-only field that indicates whether the replica is a DR replica. This field is not set if the instance is a primary instance. + "failoverDrReplicaName": "A String", # Optional. If the instance is a primary instance, then this field identifies the disaster recovery (DR) replica. A DR replica is an optional configuration for Enterprise Plus edition instances. If the instance is a read replica, then the field is not set. Set this field to a replica name to designate a DR replica for a primary instance. Remove the replica name to remove the DR replica designation. 
}, "rootPassword": "A String", # Initial root password. Use only on creation. You must set root passwords before you can connect to PostgreSQL instances. "satisfiesPzs": True or False, # The status indicating if instance satisfiesPzs. Reserved for future use. @@ -4176,7 +4176,7 @@

Method Details

"pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, "requireSsl": True or False, # Use `ssl_mode` instead. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. - "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: For PostgreSQL and MySQL: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` For SQL Server: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=true` The value of `ssl_mode` has priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, `ssl_mode=ENCRYPTED_ONLY` means accept only SSL connections, while `require_ssl=false` means accept both non-SSL and SSL connections. In this case, MySQL and PostgreSQL databases respect `ssl_mode` and accept only SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. # The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. diff --git a/docs/dyn/sqladmin_v1beta4.projects.instances.html index 221fc21fcb7..b7ce4a48870 100644 --- a/docs/dyn/sqladmin_v1beta4.projects.instances.html +++ b/docs/dyn/sqladmin_v1beta4.projects.instances.html @@ -553,7 +553,7 @@

Method Details

The object takes the form of: { - "migrationType": "A String", # Optional. MigrationType decides if the migration is a physical file based migration or logical migration. + "migrationType": "A String", # Optional. MigrationType configures the migration to use physical files or logical dump files. If not set, then the logical dump file configuration is used. Valid values are `LOGICAL` or `PHYSICAL`. Only applicable to MySQL. "mysqlSyncConfig": { # MySQL-specific external server sync settings. # MySQL-specific settings for start external sync. "initialSyncFlags": [ # Flags to use for the initial dump. { # Initial sync flags for certain Cloud SQL APIs. Currently used for the MySQL external server initial dump. @@ -695,7 +695,7 @@

Method Details

The object takes the form of: { - "migrationType": "A String", # Optional. MigrationType field decides if the migration is a physical file based migration or logical migration + "migrationType": "A String", # Optional. MigrationType configures the migration to use physical files or logical dump files. If not set, then the logical dump file configuration is used. Valid values are `LOGICAL` or `PHYSICAL`. Only applicable to MySQL. "mysqlSyncConfig": { # MySQL-specific external server sync settings. # Optional. MySQL-specific settings for start external sync. "initialSyncFlags": [ # Flags to use for the initial dump. { # Initial sync flags for certain Cloud SQL APIs. Currently used for the MySQL external server initial dump. @@ -705,7 +705,7 @@

Method Details

], }, "syncMode": "A String", # External sync mode - "syncParallelLevel": "A String", # Optional. Parallel level for initial data sync. Currently only applicable for PostgreSQL. + "syncParallelLevel": "A String", # Optional. Parallel level for initial data sync. Only applicable for PostgreSQL. "verifyConnectionOnly": True or False, # Flag to enable verifying connection only "verifyReplicationOnly": True or False, # Optional. Flag to verify settings required by replication setup only } diff --git a/docs/dyn/storagetransfer_v1.transferJobs.html b/docs/dyn/storagetransfer_v1.transferJobs.html index 711f38c38f6..36a291596d0 100644 --- a/docs/dyn/storagetransfer_v1.transferJobs.html +++ b/docs/dyn/storagetransfer_v1.transferJobs.html @@ -229,6 +229,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, @@ -430,6 +431,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, @@ -658,6 +660,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, @@ -871,6 +874,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, @@ -1092,6 +1096,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, @@ -1295,6 +1300,7 @@

Method Details

"bucketName": "A String", # Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). "cloudfrontDomain": "A String", # Optional. The CloudFront distribution domain name pointing to this bucket, to use when fetching. See [Transfer from S3 via CloudFront](https://cloud.google.com/storage-transfer/docs/s3-cloudfront) for more information. Format: `https://{id}.cloudfront.net` or any valid custom domain. Must begin with `https://`. "credentialsSecret": "A String", # Optional. The Resource name of a secret in Secret Manager. AWS credentials must be stored in Secret Manager in JSON format: { "access_key_id": "ACCESS_KEY_ID", "secret_access_key": "SECRET_ACCESS_KEY" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Amazon S3] (https://cloud.google.com/storage-transfer/docs/source-amazon-s3#secret_manager) for more information. If `credentials_secret` is specified, do not specify role_arn or aws_access_key. Format: `projects/{project_number}/secrets/{secret_name}` + "managedPrivateNetwork": True or False, # Egress bytes over a Google-managed private network. This network is shared between other users of Storage Transfer Service. "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. "roleArn": "A String", # The Amazon Resource Name (ARN) of the role to support temporary credentials via `AssumeRoleWithWebIdentity`. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a `AssumeRoleWithWebIdentity` call for the provided role using the GoogleServiceAccount for this project. 
}, diff --git a/docs/dyn/walletobjects_v1.genericobject.html b/docs/dyn/walletobjects_v1.genericobject.html index 9590c681f59..552d5c85044 100644 --- a/docs/dyn/walletobjects_v1.genericobject.html +++ b/docs/dyn/walletobjects_v1.genericobject.html @@ -163,7 +163,7 @@

Method Details

An object of the form: { # Response to adding a new issuer message to the object. This contains the entire updated GenericObject. - "resource": { # Generic Object Next ID: 121 # The updated GenericObject resource. + "resource": { # Generic Object # The updated GenericObject resource. "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -822,7 +822,7 @@

Method Details

Returns: An object of the form: - { # Generic Object Next ID: 121 + { # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -1469,7 +1469,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Generic Object Next ID: 121 +{ # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -2115,7 +2115,7 @@

Method Details

Returns: An object of the form: - { # Generic Object Next ID: 121 + { # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -2777,7 +2777,7 @@

Method Details

"resultsPerPage": 42, # Number of results returned in this page. }, "resources": [ # Resources corresponding to the list request. - { # Generic Object Next ID: 121 + { # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -3427,7 +3427,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Generic Object Next ID: 121 +{ # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -4073,7 +4073,7 @@

Method Details

Returns: An object of the form: - { # Generic Object Next ID: 121 + { # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -4721,7 +4721,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # Generic Object Next ID: 121 +{ # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module @@ -5367,7 +5367,7 @@

Method Details

Returns: An object of the form: - { # Generic Object Next ID: 121 + { # Generic Object "appLinkData": { # Information about the partner app link. The maximum number of these fields displayed is 10. "androidAppLinkInfo": { # Optional information about the partner app link. If included, the app link link module will be rendered on the valuable details on the android client. "appLogoImage": { # Wrapping type for Google hosted images. Next ID: 7 # Optional image to be displayed in the App Link Module diff --git a/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html b/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html index 469d9aeae47..36d256e6d30 100644 --- a/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html +++ b/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html @@ -199,8 +199,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -397,8 +397,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -583,8 +583,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -723,8 +723,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -878,8 +878,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. diff --git a/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html b/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html index 8495ee0b1e6..d0f942093a8 100644 --- a/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html +++ b/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html @@ -212,8 +212,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -426,8 +426,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -628,8 +628,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -784,8 +784,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. @@ -955,8 +955,8 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. - "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. 
When specified, be sure that the service account has `logging.logEntries.create` and `monitoring.timeSeries.create` permissions on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], "shieldedInstanceConfig": { # A set of Compute Engine Shielded instance options. # Optional. A set of Compute Engine Shielded instance options. diff --git a/docs/dyn/youtube_v3.youtube.v3.html b/docs/dyn/youtube_v3.youtube.v3.html index 3284b087c59..577002cbe33 100644 --- a/docs/dyn/youtube_v3.youtube.v3.html +++ b/docs/dyn/youtube_v3.youtube.v3.html @@ -74,6 +74,11 @@

YouTube Data API v3 . youtube . v3

Instance Methods

+

+ liveChat() +

+

Returns the liveChat Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/youtube_v3.youtube.v3.liveChat.html b/docs/dyn/youtube_v3.youtube.v3.liveChat.html new file mode 100644 index 00000000000..ac2dff4dde2 --- /dev/null +++ b/docs/dyn/youtube_v3.youtube.v3.liveChat.html @@ -0,0 +1,91 @@ + + + +

YouTube Data API v3 . youtube . v3 . liveChat

+

Instance Methods

+

+ messages() +

+

Returns the messages Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/youtube_v3.youtube.v3.liveChat.messages.html b/docs/dyn/youtube_v3.youtube.v3.liveChat.messages.html new file mode 100644 index 00000000000..391adfdc677 --- /dev/null +++ b/docs/dyn/youtube_v3.youtube.v3.liveChat.messages.html @@ -0,0 +1,205 @@ + + + +

YouTube Data API v3 . youtube . v3 . liveChat . messages

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ transition(id=None, status=None, x__xgafv=None)

+

Transition a durable chat event.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ transition(id=None, status=None, x__xgafv=None) +
Transition a durable chat event.
+
+Args:
+  id: string, The ID that uniquely identify the chat message event to transition.
+  status: string, The status to which the chat event is going to transition.
+    Allowed values
+      statusUnspecified - Default unknown enum value.
+      closed - The durable chat event is over.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A *liveChatMessage* resource represents a chat message in a YouTube Live Chat.
+  "authorDetails": { # The authorDetails object contains basic details about the user that posted this message.
+    "channelId": "A String", # The YouTube channel ID.
+    "channelUrl": "A String", # The channel's URL.
+    "displayName": "A String", # The channel's display name.
+    "isChatModerator": True or False, # Whether the author is a moderator of the live chat.
+    "isChatOwner": True or False, # Whether the author is the owner of the live chat.
+    "isChatSponsor": True or False, # Whether the author is a sponsor of the live chat.
+    "isVerified": True or False, # Whether the author's identity has been verified by YouTube.
+    "profileImageUrl": "A String", # The channels's avatar URL.
+  },
+  "etag": "A String", # Etag of this resource.
+  "id": "A String", # The ID that YouTube assigns to uniquely identify the message.
+  "kind": "youtube#liveChatMessage", # Identifies what kind of resource this is. Value: the fixed string "youtube#liveChatMessage".
+  "snippet": { # Next ID: 34 # The snippet object contains basic details about the message.
+    "authorChannelId": "A String", # The ID of the user that authored this message, this field is not always filled. textMessageEvent - the user that wrote the message fanFundingEvent - the user that funded the broadcast newSponsorEvent - the user that just became a sponsor memberMilestoneChatEvent - the member that sent the message membershipGiftingEvent - the user that made the purchase giftMembershipReceivedEvent - the user that received the gift membership messageDeletedEvent - the moderator that took the action messageRetractedEvent - the author that retracted their message userBannedEvent - the moderator that took the action superChatEvent - the user that made the purchase superStickerEvent - the user that made the purchase pollEvent - the user that created the poll
+    "displayMessage": "A String", # Contains a string that can be displayed to the user. If this field is not present the message is silent, at the moment only messages of type TOMBSTONE and CHAT_ENDED_EVENT are silent.
+    "fanFundingEventDetails": { # Details about the funding event, this is only set if the type is 'fanFundingEvent'.
+      "amountDisplayString": "A String", # A rendered string that displays the fund amount and currency to the user.
+      "amountMicros": "A String", # The amount of the fund.
+      "currency": "A String", # The currency in which the fund was made.
+      "userComment": "A String", # The comment added by the user to this fan funding event.
+    },
+    "giftMembershipReceivedDetails": { # Details about the Gift Membership Received event, this is only set if the type is 'giftMembershipReceivedEvent'.
+      "associatedMembershipGiftingMessageId": "A String", # The ID of the membership gifting message that is related to this gift membership. This ID will always refer to a message whose type is 'membershipGiftingEvent'.
+      "gifterChannelId": "A String", # The ID of the user that made the membership gifting purchase. This matches the `snippet.authorChannelId` of the associated membership gifting message.
+      "memberLevelName": "A String", # The name of the Level at which the viewer is a member. This matches the `snippet.membershipGiftingDetails.giftMembershipsLevelName` of the associated membership gifting message. The Level names are defined by the YouTube channel offering the Membership. In some situations this field isn't filled.
+    },
+    "hasDisplayContent": True or False, # Whether the message has display content that should be displayed to users.
+    "liveChatId": "A String",
+    "memberMilestoneChatDetails": { # Details about the Member Milestone Chat event, this is only set if the type is 'memberMilestoneChatEvent'.
+      "memberLevelName": "A String", # The name of the Level at which the viever is a member. The Level names are defined by the YouTube channel offering the Membership. In some situations this field isn't filled.
+      "memberMonth": 42, # The total amount of months (rounded up) the viewer has been a member that granted them this Member Milestone Chat. This is the same number of months as is being displayed to YouTube users.
+      "userComment": "A String", # The comment added by the member to this Member Milestone Chat. This field is empty for messages without a comment from the member.
+    },
+    "membershipGiftingDetails": { # Details about the Membership Gifting event, this is only set if the type is 'membershipGiftingEvent'.
+      "giftMembershipsCount": 42, # The number of gift memberships purchased by the user.
+      "giftMembershipsLevelName": "A String", # The name of the level of the gift memberships purchased by the user. The Level names are defined by the YouTube channel offering the Membership. In some situations this field isn't filled.
+    },
+    "messageDeletedDetails": {
+      "deletedMessageId": "A String",
+    },
+    "messageRetractedDetails": {
+      "retractedMessageId": "A String",
+    },
+    "newSponsorDetails": { # Details about the New Member Announcement event, this is only set if the type is 'newSponsorEvent'. Please note that "member" is the new term for "sponsor".
+      "isUpgrade": True or False, # If the viewer just had upgraded from a lower level. For viewers that were not members at the time of purchase, this field is false.
+      "memberLevelName": "A String", # The name of the Level that the viewer just had joined. The Level names are defined by the YouTube channel offering the Membership. In some situations this field isn't filled.
+    },
+    "pollDetails": { # Details about the poll event, this is only set if the type is 'pollEvent'.
+      "metadata": {
+        "options": [ # The options will be returned in the order that is displayed in 1P
+          {
+            "optionText": "A String",
+            "tally": "A String",
+          },
+        ],
+        "questionText": "A String",
+      },
+      "status": "A String",
+    },
+    "publishedAt": "A String", # The date and time when the message was orignally published.
+    "superChatDetails": { # Details about the Super Chat event, this is only set if the type is 'superChatEvent'.
+      "amountDisplayString": "A String", # A rendered string that displays the fund amount and currency to the user.
+      "amountMicros": "A String", # The amount purchased by the user, in micros (1,750,000 micros = 1.75).
+      "currency": "A String", # The currency in which the purchase was made.
+      "tier": 42, # The tier in which the amount belongs. Lower amounts belong to lower tiers. The lowest tier is 1.
+      "userComment": "A String", # The comment added by the user to this Super Chat event.
+    },
+    "superStickerDetails": { # Details about the Super Sticker event, this is only set if the type is 'superStickerEvent'.
+      "amountDisplayString": "A String", # A rendered string that displays the fund amount and currency to the user.
+      "amountMicros": "A String", # The amount purchased by the user, in micros (1,750,000 micros = 1.75).
+      "currency": "A String", # The currency in which the purchase was made.
+      "superStickerMetadata": { # Information about the Super Sticker.
+        "altText": "A String", # Internationalized alt text that describes the sticker image and any animation associated with it.
+        "altTextLanguage": "A String", # Specifies the localization language in which the alt text is returned.
+        "stickerId": "A String", # Unique identifier of the Super Sticker. This is a shorter form of the alt_text that includes pack name and a recognizable characteristic of the sticker.
+      },
+      "tier": 42, # The tier in which the amount belongs. Lower amounts belong to lower tiers. The lowest tier is 1.
+    },
+    "textMessageDetails": { # Details about the text message, this is only set if the type is 'textMessageEvent'.
+      "messageText": "A String", # The user's message.
+    },
+    "type": "A String", # The type of message, this will always be present, it determines the contents of the message as well as which fields will be present.
+    "userBannedDetails": {
+      "banDurationSeconds": "A String", # The duration of the ban. This property is only present if the banType is temporary.
+      "banType": "A String", # The type of ban.
+      "bannedUserDetails": { # The details of the user that was banned.
+        "channelId": "A String", # The YouTube channel ID.
+        "channelUrl": "A String", # The channel's URL.
+        "displayName": "A String", # The channel's display name.
+        "profileImageUrl": "A String", # The channels's avatar URL.
+      },
+    },
+  },
+}
+
+ + \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json index 3b86bd09d4e..3e19d652333 100644 --- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json +++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json @@ -115,7 +115,7 @@ } } }, -"revision": "20240427", +"revision": "20240505", "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", "schemas": { "AmpUrl": { diff --git a/googleapiclient/discovery_cache/documents/accessapproval.v1.json b/googleapiclient/discovery_cache/documents/accessapproval.v1.json index 5a46de8239e..81457f44939 100644 --- a/googleapiclient/discovery_cache/documents/accessapproval.v1.json +++ b/googleapiclient/discovery_cache/documents/accessapproval.v1.json @@ -913,7 +913,7 @@ } } }, -"revision": "20240424", +"revision": "20240503", "rootUrl": "https://accessapproval.googleapis.com/", "schemas": { "AccessApprovalServiceAccount": { @@ -1257,6 +1257,7 @@ "EC_SIGN_P256_SHA256", "EC_SIGN_P384_SHA384", "EC_SIGN_SECP256K1_SHA256", +"EC_SIGN_ED25519", "HMAC_SHA256", "HMAC_SHA1", "HMAC_SHA384", @@ -1294,6 +1295,7 @@ "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", +"EdDSA on the Curve25519 in pure mode (taking data as input).", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", diff --git a/googleapiclient/discovery_cache/documents/accesscontextmanager.v1.json b/googleapiclient/discovery_cache/documents/accesscontextmanager.v1.json index 57cc669b913..373aed699a8 100644 --- a/googleapiclient/discovery_cache/documents/accesscontextmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/accesscontextmanager.v1.json @@ -1290,7 +1290,7 @@ } } }, -"revision": "20240424", +"revision": "20240429", "rootUrl": "https://accesscontextmanager.googleapis.com/", "schemas": { "AccessContextManagerOperationMetadata": { @@ -1374,6 +1374,21 @@ }, "type": "object" }, +"Application": { +"description": "An application that accesses Google Cloud APIs.", +"id": "Application", +"properties": { +"clientId": { +"description": "The OAuth client ID of the application.", +"type": "string" +}, +"name": { +"description": "The name of the application. Example: \"Cloud Console\"", +"type": "string" +} +}, +"type": "object" +}, "AuditConfig": { "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. 
Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.", "id": "AuditConfig", @@ -1843,6 +1858,13 @@ "name": { "description": "Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by [RFC 3986 Section 2.3](https://tools.ietf.org/html/rfc3986#section-2.3)). Should not be specified by the client during creation. Example: \"organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N\"", "type": "string" +}, +"restrictedClientApplications": { +"description": "Optional. A list of applications that are subject to this binding's restrictions. 
If the list is empty, the binding restrictions will universally apply to all applications.", +"items": { +"$ref": "Application" +}, +"type": "array" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/acmedns.v1.json b/googleapiclient/discovery_cache/documents/acmedns.v1.json index 2b8e9025933..ac3b32b044d 100644 --- a/googleapiclient/discovery_cache/documents/acmedns.v1.json +++ b/googleapiclient/discovery_cache/documents/acmedns.v1.json @@ -146,7 +146,7 @@ } } }, -"revision": "20240427", +"revision": "20240505", "rootUrl": "https://acmedns.googleapis.com/", "schemas": { "AcmeChallengeSet": { diff --git a/googleapiclient/discovery_cache/documents/addressvalidation.v1.json b/googleapiclient/discovery_cache/documents/addressvalidation.v1.json index 31f442329b1..e20129ab14b 100644 --- a/googleapiclient/discovery_cache/documents/addressvalidation.v1.json +++ b/googleapiclient/discovery_cache/documents/addressvalidation.v1.json @@ -151,7 +151,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://addressvalidation.googleapis.com/", "schemas": { "GoogleGeoTypeViewport": { diff --git a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json index c4da9e1ced8..a22f6ddf7a9 100644 --- a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json @@ -272,7 +272,7 @@ } } }, -"revision": "20240423", +"revision": "20240429", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/admin.directory_v1.json b/googleapiclient/discovery_cache/documents/admin.directory_v1.json index 18573de40c6..65798b1c8fa 100644 --- a/googleapiclient/discovery_cache/documents/admin.directory_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.directory_v1.json @@ -4671,7 +4671,7 @@ } } }, -"revision": 
"20240423", +"revision": "20240429", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Alias": { @@ -5403,10 +5403,16 @@ false "type": "string" }, "autoUpdateExpiration": { -"description": "(Read-only) The timestamp after which the device will stop receiving Chrome updates or support", +"deprecated": true, +"description": "(Read-only) The timestamp after which the device will stop receiving Chrome updates or support. Please use \"autoUpdateThrough\" instead.", "format": "int64", "type": "string" }, +"autoUpdateThrough": { +"description": "Output only. The timestamp after which the device will stop receiving Chrome updates or support.", +"readOnly": true, +"type": "string" +}, "backlightInfo": { "description": "Output only. Contains backlight information for the device.", "items": { @@ -5667,6 +5673,21 @@ false "description": "(Read-only) MAC address used by the Chromebook\u2019s internal ethernet port, and for onboard network (ethernet) interface. The format is twelve (12) hexadecimal digits without any delimiter (uppercase letters). This is only relevant for some devices.", "type": "string" }, +"extendedSupportEligible": { +"description": "Output only. Whether or not the device requires the extended support opt in.", +"readOnly": true, +"type": "boolean" +}, +"extendedSupportEnabled": { +"description": "Output only. Whether extended support policy is enabled on the device.", +"readOnly": true, +"type": "boolean" +}, +"extendedSupportStart": { +"description": "Output only. 
Date of the device when extended support policy for automatic updates starts.", +"readOnly": true, +"type": "string" +}, "firmwareVersion": { "description": "The Chrome device's firmware version.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/admin.reports_v1.json b/googleapiclient/discovery_cache/documents/admin.reports_v1.json index 60110dca8f7..db966720cbc 100644 --- a/googleapiclient/discovery_cache/documents/admin.reports_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.reports_v1.json @@ -626,7 +626,7 @@ } } }, -"revision": "20240423", +"revision": "20240429", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Activities": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json index 01fddfe2096..3ce2fbfcc35 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1.json +++ b/googleapiclient/discovery_cache/documents/admob.v1.json @@ -321,7 +321,7 @@ } } }, -"revision": "20240429", +"revision": "20240506", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json index eacf92de6f1..a0c12fe697b 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1beta.json +++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json @@ -758,7 +758,7 @@ } } }, -"revision": "20240429", +"revision": "20240502", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdSource": { diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json index 8720e13dcc3..d282b9df509 100644 --- a/googleapiclient/discovery_cache/documents/adsense.v2.json +++ b/googleapiclient/discovery_cache/documents/adsense.v2.json @@ -1912,7 +1912,7 @@ } } }, -"revision": "20240429", +"revision": "20240502", "rootUrl": "https://adsense.googleapis.com/", 
"schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index e94edd266be..9af87a7d6c1 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -3686,7 +3686,7 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}'`", +"description": "Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -16204,7 +16204,7 @@ } } }, -"revision": "20240424", +"revision": "20240501", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionEmbedVideoResponse": { @@ -16262,7 +16262,8 @@ "CHILD_TEXT", "DANGEROUS_CONTENT", "RECITATION_TEXT", -"CELEBRITY_IMG" +"CELEBRITY_IMG", +"WATERMARK_IMG_REMOVAL" ], "enumDescriptions": [ "", @@ -16299,7 +16300,8 @@ "", "Text category from SafetyCat v3", "", -"" +"", +"Error message when user attempts to remove watermark from editing image" ], "type": "string" }, @@ -16614,9 +16616,9 @@ "The maximum number of tokens as specified in the request was reached.", "The token generation was stopped as the response was flagged for safety reasons. 
NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.", "The token generation was stopped as the response was flagged for unauthorized citations.", -"All other reasons that stopped the token generation.", +"All other reasons that stopped the token generation (currently only language filter).", "The token generation was stopped as the response was flagged for the terms which are included from the terminology blocklist.", -"The token generation was stopped as the response was flagged for the prohibited contents.", +"The token generation was stopped as the response was flagged for the prohibited contents (currently only CSAM).", "The token generation was stopped as the response was flagged for Sensitive Personally Identifiable Information (SPII) contents." ], "type": "string" @@ -16961,9 +16963,9 @@ "enumDescriptions": [ "Unspecified blocked reason.", "Candidates blocked due to safety.", -"Candidates blocked due to other reason.", +"Candidates blocked due to other reason (currently only language filter).", "Candidates blocked due to the terms which are included from the terminology blocklist.", -"Candidates blocked due to prohibited content." +"Candidates blocked due to prohibited content (currently only CSAM)." ], "type": "string" }, @@ -22662,6 +22664,22 @@ "description": "Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag.", "format": "int32", "type": "integer" +}, +"rrf": { +"$ref": "GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF", +"description": "Optional. Represents RRF algorithm that combines search results." 
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF": { +"description": "Parameters for RRF algorithm that combines search results.", +"id": "GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF", +"properties": { +"alpha": { +"description": "Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense.", +"format": "float", +"type": "number" } }, "type": "object" @@ -22707,7 +22725,12 @@ "description": "The datapoint of the neighbor. Note that full datapoints are returned only when \"return_full_datapoint\" is set to true. Otherwise, only the \"datapoint_id\" and \"crowding_tag\" fields are populated." }, "distance": { -"description": "The distance between the neighbor and the query vector.", +"description": "The distance between the neighbor and the dense embedding query.", +"format": "double", +"type": "number" +}, +"sparseDistance": { +"description": "The distance between the neighbor and the query sparse_embedding.", "format": "double", "type": "number" } @@ -23033,6 +23056,10 @@ "description": "Metadata returned to client when grounding is enabled.", "id": "GoogleCloudAiplatformV1GroundingMetadata", "properties": { +"searchEntryPoint": { +"$ref": "GoogleCloudAiplatformV1SearchEntryPoint", +"description": "Optional. Google search entry for the following-up web searches." +}, "webSearchQueries": { "description": "Optional. Web search queries for the following-up web search.", "items": { @@ -23480,7 +23507,7 @@ "type": "string" }, "featureVector": { -"description": "Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions].", +"description": "Required. Feature embedding vector for dense index. 
An array of numbers with the length of [NearestNeighborSearchConfig.dimensions].", "items": { "format": "float", "type": "number" @@ -23500,6 +23527,10 @@ "$ref": "GoogleCloudAiplatformV1IndexDatapointRestriction" }, "type": "array" +}, +"sparseEmbedding": { +"$ref": "GoogleCloudAiplatformV1IndexDatapointSparseEmbedding", +"description": "Optional. Feature embedding vector for sparse index." } }, "type": "object" @@ -23588,6 +23619,29 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1IndexDatapointSparseEmbedding": { +"description": "Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions.", +"id": "GoogleCloudAiplatformV1IndexDatapointSparseEmbedding", +"properties": { +"dimensions": { +"description": "Optional. The list of indexes for the embedding values of the sparse vector.", +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"values": { +"description": "Optional. The list of embedding values of the sparse vector.", +"items": { +"format": "float", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1IndexEndpoint": { "description": "Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes.", "id": "GoogleCloudAiplatformV1IndexEndpoint", @@ -23700,8 +23754,14 @@ "readOnly": true, "type": "integer" }, +"sparseVectorsCount": { +"description": "Output only. The number of sparse vectors in the Index.", +"format": "int64", +"readOnly": true, +"type": "string" +}, "vectorsCount": { -"description": "Output only. The number of vectors in the Index.", +"description": "Output only. 
The number of dense vectors in the Index.", "format": "int64", "readOnly": true, "type": "string" @@ -24720,7 +24780,8 @@ "NVIDIA_H100_80GB", "TPU_V2", "TPU_V3", -"TPU_V4_POD" +"TPU_V4_POD", +"TPU_V5_LITEPOD" ], "enumDescriptions": [ "Unspecified accelerator type, which means no accelerator.", @@ -24735,7 +24796,8 @@ "Nvidia H100 80Gb GPU.", "TPU v2.", "TPU v3.", -"TPU v4." +"TPU v4.", +"TPU v5." ], "type": "string" }, @@ -26840,7 +26902,10 @@ "OP_IN_DATAPOINT", "MULTIPLE_VALUES", "INVALID_NUMERIC_VALUE", -"INVALID_ENCODING" +"INVALID_ENCODING", +"INVALID_SPARSE_DIMENSIONS", +"INVALID_TOKEN_VALUE", +"INVALID_SPARSE_EMBEDDING" ], "enumDescriptions": [ "Default, shall not be used.", @@ -26849,14 +26914,17 @@ "Invalid csv format.", "Invalid avro format.", "The embedding id is not valid.", -"The size of the embedding vectors does not match with the specified dimension.", +"The size of the dense embedding vectors does not match with the specified dimension.", "The `namespace` field is missing.", "Generic catch-all error. Only used for validation failure where the root cause cannot be easily retrieved programmatically.", "There are multiple restricts with the same `namespace` value.", "Numeric restrict has operator specified in datapoint.", "Numeric restrict has multiple values specified.", "Numeric restrict has invalid numeric value specified.", -"File is not in UTF_8 format." +"File is not in UTF_8 format.", +"Error parsing sparse dimensions field.", +"Token restrict value is invalid.", +"Invalid sparse embedding." ], "type": "string" }, @@ -27219,8 +27287,7 @@ "description": "Optional. Immutable. The specification of a single machine for the template." }, "name": { -"description": "Output only. 
The resource name of the NotebookRuntimeTemplate.", -"readOnly": true, +"description": "The resource name of the NotebookRuntimeTemplate.", "type": "string" }, "networkSpec": { @@ -28260,6 +28327,10 @@ "$ref": "GoogleCloudAiplatformV1PublisherModelCallToActionDeployGke", "description": "Optional. Deploy PublisherModel to Google Kubernetes Engine." }, +"fineTune": { +"$ref": "GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences", +"description": "Optional. Fine tune the PublisherModel with the third-party model tuning UI." +}, "openEvaluationPipeline": { "$ref": "GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences", "description": "Optional. Open evaluation pipeline of the PublisherModel." @@ -28323,6 +28394,10 @@ "$ref": "GoogleCloudAiplatformV1DedicatedResources", "description": "A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration." }, +"deployTaskName": { +"description": "Optional. The name of the deploy task (e.g., \"text to image generation\").", +"type": "string" +}, "largeModelReference": { "$ref": "GoogleCloudAiplatformV1LargeModelReference", "description": "Optional. Large model reference. When this is set, model_artifact_spec is not needed." @@ -31569,6 +31644,10 @@ false }, "type": "array" }, +"systemInstructionGcsUri": { +"description": "The Google Cloud Storage URI that stores the system instruction, starting with gs://.", +"type": "string" +}, "temperature": { "description": "Temperature value used for sampling set when the dataset was saved. This value is used to tune the degree of randomness.", "format": "float", @@ -33413,6 +33492,22 @@ false }, "type": "object" }, +"GoogleCloudAiplatformV1SearchEntryPoint": { +"description": "Google search entry point.", +"id": "GoogleCloudAiplatformV1SearchEntryPoint", +"properties": { +"renderedContent": { +"description": "Optional. 
Web content snippet that can be embedded in a web page or an app webview.", +"type": "string" +}, +"sdkBlob": { +"description": "Optional. Base64 encoded JSON representing array of tuple.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1SearchFeaturesResponse": { "description": "Response message for FeaturestoreService.SearchFeatures.", "id": "GoogleCloudAiplatformV1SearchFeaturesResponse", @@ -34390,12 +34485,12 @@ false "type": "string" }, "epochCount": { -"description": "Optional. Number of training epoches for this tuning job.", +"description": "Optional. Number of complete passes the model makes over the entire training dataset during training.", "format": "int64", "type": "string" }, "learningRateMultiplier": { -"description": "Optional. Learning rate multiplier for tuning.", +"description": "Optional. Multiplier for adjusting the default learning rate.", "format": "double", "type": "number" } @@ -34547,11 +34642,11 @@ false "description": "Optional. Hyperparameters for SFT." }, "trainingDatasetUri": { -"description": "Required. Cloud Storage path to file containing training dataset for tuning.", +"description": "Required. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.", "type": "string" }, "validationDatasetUri": { -"description": "Optional. Cloud Storage path to file containing validation dataset for tuning.", +"description": "Optional. Cloud Storage path to file containing validation dataset for tuning. 
The dataset must be formatted as a JSONL file.", "type": "string" } }, @@ -35442,7 +35537,7 @@ false "id": "GoogleCloudAiplatformV1TuningJob", "properties": { "baseModel": { -"description": "Model name for tuning, e.g., \"gemini-1.0-pro-002\".", +"description": "The base model that is being tuned, e.g., \"gemini-1.0-pro-002\".", "type": "string" }, "createTime": { @@ -36874,7 +36969,14 @@ false "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -37224,7 +37326,14 @@ false "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inpsiration." ], "type": "string" }, @@ -37339,7 +37448,7 @@ false "id": "LanguageLabsAidaTrustRecitationProtoSegmentResult", "properties": { "attributionDataset": { -"description": "The dataset the segment came from.", +"description": "The dataset the segment came from. Datasets change often as model evolves. 
Treat this field as informational only and avoid depending on it directly.", "enum": [ "DATASET_UNSPECIFIED", "WIKIPEDIA", @@ -37688,7 +37797,14 @@ false "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -38038,7 +38154,14 @@ false "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inpsiration." ], "type": "string" }, @@ -38047,8 +38170,7 @@ false "type": "string" }, "docAttribution": { -"$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution", -"description": "populated when recitation_action == CITE" +"$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution" }, "docOccurrences": { "description": "number of documents that contained this segment", @@ -38080,6 +38202,32 @@ false ], "type": "string" }, +"sourceCategory": { +"description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", +"enum": [ +"SOURCE_CATEGORY_UNSPECIFIED", +"SOURCE_CATEGORY_WIKIPEDIA", +"SOURCE_CATEGORY_WEBDOCS", +"SOURCE_CATEGORY_GITHUB", +"SOURCE_CATEGORY_ARXIV", +"SOURCE_CATEGORY_PRIVATE_BOOKS", +"SOURCE_CATEGORY_OTHERS", +"SOURCE_CATEGORY_PUBLIC_BOOKS", +"SOURCE_CATEGORY_GNEWS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +}, "startIndex": { "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. 
The indexes are measured in UTF-16 code units.", "format": "int32", @@ -38513,7 +38661,14 @@ false "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -38863,7 +39018,14 @@ false "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inspiration FT datasets." ], "type": "string" }, @@ -38978,7 +39140,7 @@ false "id": "LearningGenaiRecitationSegmentResult", "properties": { "attributionDataset": { -"description": "The dataset the segment came from.", +"description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", "enum": [ "DATASET_UNSPECIFIED", "WIKIPEDIA", @@ -39327,7 +39489,14 @@ false "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -39677,7 +39846,14 @@ false "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inspiration FT datasets." 
], "type": "string" }, @@ -39686,8 +39862,7 @@ false "type": "string" }, "docAttribution": { -"$ref": "LearningGenaiRecitationDocAttribution", -"description": "populated when recitation_action == CITE" +"$ref": "LearningGenaiRecitationDocAttribution" }, "docOccurrences": { "description": "number of documents that contained this segment", @@ -39719,6 +39894,32 @@ false ], "type": "string" }, +"sourceCategory": { +"description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", +"enum": [ +"SOURCE_CATEGORY_UNSPECIFIED", +"SOURCE_CATEGORY_WIKIPEDIA", +"SOURCE_CATEGORY_WEBDOCS", +"SOURCE_CATEGORY_GITHUB", +"SOURCE_CATEGORY_ARXIV", +"SOURCE_CATEGORY_PRIVATE_BOOKS", +"SOURCE_CATEGORY_OTHERS", +"SOURCE_CATEGORY_PUBLIC_BOOKS", +"SOURCE_CATEGORY_GNEWS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +}, "startIndex": { "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", "format": "int32", @@ -41198,10 +41399,37 @@ false }, "type": "object" }, +"LearningServingLlmAtlasOutputMetadata": { +"id": "LearningServingLlmAtlasOutputMetadata", +"properties": { +"requestTopic": { +"type": "string" +}, +"source": { +"enum": [ +"UNKNOWN", +"FACTUALITY", +"INFOBOT", +"LLM" +], +"enumDescriptions": [ +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, "LearningServingLlmMessageMetadata": { -"description": "LINT.IfChange This metadata contains additional information required for debugging.", +"description": "LINT.IfChange This metadata contains additional information required for debugging. 
Next ID: 28", "id": "LearningServingLlmMessageMetadata", "properties": { +"atlasMetadata": { +"$ref": "LearningServingLlmAtlasOutputMetadata" +}, "classifierSummary": { "$ref": "LearningGenaiRootClassifierOutputSummary", "description": "Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not." @@ -41277,7 +41505,12 @@ false "type": "string" }, "perStreamDecodedTokenCount": { -"description": "NOT YET IMPLEMENTED. Applies to streaming only. Number of tokens decoded / emitted by the model as part of this stream. This may be different from token_count, which contains number of tokens returned in this response after any response rewriting / truncation.", +"description": "Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only.", +"format": "int32", +"type": "integer" +}, +"perStreamReturnedTokenCount": { +"description": "Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only.", "format": "int32", "type": "integer" }, @@ -41293,7 +41526,7 @@ false "description": "Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome." }, "returnTokenCount": { -"description": "NOT YET IMPLEMENTED. Number of tokens returned as part of this candidate.", +"description": "NOT IMPLEMENTED TODO (b/334187574) Remove this field after Labs migrates to per_stream_returned_token_count and total_returned_token_count.", "format": "int32", "type": "integer" }, @@ -41309,7 +41542,12 @@ false "type": "boolean" }, "totalDecodedTokenCount": { -"description": "NOT YET IMPLEMENTED. Aggregated number of total tokens decoded so far. For streaming, this is sum of all the tokens decoded so far i.e. 
aggregated count.", +"description": "Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate.", +"format": "int32", +"type": "integer" +}, +"totalReturnedTokenCount": { +"description": "Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. total so far) Applies to Gemini models only.", "format": "int32", "type": "integer" }, diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 391b200134f..47343070162 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -305,6 +305,50 @@ }, "protocol": "rest", "resources": { +"media": { +"methods": { +"upload": { +"description": "Upload a file into a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles:upload", +"httpMethod": "POST", +"id": "aiplatform.media.upload", +"mediaUpload": { +"accept": [ +"*/*" +], +"protocols": { +"simple": { +"multipart": true, +"path": "/upload/v1beta1/{+parent}/ragFiles:upload" +} +} +}, +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The name of the RagCorpus resource into which to upload the file. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles:upload", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +], +"supportsMediaUpload": true +} +} +}, "projects": { "methods": { "getCacheConfig": { @@ -457,9 +501,347 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"retrieveContexts": { +"description": "Retrieves relevant contexts for a query.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}:retrieveContexts", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.retrieveContexts", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the Location from which to retrieve RagContexts. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}:retrieveContexts", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"agents": { +"resources": { +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.agents.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}:cancel", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.agents.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.agents.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/operations", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.agents.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1beta1/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"wait": { +"description": "Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. 
If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/agents/{agentsId}/operations/{operationsId}:wait", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.agents.operations.wait", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to wait on.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/agents/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +}, +"timeout": { +"description": "The maximum duration to wait before timing out. If left blank, the wait will be at most the time permitted by the underlying HTTP/RPC protocol. If RPC context deadline is also specified, the shorter one will be used.", +"format": "google-duration", +"location": "query", +"type": "string" +} +}, +"path": "v1beta1/{+name}:wait", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} } }, +"apps": { "resources": { +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/apps/{appsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.apps.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/apps/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}:cancel", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/apps/{appsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.apps.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/apps/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/apps/{appsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.apps.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/apps/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/apps/{appsId}/operations", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.apps.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/apps/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1beta1/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"wait": { +"description": "Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. 
If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/apps/{appsId}/operations/{operationsId}:wait", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.apps.operations.wait", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to wait on.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/apps/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +}, +"timeout": { +"description": "The maximum duration to wait before timing out. If left blank, the wait will be at most the time permitted by the underlying HTTP/RPC protocol. If RPC context deadline is also specified, the shorter one will be used.", +"format": "google-duration", +"location": "query", +"type": "string" +} +}, +"path": "v1beta1/{+name}:wait", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, "batchPredictionJobs": { "methods": { "cancel": { @@ -4622,161 +5004,6 @@ } }, "resources": { -"deployments": { -"resources": { -"operations": { -"methods": { -"cancel": { -"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", -"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/extensions/{extensionsId}/deployments/{deploymentsId}/operations/{operationsId}:cancel", -"httpMethod": "POST", -"id": "aiplatform.projects.locations.extensions.deployments.operations.cancel", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "The name of the operation resource to be cancelled.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/extensions/[^/]+/deployments/[^/]+/operations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1beta1/{+name}:cancel", -"response": { -"$ref": "GoogleProtobufEmpty" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"delete": { -"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", -"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/extensions/{extensionsId}/deployments/{deploymentsId}/operations/{operationsId}", -"httpMethod": "DELETE", -"id": "aiplatform.projects.locations.extensions.deployments.operations.delete", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "The name of the operation resource to be deleted.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/extensions/[^/]+/deployments/[^/]+/operations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1beta1/{+name}", -"response": { -"$ref": "GoogleProtobufEmpty" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"get": { -"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/extensions/{extensionsId}/deployments/{deploymentsId}/operations/{operationsId}", -"httpMethod": "GET", -"id": "aiplatform.projects.locations.extensions.deployments.operations.get", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "The name of the operation resource.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/extensions/[^/]+/deployments/[^/]+/operations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1beta1/{+name}", -"response": { -"$ref": "GoogleLongrunningOperation" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"list": { -"description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", -"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/extensions/{extensionsId}/deployments/{deploymentsId}/operations", -"httpMethod": "GET", -"id": "aiplatform.projects.locations.extensions.deployments.operations.list", -"parameterOrder": [ -"name" -], -"parameters": { -"filter": { -"description": "The standard list filter.", -"location": "query", -"type": "string" -}, -"name": { -"description": "The name of the operation's parent resource.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/extensions/[^/]+/deployments/[^/]+$", -"required": true, -"type": "string" -}, -"pageSize": { -"description": "The standard list page size.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "The standard list page token.", -"location": "query", -"type": "string" -} -}, -"path": "v1beta1/{+name}/operations", -"response": { -"$ref": "GoogleLongrunningListOperationsResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"wait": { -"description": "Waits until the specified long-running operation is done or reaches at most a specified timeout, returning the latest state. If the operation is already done, the latest state is immediately returned. If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort basis. 
It may return the latest state before the specified timeout (including immediately), meaning even an immediate response is no guarantee that the operation is done.", -"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/extensions/{extensionsId}/deployments/{deploymentsId}/operations/{operationsId}:wait", -"httpMethod": "POST", -"id": "aiplatform.projects.locations.extensions.deployments.operations.wait", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "The name of the operation resource to wait on.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/extensions/[^/]+/deployments/[^/]+/operations/[^/]+$", -"required": true, -"type": "string" -}, -"timeout": { -"description": "The maximum duration to wait before timing out. If left blank, the wait will be at most the time permitted by the underlying HTTP/RPC protocol. If RPC context deadline is also specified, the shorter one will be used.", -"format": "google-duration", -"location": "query", -"type": "string" -} -}, -"path": "v1beta1/{+name}:wait", -"response": { -"$ref": "GoogleLongrunningOperation" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -} -} -} -} -}, "operations": { "methods": { "cancel": { @@ -4947,7 +5174,7 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}'`", +"description": "Required. The resource name of the Location to create FeatureGroups. 
Format: `projects/{project}/locations/{location}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -14809,6 +15036,127 @@ } }, "ragCorpora": { +"methods": { +"create": { +"description": "Creates a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.ragCorpora.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragCorpora", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.ragCorpora.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"force": { +"description": "Optional. If set to true, any RagFiles in this RagCorpus will also be deleted. Otherwise, the request will only work if the RagCorpus has no RagFiles.", +"location": "query", +"type": "boolean" +}, +"name": { +"description": "Required. The name of the RagCorpus resource to be deleted. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagCorpus resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists RagCorpora in a Location.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard list page token. Typically obtained via ListRagCorporaResponse.next_page_token of the previous VertexRagDataService.ListRagCorpora call.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the Location from which to list the RagCorpora. 
Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragCorpora", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ListRagCorporaResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, "resources": { "operations": { "methods": { @@ -14962,6 +15310,122 @@ } }, "ragFiles": { +"methods": { +"delete": { +"description": "Deletes a RagFile.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles/{ragFilesId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+/ragFiles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a RagFile.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles/{ragFilesId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagFile resource. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+/ragFiles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"import": { +"description": "Import files from Google Cloud Storage or Google Drive into a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles:import", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.import", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The name of the RagCorpus resource into which to import files. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles:import", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1ImportRagFilesRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists RagFiles in a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard list page token. 
Typically obtained via ListRagFilesResponse.next_page_token of the previous VertexRagDataService.ListRagFiles call.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the RagCorpus from which to list the RagFiles. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ListRagFilesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, "resources": { "operations": { "methods": { @@ -18971,7 +19435,7 @@ } } }, -"revision": "20240424", +"revision": "20240501", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionEmbedVideoResponse": { @@ -19029,7 +19493,8 @@ "CHILD_TEXT", "DANGEROUS_CONTENT", "RECITATION_TEXT", -"CELEBRITY_IMG" +"CELEBRITY_IMG", +"WATERMARK_IMG_REMOVAL" ], "enumDescriptions": [ "", @@ -19066,7 +19531,8 @@ "", "Text category from SafetyCat v3", "", -"" +"", +"Error message when user attempts to remove watermark from editing image" ], "type": "string" }, @@ -19381,9 +19847,9 @@ "The maximum number of tokens as specified in the request was reached.", "The token generation was stopped as the response was flagged for safety reasons. 
NOTE: When streaming the Candidate.content will be empty if content filters blocked the output.", "The token generation was stopped as the response was flagged for unauthorized citations.", -"All other reasons that stopped the token generation.", +"All other reasons that stopped the token generation (currently only language filter).", "The token generation was stopped as the response was flagged for the terms which are included from the terminology blocklist.", -"The token generation was stopped as the response was flagged for the prohibited contents.", +"The token generation was stopped as the response was flagged for the prohibited contents (currently only CSAM).", "The token generation was stopped as the response was flagged for Sensitive Personally Identifiable Information (SPII) contents." ], "type": "string" @@ -19728,9 +20194,9 @@ "enumDescriptions": [ "Unspecified blocked reason.", "Candidates blocked due to safety.", -"Candidates blocked due to other reason.", +"Candidates blocked due to other reason (currently only language filter).", "Candidates blocked due to the terms which are included from the terminology blocklist.", -"Candidates blocked due to prohibited content." +"Candidates blocked due to prohibited content (currently only CSAM)." ], "type": "string" }, @@ -22246,6 +22712,25 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest": { +"description": "Request message for [NotebookService.CreateNotebookExecutionJob]", +"id": "GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest", +"properties": { +"notebookExecutionJob": { +"$ref": "GoogleCloudAiplatformV1beta1NotebookExecutionJob", +"description": "Required. The NotebookExecutionJob to create." +}, +"notebookExecutionJobId": { +"description": "Optional. User specified ID for the NotebookExecutionJob.", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the Location to create the NotebookExecutionJob. 
Format: `projects/{project}/locations/{location}`", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1CreateNotebookRuntimeTemplateOperationMetadata": { "description": "Metadata information for NotebookService.CreateNotebookRuntimeTemplate.", "id": "GoogleCloudAiplatformV1beta1CreateNotebookRuntimeTemplateOperationMetadata", @@ -23419,6 +23904,12 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1DirectUploadSource": { +"description": "The input content is encapsulated and uploaded in the request.", +"id": "GoogleCloudAiplatformV1beta1DirectUploadSource", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1DiskSpec": { "description": "Represents the spec of disk options.", "id": "GoogleCloudAiplatformV1beta1DiskSpec", @@ -26362,6 +26853,22 @@ "description": "Crowding is a constraint on a neighbor list produced by nearest neighbor search requiring that no more than some value k' of the k neighbors returned have the same value of crowding_attribute. It's used for improving result diversity. This field is the maximum number of matches with the same crowding tag.", "format": "int32", "type": "integer" +}, +"rrf": { +"$ref": "GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF", +"description": "Optional. Represents RRF algorithm that combines search results." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF": { +"description": "Parameters for RRF algorithm that combines search results.", +"id": "GoogleCloudAiplatformV1beta1FindNeighborsRequestQueryRRF", +"properties": { +"alpha": { +"description": "Required. Users can provide an alpha value to give more weight to dense vs sparse results. For example, if the alpha is 0, we only return sparse and if the alpha is 1, we only return dense.", +"format": "float", +"type": "number" } }, "type": "object" @@ -26407,7 +26914,12 @@ "description": "The datapoint of the neighbor. 
Note that full datapoints are returned only when \"return_full_datapoint\" is set to true. Otherwise, only the \"datapoint_id\" and \"crowding_tag\" fields are populated." }, "distance": { -"description": "The distance between the neighbor and the query vector.", +"description": "The distance between the neighbor and the dense embedding query.", +"format": "double", +"type": "number" +}, +"sparseDistance": { +"description": "The distance between the neighbor and the query sparse_embedding.", "format": "double", "type": "number" } @@ -26930,6 +27442,45 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1GoogleDriveSource": { +"description": "The Google Drive location for the input content.", +"id": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"properties": { +"resourceIds": { +"description": "Required. Google Drive resource IDs.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId": { +"description": "The type and ID of the Google Drive resource.", +"id": "GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId", +"properties": { +"resourceId": { +"description": "Required. The ID of the Google Drive resource.", +"type": "string" +}, +"resourceType": { +"description": "Required. The type of the Google Drive resource.", +"enum": [ +"RESOURCE_TYPE_UNSPECIFIED", +"RESOURCE_TYPE_FILE", +"RESOURCE_TYPE_FOLDER" +], +"enumDescriptions": [ +"Unspecified resource type.", +"File resource type.", +"Folder resource type." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1GroundednessInput": { "description": "Input for groundedness metric.", "id": "GoogleCloudAiplatformV1beta1GroundednessInput", @@ -27007,6 +27558,10 @@ }, "type": "array" }, +"searchEntryPoint": { +"$ref": "GoogleCloudAiplatformV1beta1SearchEntryPoint", +"description": "Optional. Google search entry for the following-up web searches." 
+}, "webSearchQueries": { "description": "Optional. Web search queries for the following-up web search.", "items": { @@ -27370,6 +27925,36 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ImportRagFilesConfig": { +"description": "Config for importing RagFiles.", +"id": "GoogleCloudAiplatformV1beta1ImportRagFilesConfig", +"properties": { +"gcsSource": { +"$ref": "GoogleCloudAiplatformV1beta1GcsSource", +"description": "Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: - `gs://bucket_name/my_directory/object_name/my_file.txt` - `gs://bucket_name/my_directory`" +}, +"googleDriveSource": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"description": "Google Drive location. Supports importing individual files as well as Google Drive folders." +}, +"ragFileChunkingConfig": { +"$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"description": "Specifies the size and overlap of chunks after importing RagFiles." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ImportRagFilesRequest": { +"description": "Request message for VertexRagDataService.ImportRagFiles.", +"id": "GoogleCloudAiplatformV1beta1ImportRagFilesRequest", +"properties": { +"importRagFilesConfig": { +"$ref": "GoogleCloudAiplatformV1beta1ImportRagFilesConfig", +"description": "Required. The config for the RagFiles to be synced and imported into the RagCorpus. VertexRagDataService.ImportRagFiles." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Index": { "description": "A representation of a collection of database items organized in a way that allows for approximate nearest neighbor (a.k.a ANN) algorithms search.", "id": "GoogleCloudAiplatformV1beta1Index", @@ -27465,7 +28050,7 @@ "type": "string" }, "featureVector": { -"description": "Required. Feature embedding vector. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions].", +"description": "Required. 
Feature embedding vector for dense index. An array of numbers with the length of [NearestNeighborSearchConfig.dimensions].", "items": { "format": "float", "type": "number" @@ -27485,6 +28070,10 @@ "$ref": "GoogleCloudAiplatformV1beta1IndexDatapointRestriction" }, "type": "array" +}, +"sparseEmbedding": { +"$ref": "GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding", +"description": "Optional. Feature embedding vector for sparse index." } }, "type": "object" @@ -27573,6 +28162,29 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding": { +"description": "Feature embedding vector for sparse index. An array of numbers whose values are located in the specified dimensions.", +"id": "GoogleCloudAiplatformV1beta1IndexDatapointSparseEmbedding", +"properties": { +"dimensions": { +"description": "Optional. The list of indexes for the embedding values of the sparse vector.", +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"values": { +"description": "Optional. The list of embedding values of the sparse vector.", +"items": { +"format": "float", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1IndexEndpoint": { "description": "Indexes are deployed into it. An IndexEndpoint can have multiple DeployedIndexes.", "id": "GoogleCloudAiplatformV1beta1IndexEndpoint", @@ -27685,8 +28297,14 @@ "readOnly": true, "type": "integer" }, +"sparseVectorsCount": { +"description": "Output only. The number of sparse vectors in the Index.", +"format": "int64", +"readOnly": true, +"type": "string" +}, "vectorsCount": { -"description": "Output only. The number of vectors in the Index.", +"description": "Output only. 
The number of dense vectors in the Index.", "format": "int64", "readOnly": true, "type": "string" @@ -28611,6 +29229,42 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ListRagCorporaResponse": { +"description": "Response message for VertexRagDataService.ListRagCorpora.", +"id": "GoogleCloudAiplatformV1beta1ListRagCorporaResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page.", +"type": "string" +}, +"ragCorpora": { +"description": "List of RagCorpora in the requested page.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ListRagFilesResponse": { +"description": "Response message for VertexRagDataService.ListRagFiles.", +"id": "GoogleCloudAiplatformV1beta1ListRagFilesResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page.", +"type": "string" +}, +"ragFiles": { +"description": "List of RagFiles in the requested page.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse": { "description": "Response message for ReasoningEngineService.ListReasoningEngines", "id": "GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse", @@ -30889,10 +31543,6 @@ }, "type": "array" }, -"instanceType": { -"description": "The prediction instance type that the Model accepts when serving. Supported values are: * `object`: Each input is a JSON object format. * `array`: Each input is a JSON array format.", -"type": "string" -}, "predictionFields": { "description": "Prediction output names of the model. The requirements are the same as the feature_fields. 
For AutoML Tables, the prediction output name presented in schema will be: `predicted_{target_column}`, the `target_column` is the one you specified when you train the model. For Prediction output drift analysis: * AutoML Classification, the distribution of the argmax label will be analyzed. * AutoML Regression, the distribution of the value will be analyzed.", "items": { @@ -31727,7 +32377,10 @@ "OP_IN_DATAPOINT", "MULTIPLE_VALUES", "INVALID_NUMERIC_VALUE", -"INVALID_ENCODING" +"INVALID_ENCODING", +"INVALID_SPARSE_DIMENSIONS", +"INVALID_TOKEN_VALUE", +"INVALID_SPARSE_EMBEDDING" ], "enumDescriptions": [ "Default, shall not be used.", @@ -31736,14 +32389,17 @@ "Invalid csv format.", "Invalid avro format.", "The embedding id is not valid.", -"The size of the embedding vectors does not match with the specified dimension.", +"The size of the dense embedding vectors does not match with the specified dimension.", "The `namespace` field is missing.", "Generic catch-all error. Only used for validation failure where the root cause cannot be easily retrieved programmatically.", "There are multiple restricts with the same `namespace` value.", "Numeric restrict has operator specified in datapoint.", "Numeric restrict has multiple values specified.", "Numeric restrict has invalid numeric value specified.", -"File is not in UTF_8 format." +"File is not in UTF_8 format.", +"Error parsing sparse dimensions field.", +"Token restrict value is invalid.", +"Invalid sparse embedding." ], "type": "string" }, @@ -31874,10 +32530,18 @@ "readOnly": true, "type": "string" }, +"customEnvironmentSpec": { +"$ref": "GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec", +"description": "The custom compute configuration for an execution job." +}, "dataformRepositorySource": { "$ref": "GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource", "description": "The Dataform Repository pointing to a single file notebook repository." 
}, +"directNotebookSource": { +"$ref": "GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource", +"description": "The contents of an input notebook file." +}, "displayName": { "description": "The display name of the NotebookExecutionJob. The name can be up to 128 characters long and can consist of any UTF-8 characters.", "type": "string" @@ -31964,6 +32628,25 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec": { +"description": "Compute configuration to use for an execution job.", +"id": "GoogleCloudAiplatformV1beta1NotebookExecutionJobCustomEnvironmentSpec", +"properties": { +"machineSpec": { +"$ref": "GoogleCloudAiplatformV1beta1MachineSpec", +"description": "The specification of a single machine for the execution job." +}, +"networkSpec": { +"$ref": "GoogleCloudAiplatformV1beta1NetworkSpec", +"description": "The network configuration to use for the execution job." +}, +"persistentDiskSpec": { +"$ref": "GoogleCloudAiplatformV1beta1PersistentDiskSpec", +"description": "The specification of a persistent disk to attach for the execution job." 
+} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource": { "description": "The Dataform Repository containing the input notebook.", "id": "GoogleCloudAiplatformV1beta1NotebookExecutionJobDataformRepositorySource", @@ -31979,6 +32662,18 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource": { +"description": "The content of the input notebook in ipynb format.", +"id": "GoogleCloudAiplatformV1beta1NotebookExecutionJobDirectNotebookSource", +"properties": { +"content": { +"description": "The base64-encoded contents of the input notebook file.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource": { "description": "The Cloud Storage uri for the input notebook.", "id": "GoogleCloudAiplatformV1beta1NotebookExecutionJobGcsNotebookSource", @@ -32236,8 +32931,7 @@ "description": "Optional. Immutable. The specification of a single machine for the template." }, "name": { -"description": "Output only. The resource name of the NotebookRuntimeTemplate.", -"readOnly": true, +"description": "The resource name of the NotebookRuntimeTemplate.", "type": "string" }, "networkSpec": { @@ -33471,6 +34165,10 @@ "$ref": "GoogleCloudAiplatformV1beta1PublisherModelCallToActionDeployGke", "description": "Optional. Deploy PublisherModel to Google Kubernetes Engine." }, +"fineTune": { +"$ref": "GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences", +"description": "Optional. Fine tune the PublisherModel with the third-party model tuning UI." +}, "openEvaluationPipeline": { "$ref": "GoogleCloudAiplatformV1beta1PublisherModelCallToActionRegionalResourceReferences", "description": "Optional. Open evaluation pipeline of the PublisherModel." 
@@ -33534,6 +34232,10 @@ "$ref": "GoogleCloudAiplatformV1beta1DedicatedResources", "description": "A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration." }, +"deployTaskName": { +"description": "Optional. The name of the deploy task (e.g., \"text to image generation\").", +"type": "string" +}, "largeModelReference": { "$ref": "GoogleCloudAiplatformV1beta1LargeModelReference", "description": "Optional. Large model reference. When this is set, model_artifact_spec is not needed." @@ -34279,6 +34981,173 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1RagContexts": { +"description": "Relevant contexts for one query.", +"id": "GoogleCloudAiplatformV1beta1RagContexts", +"properties": { +"contexts": { +"description": "All its contexts.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextsContext" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextsContext": { +"description": "A context of the query.", +"id": "GoogleCloudAiplatformV1beta1RagContextsContext", +"properties": { +"distance": { +"description": "The distance between the query vector and the context text vector.", +"format": "double", +"type": "number" +}, +"sourceUri": { +"description": "For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.", +"type": "string" +}, +"text": { +"description": "The text chunk.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagCorpus": { +"description": "A RagCorpus is a RagFile container and a project can have multiple RagCorpora.", +"id": "GoogleCloudAiplatformV1beta1RagCorpus", +"properties": { +"createTime": { +"description": "Output only. 
Timestamp when this RagCorpus was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the RagCorpus.", +"type": "string" +}, +"displayName": { +"description": "Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.", +"type": "string" +}, +"name": { +"description": "Output only. The resource name of the RagCorpus.", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when this RagCorpus was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagFile": { +"description": "A RagFile contains user data for chunking, embedding and indexing.", +"id": "GoogleCloudAiplatformV1beta1RagFile", +"properties": { +"createTime": { +"description": "Output only. Timestamp when this RagFile was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the RagFile.", +"type": "string" +}, +"directUploadSource": { +"$ref": "GoogleCloudAiplatformV1beta1DirectUploadSource", +"description": "Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.", +"readOnly": true +}, +"displayName": { +"description": "Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.", +"type": "string" +}, +"gcsSource": { +"$ref": "GoogleCloudAiplatformV1beta1GcsSource", +"description": "Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the Cloud Storage uri for now.", +"readOnly": true +}, +"googleDriveSource": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"description": "Output only. Google Drive location. 
Supports importing individual files as well as Google Drive folders.", +"readOnly": true +}, +"name": { +"description": "Output only. The resource name of the RagFile.", +"readOnly": true, +"type": "string" +}, +"ragFileType": { +"description": "Output only. The type of the RagFile.", +"enum": [ +"RAG_FILE_TYPE_UNSPECIFIED", +"RAG_FILE_TYPE_TXT", +"RAG_FILE_TYPE_PDF" +], +"enumDescriptions": [ +"RagFile type is unspecified.", +"RagFile type is TXT.", +"RagFile type is PDF." +], +"readOnly": true, +"type": "string" +}, +"sizeBytes": { +"description": "Output only. The size of the RagFile in bytes.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when this RagFile was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagFileChunkingConfig": { +"description": "Specifies the size and overlap of chunks for RagFiles.", +"id": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"properties": { +"chunkOverlap": { +"description": "The overlap between chunks.", +"format": "int32", +"type": "integer" +}, +"chunkSize": { +"description": "The size of the chunks.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagQuery": { +"description": "A query to retrieve relevant contexts.", +"id": "GoogleCloudAiplatformV1beta1RagQuery", +"properties": { +"similarityTopK": { +"description": "Optional. The number of contexts to retrieve.", +"format": "int32", +"type": "integer" +}, +"text": { +"description": "Optional. 
The query in text format to get relevant contexts.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1RawPredictRequest": { "description": "Request message for PredictionService.RawPredict.", "id": "GoogleCloudAiplatformV1beta1RawPredictRequest", @@ -34918,6 +35787,77 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1RetrieveContextsRequest": { +"description": "Request message for VertexRagService.RetrieveContexts.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsRequest", +"properties": { +"query": { +"$ref": "GoogleCloudAiplatformV1beta1RagQuery", +"description": "Required. Single RAG retrieve query." +}, +"vertexRagStore": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore", +"description": "The data source for Vertex RagStore." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore": { +"description": "The data source for Vertex RagStore.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore", +"properties": { +"ragCorpora": { +"deprecated": true, +"description": "Optional. Deprecated. Please use rag_resources to specify the data source.", +"items": { +"type": "string" +}, +"type": "array" +}, +"ragResources": { +"description": "Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource" +}, +"type": "array" +}, +"vectorDistanceThreshold": { +"description": "Optional. 
Only return contexts with vector distance smaller than the threshold.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource": { +"description": "The definition of the Rag resource.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStoreRagResource", +"properties": { +"ragCorpus": { +"description": "Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"type": "string" +}, +"ragFileIds": { +"description": "Optional. rag_file_id. The files should be in the same rag_corpus set in rag_corpus field.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsResponse": { +"description": "Response message for VertexRagService.RetrieveContexts.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsResponse", +"properties": { +"contexts": { +"$ref": "GoogleCloudAiplatformV1beta1RagContexts", +"description": "The contexts of the query." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1RougeInput": { "description": "Input for rouge metric.", "id": "GoogleCloudAiplatformV1beta1RougeInput", @@ -35038,8 +35978,13 @@ "GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig": { "id": "GoogleCloudAiplatformV1beta1RuntimeConfigVertexAISearchRuntimeConfig", "properties": { +"appId": { +"description": "Vertex AI Search App ID. This is used to construct the search request. By setting this app_id, API will construct the serving config which is required to call search API for the user. The app_id and serving_config_name cannot both be empty at the same time.", +"type": "string" +}, "servingConfigName": { -"description": "Required. Vertext AI Search serving config name. 
Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}`", +"deprecated": true, +"description": "[Deprecated] Please use app_id instead. Vertex AI Search serving config name. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}`", "type": "string" } }, @@ -35394,6 +36339,10 @@ "$ref": "GoogleCloudAiplatformV1beta1CreateModelMonitoringJobRequest", "description": "Request for ModelMonitoringService.CreateModelMonitoringJob." }, +"createNotebookExecutionJobRequest": { +"$ref": "GoogleCloudAiplatformV1beta1CreateNotebookExecutionJobRequest", +"description": "Request for NotebookService.CreateNotebookExecutionJob." +}, "createPipelineJobRequest": { "$ref": "GoogleCloudAiplatformV1beta1CreatePipelineJobRequest", "description": "Request for PipelineService.CreatePipelineJob. CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location})." @@ -39308,6 +40257,22 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1SearchEntryPoint": { +"description": "Google search entry point.", +"id": "GoogleCloudAiplatformV1beta1SearchEntryPoint", +"properties": { +"renderedContent": { +"description": "Optional. Web content snippet that can be embedded in a web page or an app webview.", +"type": "string" +}, +"sdkBlob": { +"description": "Optional. 
Base64 encoded JSON representing array of tuple.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1SearchFeaturesResponse": { "description": "Response message for FeaturestoreService.SearchFeatures.", "id": "GoogleCloudAiplatformV1beta1SearchFeaturesResponse", @@ -42261,6 +43226,47 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1UploadRagFileConfig": { +"description": "Config for uploading RagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", +"properties": { +"ragFileChunkingConfig": { +"$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"description": "Specifies the size and overlap of chunks after uploading RagFile." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1UploadRagFileRequest": { +"description": "Request message for VertexRagDataService.UploadRagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileRequest", +"properties": { +"ragFile": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile", +"description": "Required. The RagFile to upload." +}, +"uploadRagFileConfig": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", +"description": "Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1UploadRagFileResponse": { +"description": "Response message for VertexRagDataService.UploadRagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileResponse", +"properties": { +"error": { +"$ref": "GoogleRpcStatus", +"description": "The error that occurred while processing the RagFile." +}, +"ragFile": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile", +"description": "The RagFile that had been uploaded into the RagCorpus." 
+} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest": { "description": "Request message for IndexService.UpsertDatapoints", "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest", @@ -42342,12 +43348,19 @@ "id": "GoogleCloudAiplatformV1beta1VertexRagStore", "properties": { "ragCorpora": { -"description": "Required. Vertex RAG Store corpus resource name: `projects/{project}/locations/{location}/ragCorpora/{ragCorpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location.", +"description": "Optional. Deprecated. Please use rag_resources instead.", "items": { "type": "string" }, "type": "array" }, +"ragResources": { +"description": "Optional. The representation of the rag source. It can be used to specify corpus only or ragfiles. Currently only support one corpus or multiple files from one corpus. In the future we may open up multiple corpora support.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource" +}, +"type": "array" +}, "similarityTopK": { "description": "Optional. Number of top k results to return from the selected corpora.", "format": "int32", @@ -42361,6 +43374,24 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1VertexRagStoreRagResource": { +"description": "The definition of the Rag resource.", +"id": "GoogleCloudAiplatformV1beta1VertexRagStoreRagResource", +"properties": { +"ragCorpus": { +"description": "Optional. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"type": "string" +}, +"ragFileIds": { +"description": "Optional. rag_file_id. 
The files should be in the same rag_corpus set in rag_corpus field.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1VideoMetadata": { "description": "Metadata describes the input video content.", "id": "GoogleCloudAiplatformV1beta1VideoMetadata", @@ -43331,7 +44362,14 @@ "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -43681,7 +44719,14 @@ "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inpsiration." ], "type": "string" }, @@ -43796,7 +44841,7 @@ "id": "LanguageLabsAidaTrustRecitationProtoSegmentResult", "properties": { "attributionDataset": { -"description": "The dataset the segment came from.", +"description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", "enum": [ "DATASET_UNSPECIFIED", "WIKIPEDIA", @@ -44145,7 +45190,14 @@ "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -44495,7 +45547,14 @@ "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inpsiration." 
], "type": "string" }, @@ -44504,8 +45563,7 @@ "type": "string" }, "docAttribution": { -"$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution", -"description": "populated when recitation_action == CITE" +"$ref": "LanguageLabsAidaTrustRecitationProtoDocAttribution" }, "docOccurrences": { "description": "number of documents that contained this segment", @@ -44537,6 +45595,32 @@ ], "type": "string" }, +"sourceCategory": { +"description": "The category of the source dataset where the segment came from. This is more stable than Dataset.", +"enum": [ +"SOURCE_CATEGORY_UNSPECIFIED", +"SOURCE_CATEGORY_WIKIPEDIA", +"SOURCE_CATEGORY_WEBDOCS", +"SOURCE_CATEGORY_GITHUB", +"SOURCE_CATEGORY_ARXIV", +"SOURCE_CATEGORY_PRIVATE_BOOKS", +"SOURCE_CATEGORY_OTHERS", +"SOURCE_CATEGORY_PUBLIC_BOOKS", +"SOURCE_CATEGORY_GNEWS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +}, "startIndex": { "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", "format": "int32", @@ -44970,7 +46054,14 @@ "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -45320,7 +46411,14 @@ "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inspiration FT datasets." 
], "type": "string" }, @@ -45435,7 +46533,7 @@ "id": "LearningGenaiRecitationSegmentResult", "properties": { "attributionDataset": { -"description": "The dataset the segment came from.", +"description": "The dataset the segment came from. Datasets change often as model evolves. Treat this field as informational only and avoid depending on it directly.", "enum": [ "DATASET_UNSPECIFIED", "WIKIPEDIA", @@ -45784,7 +46882,14 @@ "GEMINI_V2_TAIL_PATCH_OPEN_BOOKS_QA_UNANSWERABLE", "GEMINI_V2_TAIL_PATCH_PMC", "GEMINI_V2_TAIL_PATCH_VOXPOPULI", -"GEMINI_V2_TAIL_PATCH_FLEURS" +"GEMINI_V2_TAIL_PATCH_FLEURS", +"GEMINI_V2_SSFS", +"GEMINI_V2_CODE_TRANSFORM_SYNTHETIC_ERROR_FIX", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_COMMITS", +"GEMINI_V2_CODE_TRANSFORM_GITHUB_PR", +"GEMINI_V2_SQL_REPAIR_SFT", +"GEMINI_V2_JSON_MODE_SYS_INSTRUCTION", +"YT_CONTENT_INSPIRATION" ], "enumDescriptions": [ "", @@ -46134,7 +47239,14 @@ "", "Gemini V2 only tail patch.", "", -"" +"", +"Gemini V2 rev10", +"", +"", +"", +"", +"", +"Youtube Content Inspiration FT datasets." ], "type": "string" }, @@ -46143,8 +47255,7 @@ "type": "string" }, "docAttribution": { -"$ref": "LearningGenaiRecitationDocAttribution", -"description": "populated when recitation_action == CITE" +"$ref": "LearningGenaiRecitationDocAttribution" }, "docOccurrences": { "description": "number of documents that contained this segment", @@ -46176,6 +47287,32 @@ ], "type": "string" }, +"sourceCategory": { +"description": "The category of the source dataset where the segment came from. 
This is more stable than Dataset.", +"enum": [ +"SOURCE_CATEGORY_UNSPECIFIED", +"SOURCE_CATEGORY_WIKIPEDIA", +"SOURCE_CATEGORY_WEBDOCS", +"SOURCE_CATEGORY_GITHUB", +"SOURCE_CATEGORY_ARXIV", +"SOURCE_CATEGORY_PRIVATE_BOOKS", +"SOURCE_CATEGORY_OTHERS", +"SOURCE_CATEGORY_PUBLIC_BOOKS", +"SOURCE_CATEGORY_GNEWS" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +}, "startIndex": { "description": "The segment boundary start (inclusive) and end index (exclusive) in the given text. In the streaming RPC, the indexes always start from the beginning of the first text in the entire stream. The indexes are measured in UTF-16 code units.", "format": "int32", @@ -47655,10 +48792,37 @@ false }, "type": "object" }, +"LearningServingLlmAtlasOutputMetadata": { +"id": "LearningServingLlmAtlasOutputMetadata", +"properties": { +"requestTopic": { +"type": "string" +}, +"source": { +"enum": [ +"UNKNOWN", +"FACTUALITY", +"INFOBOT", +"LLM" +], +"enumDescriptions": [ +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, "LearningServingLlmMessageMetadata": { -"description": "LINT.IfChange This metadata contains additional information required for debugging.", +"description": "LINT.IfChange This metadata contains additional information required for debugging. Next ID: 28", "id": "LearningServingLlmMessageMetadata", "properties": { +"atlasMetadata": { +"$ref": "LearningServingLlmAtlasOutputMetadata" +}, "classifierSummary": { "$ref": "LearningGenaiRootClassifierOutputSummary", "description": "Summary of classifier output. We attach this to all messages regardless of whether classification rules triggered or not." @@ -47734,7 +48898,12 @@ false "type": "string" }, "perStreamDecodedTokenCount": { -"description": "NOT YET IMPLEMENTED. Applies to streaming only. Number of tokens decoded / emitted by the model as part of this stream. 
This may be different from token_count, which contains number of tokens returned in this response after any response rewriting / truncation.", +"description": "Number of tokens decoded by the model as part of a stream. This count may be different from `per_stream_returned_token_count` which, is counted after any response rewriting or truncation. Applies to streaming response only.", +"format": "int32", +"type": "integer" +}, +"perStreamReturnedTokenCount": { +"description": "Number of tokens returned per stream in a response candidate after any response rewriting or truncation. Applies to streaming response only. Applies to Gemini models only.", "format": "int32", "type": "integer" }, @@ -47750,7 +48919,7 @@ false "description": "Recitation Results. It will be populated as long as Recitation processing is enabled, regardless of recitation outcome." }, "returnTokenCount": { -"description": "NOT YET IMPLEMENTED. Number of tokens returned as part of this candidate.", +"description": "NOT IMPLEMENTED TODO (b/334187574) Remove this field after Labs migrates to per_stream_returned_token_count and total_returned_token_count.", "format": "int32", "type": "integer" }, @@ -47766,7 +48935,12 @@ false "type": "boolean" }, "totalDecodedTokenCount": { -"description": "NOT YET IMPLEMENTED. Aggregated number of total tokens decoded so far. For streaming, this is sum of all the tokens decoded so far i.e. aggregated count.", +"description": "Total tokens decoded so far per response_candidate. For streaming: Count of all the tokens decoded so far (aggregated count). For unary: Count of all the tokens decoded per response_candidate.", +"format": "int32", +"type": "integer" +}, +"totalReturnedTokenCount": { +"description": "Total number of tokens returned in a response candidate. For streaming, it is the aggregated count (i.e. 
total so far) Applies to Gemini models only.", "format": "int32", "type": "integer" }, diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json index 4c12c81068e..31016d66f6b 100644 --- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json @@ -423,7 +423,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://alertcenter.googleapis.com/", "schemas": { "AbuseDetected": { diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1.json b/googleapiclient/discovery_cache/documents/alloydb.v1.json index edc5a08c66b..35e52302b75 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1.json @@ -1461,7 +1461,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -1856,6 +1856,11 @@ false "description": "Labels as key value pairs", "type": "object" }, +"maintenanceSchedule": { +"$ref": "MaintenanceSchedule", +"description": "Output only. The maintenance schedule for the cluster, generated for a specific rollout if a maintenance window is set.", +"readOnly": true +}, "maintenanceUpdatePolicy": { "$ref": "MaintenanceUpdatePolicy", "description": "Optional. The maintenance update policy determines when to allow or deny updates." @@ -2596,6 +2601,19 @@ false }, "type": "object" }, +"MaintenanceSchedule": { +"description": "MaintenanceSchedule stores the maintenance schedule generated from the MaintenanceUpdatePolicy, once a maintenance rollout is triggered, if MaintenanceWindow is set, and if there is no conflicting DenyPeriod. The schedule is cleared once the update takes place. 
This field cannot be manually changed; modify the MaintenanceUpdatePolicy instead.", +"id": "MaintenanceSchedule", +"properties": { +"startTime": { +"description": "Output only. The scheduled start time for the maintenance.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "MaintenanceUpdatePolicy": { "description": "MaintenanceUpdatePolicy defines the policy for system updates.", "id": "MaintenanceUpdatePolicy", @@ -4209,6 +4227,7 @@ false "PRODUCT_TYPE_ON_PREM", "ON_PREM", "PRODUCT_TYPE_MEMORYSTORE", +"PRODUCT_TYPE_BIGTABLE", "PRODUCT_TYPE_OTHER" ], "enumDeprecated": [ @@ -4221,6 +4240,7 @@ false, false, true, false, +false, false ], "enumDescriptions": [ @@ -4233,6 +4253,7 @@ false "On premises database product.", "On premises database product.", "Memorystore product area in GCP", +"Bigtable product area in GCP", "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum." ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json index b24d8c0ad1b..97f6ca495cf 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json @@ -1461,7 +1461,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -4338,6 +4338,7 @@ false "PRODUCT_TYPE_ON_PREM", "ON_PREM", "PRODUCT_TYPE_MEMORYSTORE", +"PRODUCT_TYPE_BIGTABLE", "PRODUCT_TYPE_OTHER" ], "enumDeprecated": [ @@ -4350,6 +4351,7 @@ false, false, true, false, +false, false ], "enumDescriptions": [ @@ -4362,6 +4364,7 @@ false "On premises database product.", "On premises database product.", "Memorystore product area in GCP", +"Bigtable product area in GCP", "Other refers to rest of other product type. 
This is to be when product type is known, but it is not present in this enum." ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json index a0ba704c67f..1215b4a7f87 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json @@ -1458,7 +1458,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -4314,6 +4314,7 @@ false "PRODUCT_TYPE_ON_PREM", "ON_PREM", "PRODUCT_TYPE_MEMORYSTORE", +"PRODUCT_TYPE_BIGTABLE", "PRODUCT_TYPE_OTHER" ], "enumDeprecated": [ @@ -4326,6 +4327,7 @@ false, false, true, false, +false, false ], "enumDescriptions": [ @@ -4338,6 +4340,7 @@ false "On premises database product.", "On premises database product.", "Memorystore product area in GCP", +"Bigtable product area in GCP", "Other refers to rest of other product type. This is to be when product type is known, but it is not present in this enum." 
], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json index 7c0de3dbb78..e357eb9c579 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json @@ -4617,7 +4617,7 @@ } } }, -"revision": "20240427", +"revision": "20240503", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1alphaAccessBetweenFilter": { diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json index 2f1e276e6b4..ea2b1e3697f 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json @@ -1788,7 +1788,7 @@ } } }, -"revision": "20240427", +"revision": "20240503", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1betaAccessBetweenFilter": { diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json index 89dc602b05b..3c8eb09eca8 100644 --- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json @@ -440,7 +440,7 @@ } } }, -"revision": "20240427", +"revision": "20240504", "rootUrl": "https://analyticsdata.googleapis.com/", "schemas": { "ActiveMetricRestriction": { diff --git a/googleapiclient/discovery_cache/documents/analyticshub.v1.json b/googleapiclient/discovery_cache/documents/analyticshub.v1.json index 5ea31ba780a..784534ca888 100644 --- a/googleapiclient/discovery_cache/documents/analyticshub.v1.json +++ b/googleapiclient/discovery_cache/documents/analyticshub.v1.json @@ -1022,7 +1022,7 @@ } } }, -"revision": "20240425", +"revision": 
"20240429", "rootUrl": "https://analyticshub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json b/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json index 6b8b7de044a..ae5091bd9b2 100644 --- a/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json @@ -695,7 +695,7 @@ } } }, -"revision": "20240425", +"revision": "20240429", "rootUrl": "https://analyticshub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json index b2264cd1488..563f6f66c48 100644 --- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json +++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json @@ -851,7 +851,7 @@ } } }, -"revision": "20240427", +"revision": "20240430", "rootUrl": "https://androiddeviceprovisioning.googleapis.com/", "schemas": { "ClaimDeviceRequest": { diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json index e8d2c456a5f..0fea43a6ca4 100644 --- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json @@ -2649,7 +2649,7 @@ } } }, -"revision": "20240425", +"revision": "20240502", "rootUrl": "https://androidenterprise.googleapis.com/", "schemas": { "Administrator": { diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index 568399861c8..3a62ce760c2 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -4731,7 +4731,7 @@ } } 
}, -"revision": "20240429", +"revision": "20240502", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Abi": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json index 51ee78b1c99..8ab26c8afbb 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json @@ -1083,7 +1083,7 @@ } } }, -"revision": "20240410", +"revision": "20240424", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json index bbf005516ab..5f450f1742f 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json @@ -1083,7 +1083,7 @@ } } }, -"revision": "20240410", +"revision": "20240424", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigee.v1.json b/googleapiclient/discovery_cache/documents/apigee.v1.json index b7b7ac97870..2f455dc6b82 100644 --- a/googleapiclient/discovery_cache/documents/apigee.v1.json +++ b/googleapiclient/discovery_cache/documents/apigee.v1.json @@ -10045,7 +10045,7 @@ } } }, -"revision": "20240419", +"revision": "20240430", "rootUrl": "https://apigee.googleapis.com/", "schemas": { "EdgeConfigstoreBundleBadBundle": { diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json index 3514df3ec69..575e61bda50 100644 --- a/googleapiclient/discovery_cache/documents/apikeys.v2.json +++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json @@ -396,7 +396,7 @@ } } }, -"revision": "20240414", +"revision": "20240505", "rootUrl": "https://apikeys.googleapis.com/", "schemas": { "Operation": { 
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json index 954202770e3..79e6bdbc1be 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1.json @@ -1718,7 +1718,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json index 38fa2286170..40041cccc89 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json @@ -946,7 +946,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json index f34ed1cc239..852deec0ea9 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json @@ -1918,7 +1918,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/apphub.v1.json b/googleapiclient/discovery_cache/documents/apphub.v1.json index 192eb07ae7c..be1d6377638 100644 --- a/googleapiclient/discovery_cache/documents/apphub.v1.json +++ b/googleapiclient/discovery_cache/documents/apphub.v1.json @@ -1346,7 +1346,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://apphub.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/apphub.v1alpha.json 
b/googleapiclient/discovery_cache/documents/apphub.v1alpha.json index 881580c39f1..11107f9ffc1 100644 --- a/googleapiclient/discovery_cache/documents/apphub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/apphub.v1alpha.json @@ -1438,7 +1438,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://apphub.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json index 7eeeb41a1a6..bcede99437b 100644 --- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json @@ -586,7 +586,7 @@ } } }, -"revision": "20240427", +"revision": "20240505", "rootUrl": "https://area120tables.googleapis.com/", "schemas": { "BatchCreateRowsRequest": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json index 91d440668e9..7d21c2298c9 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json @@ -115,38 +115,6 @@ }, "protocol": "rest", "resources": { -"media": { -"methods": { -"download": { -"description": "Download a file.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/files/{filesId}:download", -"httpMethod": "GET", -"id": "artifactregistry.media.download", -"parameterOrder": [ -"name" -], -"parameters": { -"name": { -"description": "Required. 
The name of the file to download.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+/files/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+name}:download", -"response": { -"$ref": "DownloadFileResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform", -"https://www.googleapis.com/auth/cloud-platform.read-only" -], -"supportsMediaDownload": true, -"useMediaDownloadService": true -} -} -}, "projects": { "methods": { "getProjectSettings": { @@ -538,7 +506,7 @@ ], "parameters": { "name": { -"description": "The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`.", +"description": "The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+$", "required": true, @@ -767,6 +735,34 @@ }, "files": { "methods": { +"download": { +"description": "Download a file.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/files/{filesId}:download", +"httpMethod": "GET", +"id": "artifactregistry.projects.locations.repositories.files.download", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The name of the file to download.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+/files/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:download", +"response": { +"$ref": "DownloadFileResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloud-platform.read-only" +], +"supportsMediaDownload": true, +"useMediaDownloadService": true +}, "get": { "description": "Gets a file.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/files/{filesId}", @@ -1782,7 +1778,7 @@ } } }, -"revision": "20240425", +"revision": "20240501", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -2224,7 +2220,7 @@ "type": "array" }, "name": { -"description": "The name of the file, for example: \"projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt\". If the file ID part contains slashes, they are escaped.", +"description": "The name of the file, for example: `projects/p1/locations/us-central1/repositories/repo1/files/a%2Fb%2Fc.txt`. If the file ID part contains slashes, they are escaped.", "type": "string" }, "owner": { @@ -3319,13 +3315,18 @@ false "type": "string" }, "name": { -"description": "The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`.", +"description": "The name of the repository, for example: `projects/p1/locations/us-central1/repositories/repo1`. For each location in a project, repository names must be unique.", "type": "string" }, "remoteRepositoryConfig": { "$ref": "RemoteRepositoryConfig", "description": "Configuration specific for a Remote Repository." }, +"satisfiesPzi": { +"description": "Output only. If set, the repository satisfies physical zone isolation.", +"readOnly": true, +"type": "boolean" +}, "satisfiesPzs": { "description": "Output only. 
If set, the repository satisfies physical zone separation.", "readOnly": true, @@ -3754,7 +3755,7 @@ false "type": "object" }, "VirtualRepositoryConfig": { -"description": "Virtual repository configuration.", +"description": "LINT.IfChange Virtual repository configuration.", "id": "VirtualRepositoryConfig", "properties": { "upstreamPolicies": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json index 1e4b09f130d..3ede16d36e0 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json @@ -936,7 +936,7 @@ } } }, -"revision": "20240412", +"revision": "20240425", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json index 52471489054..9e04329ce73 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json @@ -1208,7 +1208,7 @@ } } }, -"revision": "20240412", +"revision": "20240425", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json index ea73fdf8ea8..87b878c20b9 100644 --- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json @@ -563,7 +563,7 @@ } } }, -"revision": "20240425", +"revision": "20240502", "rootUrl": "https://assuredworkloads.googleapis.com/", "schemas": { "GoogleCloudAssuredworkloadsV1beta1AcknowledgeViolationRequest": { diff --git 
a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json index e703187f04a..afae53add05 100644 --- a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json +++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json @@ -206,6 +206,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"disableHyperthreading": { +"description": "Perform disable hyperthreading operation on a single server.", +"flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:disableHyperthreading", +"httpMethod": "POST", +"id": "baremetalsolution.projects.locations.instances.disableHyperthreading", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}:disableHyperthreading", +"request": { +"$ref": "DisableHyperthreadingRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "disableInteractiveSerialConsole": { "description": "Disable the interactive serial console feature on an instance.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:disableInteractiveSerialConsole", @@ -234,6 +262,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"enableHyperthreading": { +"description": "Perform enable hyperthreading operation on a single server.", +"flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:enableHyperthreading", +"httpMethod": "POST", +"id": "baremetalsolution.projects.locations.instances.enableHyperthreading", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}:enableHyperthreading", +"request": { +"$ref": "EnableHyperthreadingRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "enableInteractiveSerialConsole": { "description": "Enable the interactive serial console feature on an instance.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:enableInteractiveSerialConsole", @@ -387,6 +443,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"reimage": { +"description": "Perform reimage operation on a single server.", +"flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:reimage", +"httpMethod": "POST", +"id": "baremetalsolution.projects.locations.instances.reimage", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The `name` field is used to identify the instance. Format: projects/{project}/locations/{location}/instances/{instance}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v2/{+name}:reimage", +"request": { +"$ref": "ReimageInstanceRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "rename": { "description": "RenameInstance sets a new name for an instance. 
Use with caution, previous names become immediately invalidated.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:rename", @@ -1638,7 +1722,7 @@ } } }, -"revision": "20240321", +"revision": "20240422", "rootUrl": "https://baremetalsolution.googleapis.com/", "schemas": { "AllowedClient": { @@ -1707,6 +1791,12 @@ }, "type": "object" }, +"DisableHyperthreadingRequest": { +"description": "Message requesting to perform disable hyperthreading operation on a server.", +"id": "DisableHyperthreadingRequest", +"properties": {}, +"type": "object" +}, "DisableInteractiveSerialConsoleRequest": { "description": "Message for disabling the interactive serial console on an instance.", "id": "DisableInteractiveSerialConsoleRequest", @@ -1725,6 +1815,12 @@ "properties": {}, "type": "object" }, +"EnableHyperthreadingRequest": { +"description": "Message requesting to perform enable hyperthreading operation on a server.", +"id": "EnableHyperthreadingRequest", +"properties": {}, +"type": "object" +}, "EnableInteractiveSerialConsoleRequest": { "description": "Message for enabling the interactive serial console on an instance.", "id": "EnableInteractiveSerialConsoleRequest", @@ -3175,6 +3271,28 @@ }, "type": "object" }, +"ReimageInstanceRequest": { +"description": "Message requesting to perform reimage operation on a server.", +"id": "ReimageInstanceRequest", +"properties": { +"kmsKeyVersion": { +"description": "Optional. Name of the KMS crypto key version used to encrypt the initial passwords. The key has to have ASYMMETRIC_DECRYPT purpose. Format is `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}/cryptoKeyVersions/{version}`.", +"type": "string" +}, +"osImage": { +"description": "Required. The OS image code of the image which will be used in the reimage operation.", +"type": "string" +}, +"sshKeys": { +"description": "Optional. 
List of SSH Keys used during reimaging an instance.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "RenameInstanceRequest": { "description": "Message requesting rename of a server.", "id": "RenameInstanceRequest", diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json index 749f0d98cf2..211608285bc 100644 --- a/googleapiclient/discovery_cache/documents/batch.v1.json +++ b/googleapiclient/discovery_cache/documents/batch.v1.json @@ -561,7 +561,7 @@ } } }, -"revision": "20240411", +"revision": "20240425", "rootUrl": "https://batch.googleapis.com/", "schemas": { "Accelerator": { @@ -2067,7 +2067,7 @@ "id": "TaskExecution", "properties": { "exitCode": { -"description": "When task is completed as the status of FAILED or SUCCEEDED, exit code is for one task execution result, default is 0 as success.", +"description": "The exit code of a finished task. If the task succeeded, the exit code will be 0. If the task failed but not due to the following reasons, the exit code will be 50000. Otherwise, it can be from different sources: - Batch known failures as https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes. - Batch runnable execution failures: You can rely on Batch logs for further diagnose: https://cloud.google.com/batch/docs/analyze-job-using-logs. 
If there are multiple runnables failures, Batch only exposes the first error caught for now.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json index 90a4b3439bc..c0c898b7bdd 100644 --- a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json +++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json @@ -1804,7 +1804,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://beyondcorp.googleapis.com/", "schemas": { "AllocatedConnection": { @@ -1868,6 +1868,16 @@ "description": "Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway.", "type": "string" }, +"satisfiesPzi": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, +"satisfiesPzs": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, "state": { "description": "Output only. The current state of the AppGateway.", "enum": [ @@ -2094,6 +2104,16 @@ "description": "Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection.", "type": "string" }, +"satisfiesPzi": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, +"satisfiesPzs": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, "state": { "description": "Output only. 
The current state of the AppConnection.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json index 27e92bec360..e39deb5d5da 100644 --- a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json @@ -1206,6 +1206,33 @@ }, "subscriptions": { "methods": { +"cancel": { +"description": "Cancels an existing BeyondCorp Enterprise Subscription in a given organization. Location will always be global as BeyondCorp subscriptions are per organization. Returns the timestamp for when the cancellation will become effective", +"flatPath": "v1alpha/organizations/{organizationsId}/locations/{locationsId}/subscriptions/{subscriptionsId}:cancel", +"httpMethod": "GET", +"id": "beyondcorp.organizations.locations.subscriptions.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the resource.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/subscriptions/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v1alpha/{+name}:cancel", +"response": { +"$ref": "GoogleCloudBeyondcorpSaasplatformSubscriptionsV1alphaCancelSubscriptionResponse" +} +}, "create": { "description": "Creates a new BeyondCorp Enterprise Subscription in a given organization. Location will always be global as BeyondCorp subscriptions are per organization.", "flatPath": "v1alpha/organizations/{organizationsId}/locations/{locationsId}/subscriptions", @@ -1308,7 +1335,7 @@ "type": "string" }, "updateMask": { -"description": "Required. Field mask is used to specify the fields to be overwritten in the Subscription resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. If the user does not provide a mask then all mutable fields will be overwritten. Mutable fields: type, state.", +"description": "Required. Field mask is used to specify the fields to be overwritten in the Subscription resource by the update. The fields specified in the update_mask are relative to the resource, not the full request. A field will be overwritten if it is in the mask. Mutable fields: seat_count.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -3939,7 +3966,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://beyondcorp.googleapis.com/", "schemas": { "AllocatedConnection": { @@ -4003,6 +4030,16 @@ "description": "Required. Unique resource name of the AppGateway. The name is ignored when creating an AppGateway.", "type": "string" }, +"satisfiesPzi": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, +"satisfiesPzs": { +"description": "Output only. 
Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, "state": { "description": "Output only. The current state of the AppGateway.", "enum": [ @@ -4645,6 +4682,16 @@ "description": "Required. Unique resource name of the AppConnection. The name is ignored when creating a AppConnection.", "type": "string" }, +"satisfiesPzi": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, +"satisfiesPzs": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, "state": { "description": "Output only. The current state of the AppConnection.", "enum": [ @@ -5936,6 +5983,18 @@ }, "type": "object" }, +"GoogleCloudBeyondcorpSaasplatformSubscriptionsV1alphaCancelSubscriptionResponse": { +"description": "Response message for BeyondCorp.CancelSubscription", +"id": "GoogleCloudBeyondcorpSaasplatformSubscriptionsV1alphaCancelSubscriptionResponse", +"properties": { +"effectiveCancellationTime": { +"description": "Time when the cancellation will become effective", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudBeyondcorpSaasplatformSubscriptionsV1alphaListSubscriptionsResponse": { "description": "Response message for BeyondCorp.ListSubscriptions.", "id": "GoogleCloudBeyondcorpSaasplatformSubscriptionsV1alphaListSubscriptionsResponse", @@ -5963,6 +6022,10 @@ "readOnly": true, "type": "boolean" }, +"billingAccount": { +"description": "Optional. Name of the billing account in the format. e.g. billingAccounts/123456-123456-123456 Required if Subscription is of Paid type.", +"type": "string" +}, "createTime": { "description": "Output only. 
Create time of the subscription.", "format": "google-datetime", diff --git a/googleapiclient/discovery_cache/documents/biglake.v1.json b/googleapiclient/discovery_cache/documents/biglake.v1.json index 51dbedcf4d6..31c1ae7222a 100644 --- a/googleapiclient/discovery_cache/documents/biglake.v1.json +++ b/googleapiclient/discovery_cache/documents/biglake.v1.json @@ -616,7 +616,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://biglake.googleapis.com/", "schemas": { "Catalog": { diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json index 7e966f27b38..cdc1782ee44 100644 --- a/googleapiclient/discovery_cache/documents/bigquery.v2.json +++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json @@ -1935,7 +1935,7 @@ } } }, -"revision": "20240418", +"revision": "20240423", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -9006,7 +9006,7 @@ "readOnly": true }, "replicationIntervalMs": { -"description": "Required. Specifies the interval at which the source table is polled for updates.", +"description": "Optional. Specifies the interval at which the source table is polled for updates. It's Optional. 
If not specified, default replication interval would be applied.", "format": "int64", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1.json index 1c0cd930eb4..1121c409a45 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1.json @@ -366,7 +366,7 @@ } } }, -"revision": "20240418", +"revision": "20240423", "rootUrl": "https://bigqueryconnection.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json index 0f11651e375..29bd2deee8d 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json @@ -395,7 +395,7 @@ } } }, -"revision": "20240418", +"revision": "20240423", "rootUrl": "https://bigqueryconnection.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json index 3506242bbcc..73b0c5e4850 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json @@ -395,7 +395,7 @@ } } }, -"revision": "20240415", +"revision": "20240422", "rootUrl": "https://bigquerydatapolicy.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json index 354f69ede26..4ed92b063c3 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json @@ 
-1398,7 +1398,7 @@ } } }, -"revision": "20240420", +"revision": "20240427", "rootUrl": "https://bigquerydatatransfer.googleapis.com/", "schemas": { "CheckValidCredsRequest": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json index 8084fc8161b..d134328548a 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json @@ -880,7 +880,7 @@ } } }, -"revision": "20240423", +"revision": "20240430", "rootUrl": "https://bigqueryreservation.googleapis.com/", "schemas": { "Assignment": { diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json index 16a5906ca8e..4b22c9aaa9d 100644 --- a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json +++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json @@ -1998,7 +1998,7 @@ "type": "string" }, "view": { -"description": "Optional. The resource_view to be applied to the returned views' fields. Default to NAME_ONLY.", +"description": "Optional. The resource_view to be applied to the returned AuthorizedViews' fields. Default to NAME_ONLY.", "enum": [ "RESPONSE_VIEW_UNSPECIFIED", "NAME_ONLY", @@ -2194,7 +2194,7 @@ } } }, -"revision": "20240413", +"revision": "20240429", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { @@ -2747,7 +2747,7 @@ }, "originalRequest": { "$ref": "CreateAuthorizedViewRequest", -"description": "The request that prompted the initiation of this CreateInstance operation." +"description": "The request that prompted the initiation of this CreateAuthorizedView operation." }, "requestTime": { "description": "The time at which the original request was received.", @@ -3293,6 +3293,11 @@ "description": "The unique name of the instance. 
Values are of the form `projects/{project}/instances/a-z+[a-z0-9]`.", "type": "string" }, +"satisfiesPzi": { +"description": "Output only. Reserved for future use.", +"readOnly": true, +"type": "boolean" +}, "satisfiesPzs": { "description": "Output only. Reserved for future use.", "readOnly": true, diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json index 8fc5c9c1c04..0dc4db4884c 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json @@ -742,7 +742,7 @@ } } }, -"revision": "20240419", +"revision": "20240426", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json index d2e2cb64cb7..e642590b42a 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json @@ -551,7 +551,7 @@ } } }, -"revision": "20240419", +"revision": "20240426", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json b/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json index 5dc5c774483..86861f1edd6 100644 --- a/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json +++ b/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json @@ -487,7 +487,7 @@ } } }, -"revision": "20240417", +"revision": "20240425", "rootUrl": "https://blockchainnodeengine.googleapis.com/", "schemas": { "BlockchainNode": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json index 
dfe65288c25..eb4026b48c1 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v2.json +++ b/googleapiclient/discovery_cache/documents/blogger.v2.json @@ -401,7 +401,7 @@ } } }, -"revision": "20240429", +"revision": "20240501", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json index d2925e697a4..77cf6670573 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v3.json +++ b/googleapiclient/discovery_cache/documents/blogger.v3.json @@ -1710,7 +1710,7 @@ } } }, -"revision": "20240429", +"revision": "20240501", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json index 6f99923fb43..16b4541981f 100644 --- a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json +++ b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json @@ -417,7 +417,7 @@ } } }, -"revision": "20240427", +"revision": "20240504", "rootUrl": "https://businessprofileperformance.googleapis.com/", "schemas": { "DailyMetricTimeSeries": { diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json index 75e825af41f..4cca6760e9b 100644 --- a/googleapiclient/discovery_cache/documents/calendar.v3.json +++ b/googleapiclient/discovery_cache/documents/calendar.v3.json @@ -1759,7 +1759,7 @@ } } }, -"revision": "20240419", +"revision": "20240425", "rootUrl": "https://www.googleapis.com/", "schemas": { "Acl": { diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index bd690d02d0d..09d9d1d00fb 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ 
b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -1215,7 +1215,7 @@ } } }, -"revision": "20240423", +"revision": "20240427", "rootUrl": "https://chat.googleapis.com/", "schemas": { "AccessoryWidget": { diff --git a/googleapiclient/discovery_cache/documents/checks.v1alpha.json b/googleapiclient/discovery_cache/documents/checks.v1alpha.json index 95d9d76ac3d..6e2cdc85dbf 100644 --- a/googleapiclient/discovery_cache/documents/checks.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/checks.v1alpha.json @@ -414,7 +414,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://checks.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json index 7ec3fae8b57..0b6dd1565f8 100644 --- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json @@ -1172,7 +1172,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://chromemanagement.googleapis.com/", "schemas": { "GoogleChromeManagementV1AndroidAppInfo": { diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json index b351428bf1b..67f63e06f95 100644 --- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json @@ -557,7 +557,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://chromepolicy.googleapis.com/", "schemas": { "GoogleChromePolicyVersionsV1AdditionalTargetKeyName": { diff --git a/googleapiclient/discovery_cache/documents/civicinfo.v2.json b/googleapiclient/discovery_cache/documents/civicinfo.v2.json index f2812d9625f..67df8b20adc 100644 --- a/googleapiclient/discovery_cache/documents/civicinfo.v2.json +++ 
b/googleapiclient/discovery_cache/documents/civicinfo.v2.json @@ -365,7 +365,7 @@ } } }, -"revision": "20240423", +"revision": "20240430", "rootUrl": "https://civicinfo.googleapis.com/", "schemas": { "AdministrationRegion": { diff --git a/googleapiclient/discovery_cache/documents/classroom.v1.json b/googleapiclient/discovery_cache/documents/classroom.v1.json index a9105ba0c46..24f4613e813 100644 --- a/googleapiclient/discovery_cache/documents/classroom.v1.json +++ b/googleapiclient/discovery_cache/documents/classroom.v1.json @@ -2400,7 +2400,7 @@ } } }, -"revision": "20240423", +"revision": "20240429", "rootUrl": "https://classroom.googleapis.com/", "schemas": { "Announcement": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1.json index e3188490a27..ac17e0744f8 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1.json @@ -1095,7 +1095,7 @@ } } }, -"revision": "20240419", +"revision": "20240504", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json index 72e925219b9..b8cdb387a23 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json @@ -411,7 +411,7 @@ } } }, -"revision": "20240419", +"revision": "20240504", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json index aba5680a1c2..f0312ffbb83 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json +++ 
b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json @@ -207,7 +207,7 @@ } } }, -"revision": "20240419", +"revision": "20240504", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json index 7b321897424..f1e7ad804fe 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json @@ -177,7 +177,7 @@ } } }, -"revision": "20240419", +"revision": "20240504", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json index d39d20d2e26..44eb5108290 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json @@ -167,7 +167,7 @@ } } }, -"revision": "20240419", +"revision": "20240504", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json index 44844f25b27..c2b59e23d21 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json @@ -2346,7 +2346,7 @@ } } }, -"revision": "20240415", +"revision": "20240427", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ApprovalConfig": { @@ -2804,6 +2804,10 @@ "readOnly": true, "type": "string" }, +"gitConfig": { +"$ref": "GitConfig", +"description": "Optional. Configuration for git operations." +}, "id": { "description": "Output only. 
Unique identifier of the build.", "readOnly": true, @@ -3734,6 +3738,25 @@ false }, "type": "object" }, +"DeveloperConnectConfig": { +"description": "This config defines the location of a source through Developer Connect.", +"id": "DeveloperConnectConfig", +"properties": { +"dir": { +"description": "Required. Directory, relative to the source root, in which to run the build.", +"type": "string" +}, +"gitRepositoryLink": { +"description": "Required. The Developer Connect Git repository link, formatted as `projects/*/locations/*/connections/*/gitRepositoryLink/*`.", +"type": "string" +}, +"revision": { +"description": "Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref.", +"type": "string" +} +}, +"type": "object" +}, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", @@ -3787,6 +3810,17 @@ false }, "type": "object" }, +"GitConfig": { +"description": "GitConfig is a configuration for git operations.", +"id": "GitConfig", +"properties": { +"http": { +"$ref": "HttpConfig", +"description": "Configuration for HTTP related git operations." +} +}, +"type": "object" +}, "GitFileSource": { "description": "GitFileSource describes a file within a (possibly remote) code repository.", "id": "GitFileSource", @@ -4256,6 +4290,17 @@ false }, "type": "object" }, +"HttpConfig": { +"description": "HttpConfig is a configuration for HTTP related git operations.", +"id": "HttpConfig", +"properties": { +"proxySecretVersionName": { +"description": "SecretVersion resource of the HTTP proxy URL. 
The proxy URL should be in format protocol://@]proxyhost[:port].", +"type": "string" +} +}, +"type": "object" +}, "InlineSecret": { "description": "Pairs a set of secret environment variables mapped to encrypted values with the Cloud KMS key to use to decrypt the value.", "id": "InlineSecret", @@ -5001,6 +5046,10 @@ false "$ref": "ConnectedRepository", "description": "Optional. If provided, get the source from this 2nd-gen Google Cloud Build repository resource." }, +"developerConnectConfig": { +"$ref": "DeveloperConnectConfig", +"description": "If provided, get the source from this Developer Connect config." +}, "gitSource": { "$ref": "GitSource", "description": "If provided, get the source from this Git repository." diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json index 97eef6e5e77..2002ca785ee 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json @@ -844,7 +844,7 @@ } } }, -"revision": "20240415", +"revision": "20240427", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "AuditConfig": { @@ -1872,13 +1872,15 @@ "RESOLVER_NAME_UNSPECIFIED", "BUNDLES", "GCB_REPO", -"GIT" +"GIT", +"DEVELOPER_CONNECT" ], "enumDescriptions": [ "Default enum type; should not be used.", "Bundles resolver. https://tekton.dev/docs/pipelines/bundle-resolver/", "GCB repo resolver.", -"Simple Git resolver. https://tekton.dev/docs/pipelines/git-resolver/" +"Simple Git resolver. https://tekton.dev/docs/pipelines/git-resolver/", +"Developer Connect resolver." ], "type": "string" } @@ -2721,6 +2723,17 @@ "description": "Name of the container specified as a DNS_LABEL.", "type": "string" }, +"params": { +"description": "Optional. Optional parameters passed to the StepAction.", +"items": { +"$ref": "Param" +}, +"type": "array" +}, +"ref": { +"$ref": "StepRef", +"description": "Optional. 
Optional reference to a remote StepAction." +}, "script": { "description": "The contents of an executable file to execute.", "type": "string" @@ -2748,6 +2761,42 @@ }, "type": "object" }, +"StepRef": { +"description": "A reference to a remote Step, i.e. a StepAction.", +"id": "StepRef", +"properties": { +"name": { +"description": "Optional. Name of the step.", +"type": "string" +}, +"params": { +"description": "Optional. Parameters used to control the resolution.", +"items": { +"$ref": "Param" +}, +"type": "array" +}, +"resolver": { +"description": "Optional. Type of the resolver.", +"enum": [ +"RESOLVER_NAME_UNSPECIFIED", +"BUNDLES", +"GCB_REPO", +"GIT", +"DEVELOPER_CONNECT" +], +"enumDescriptions": [ +"Default enum type; should not be used.", +"Bundles resolver. https://tekton.dev/docs/pipelines/bundle-resolver/", +"GCB repo resolver.", +"Simple Git resolver. https://tekton.dev/docs/pipelines/git-resolver/", +"Developer Connect resolver." +], +"type": "string" +} +}, +"type": "object" +}, "StepTemplate": { "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", "id": "StepTemplate", @@ -2791,13 +2840,15 @@ "RESOLVER_NAME_UNSPECIFIED", "BUNDLES", "GCB_REPO", -"GIT" +"GIT", +"DEVELOPER_CONNECT" ], "enumDescriptions": [ "Default enum type; should not be used.", "Bundles resolver. https://tekton.dev/docs/pipelines/bundle-resolver/", "GCB repo resolver.", -"Simple Git resolver. https://tekton.dev/docs/pipelines/git-resolver/" +"Simple Git resolver. https://tekton.dev/docs/pipelines/git-resolver/", +"Developer Connect resolver." ], "type": "string" } @@ -2838,6 +2889,10 @@ "Object type" ], "type": "string" +}, +"value": { +"$ref": "ParamValue", +"description": "Optional. Optionally used to initialize a Task's result with a Step's result." 
} }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json index 30fd2fbf820..b39e4a44d10 100644 --- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json @@ -2183,7 +2183,7 @@ } } }, -"revision": "20240427", +"revision": "20240505", "rootUrl": "https://cloudchannel.googleapis.com/", "schemas": { "GoogleCloudChannelV1ActivateEntitlementRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1.json b/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1.json index 52e03feba18..698713c2029 100644 --- a/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1.json @@ -482,7 +482,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://cloudcontrolspartner.googleapis.com/", "schemas": { "AccessApprovalRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1beta.json b/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1beta.json index e745755890c..46c7c7c3a9d 100644 --- a/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1beta.json +++ b/googleapiclient/discovery_cache/documents/cloudcontrolspartner.v1beta.json @@ -482,7 +482,7 @@ } } }, -"revision": "20240417", +"revision": "20240424", "rootUrl": "https://cloudcontrolspartner.googleapis.com/", "schemas": { "AccessApprovalRequest": { diff --git a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json index 038f2df1294..115589f709b 100644 --- a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json +++ b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json @@ -2065,7 +2065,7 @@ } } }, -"revision": "20240417", +"revision": "20240428", 
"rootUrl": "https://clouddeploy.googleapis.com/", "schemas": { "AbandonReleaseRequest": { @@ -2181,7 +2181,7 @@ "id": "AnthosCluster", "properties": { "membership": { -"description": "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", +"description": "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", "type": "string" } }, @@ -3451,7 +3451,7 @@ true "id": "GkeCluster", "properties": { "cluster": { -"description": "Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", +"description": "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", "type": "string" }, "internalIp": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json index e9570108a06..f97deb3a41a 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json @@ -552,7 +552,7 @@ } } }, -"revision": "20240418", +"revision": "20240425", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json index 442082a2f92..192464fb61e 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json @@ -716,7 +716,7 @@ } } }, -"revision": "20240418", +"revision": "20240425", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AbortFunctionUpgradeRequest": { diff --git 
a/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json index c4fdefaa7bb..b8b687c9ff2 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json @@ -716,7 +716,7 @@ } } }, -"revision": "20240418", +"revision": "20240425", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AbortFunctionUpgradeRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json index a32b3280971..1dc884968c9 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json @@ -716,7 +716,7 @@ } } }, -"revision": "20240418", +"revision": "20240425", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AbortFunctionUpgradeRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json index 57fd5b69b16..e13a2eab182 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json @@ -1990,7 +1990,7 @@ } } }, -"revision": "20240423", +"revision": "20240430", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json index 8d104896934..476cafea513 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json @@ -2015,7 +2015,7 @@ } } }, -"revision": "20240423", +"revision": "20240430", "rootUrl": 
"https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 3edc060caf8..8e947748add 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -125,7 +125,100 @@ }, "protocol": "rest", "resources": { +"folders": { +"methods": { +"getAutokeyConfig": { +"description": "Returns the AutokeyConfig for a folder.", +"flatPath": "v1/folders/{foldersId}/autokeyConfig", +"httpMethod": "GET", +"id": "cloudkms.folders.getAutokeyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.", +"location": "path", +"pattern": "^folders/[^/]+/autokeyConfig$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "AutokeyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"updateAutokeyConfig": { +"description": "Updates the AutokeyConfig for a folder. The caller must have both `cloudkms.autokeyConfigs.update` permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` permission on the provided key project. An empty key project may be provided to clear the configuration.", +"flatPath": "v1/folders/{foldersId}/autokeyConfig", +"httpMethod": "PATCH", +"id": "cloudkms.folders.updateAutokeyConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.", +"location": "path", +"pattern": "^folders/[^/]+/autokeyConfig$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Required. Masks which fields of the AutokeyConfig to update, e.g. 
`keyProject`.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "AutokeyConfig" +}, +"response": { +"$ref": "AutokeyConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +} +} +}, "projects": { +"methods": { +"showEffectiveAutokeyConfig": { +"description": "Returns the effective Cloud KMS Autokey configuration for a given project.", +"flatPath": "v1/projects/{projectsId}:showEffectiveAutokeyConfig", +"httpMethod": "GET", +"id": "cloudkms.projects.showEffectiveAutokeyConfig", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. Name of the resource project to the show effective Cloud KMS Autokey configuration for. This may be helpful for interrogating the effect of nested folder configurations on a given resource project.", +"location": "path", +"pattern": "^projects/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}:showEffectiveAutokeyConfig", +"response": { +"$ref": "ShowEffectiveAutokeyConfigResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +} +}, "resources": { "locations": { "methods": { @@ -645,6 +738,101 @@ } } }, +"keyHandles": { +"methods": { +"create": { +"description": "Creates a new KeyHandle, triggering the provisioning of a new CryptoKey for CMEK use with the given resource type in the configured key project and the same location. GetOperation should be used to resolve the resulting long-running operation and get the resulting KeyHandle and CryptoKey.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyHandles", +"httpMethod": "POST", +"id": "cloudkms.projects.locations.keyHandles.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"keyHandleId": { +"description": "Optional. Id of the KeyHandle. Must be unique to the resource project and location. 
If not provided by the caller, a new UUID is used.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Name of the resource project and location to create the KeyHandle in, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/keyHandles", +"request": { +"$ref": "KeyHandle" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"get": { +"description": "Returns the KeyHandle.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyHandles/{keyHandlesId}", +"httpMethod": "GET", +"id": "cloudkms.projects.locations.keyHandles.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the KeyHandle resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/keyHandles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "KeyHandle" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +}, +"list": { +"description": "Lists KeyHandles.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyHandles", +"httpMethod": "GET", +"id": "cloudkms.projects.locations.keyHandles.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. Filter to apply when listing KeyHandles, e.g. `resource_type_selector=\"{SERVICE}.googleapis.com/{TYPE}\"`.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Name of the resource project and location from which to list KeyHandles, e.g. 
`projects/{PROJECT_ID}/locations/{LOCATION}`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/keyHandles", +"response": { +"$ref": "ListKeyHandlesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +} +} +}, "keyRings": { "methods": { "create": { @@ -1832,13 +2020,43 @@ } } } +}, +"operations": { +"methods": { +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "cloudkms.projects.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/cloudkms" +] +} +} } } } } } }, -"revision": "20240418", +"revision": "20240425", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { @@ -2018,6 +2236,21 @@ }, "type": "object" }, +"AutokeyConfig": { +"description": "Cloud KMS Autokey configuration for a folder.", +"id": "AutokeyConfig", +"properties": { +"keyProject": { +"description": "Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision new CryptoKeys. On UpdateAutokeyConfig, the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on this key project. 
Once configured, for Cloud KMS Autokey to function properly, this key project must have the Cloud KMS API activated and the Cloud KMS Service Agent for this key project must be granted the `cloudkms.admin` role (or pertinent permissions).", +"type": "string" +}, +"name": { +"description": "Identifier. Name of the AutokeyConfig resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.", +"type": "string" +} +}, +"type": "object" +}, "Binding": { "description": "Associates `members`, or principals, with a `role`.", "id": "Binding", @@ -2238,6 +2471,7 @@ "EC_SIGN_P256_SHA256", "EC_SIGN_P384_SHA384", "EC_SIGN_SECP256K1_SHA256", +"EC_SIGN_ED25519", "HMAC_SHA256", "HMAC_SHA1", "HMAC_SHA384", @@ -2275,6 +2509,7 @@ "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", +"EdDSA on the Curve25519 in pure mode (taking data as input).", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -2442,6 +2677,7 @@ "EC_SIGN_P256_SHA256", "EC_SIGN_P384_SHA384", "EC_SIGN_SECP256K1_SHA256", +"EC_SIGN_ED25519", "HMAC_SHA256", "HMAC_SHA1", "HMAC_SHA384", @@ -2479,6 +2715,7 @@ "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the NIST P-384 curve with a SHA384 digest. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", +"EdDSA on the Curve25519 in pure mode (taking data as input).", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -2861,6 +3098,7 @@ "EC_SIGN_P256_SHA256", "EC_SIGN_P384_SHA384", "EC_SIGN_SECP256K1_SHA256", +"EC_SIGN_ED25519", "HMAC_SHA256", "HMAC_SHA1", "HMAC_SHA384", @@ -2898,6 +3136,7 @@ "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", +"EdDSA on the Curve25519 in pure mode (taking data as input).", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -3031,6 +3270,27 @@ }, "type": "object" }, +"KeyHandle": { +"description": "Resource-oriented representation of a request to Cloud KMS Autokey and the resulting provisioning of a CryptoKey.", +"id": "KeyHandle", +"properties": { +"kmsKey": { +"description": "Output only. 
Name of a CryptoKey that has been provisioned for Customer Managed Encryption Key (CMEK) use in the KeyHandle's project and location for the requested resource type.", +"readOnly": true, +"type": "string" +}, +"name": { +"description": "Output only. Identifier. Name of the [KeyHandle] resource, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.", +"readOnly": true, +"type": "string" +}, +"resourceTypeSelector": { +"description": "Required. Indicates the resource type that the resulting CryptoKey is meant to protect, e.g. `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource types.", +"type": "string" +} +}, +"type": "object" +}, "KeyOperationAttestation": { "description": "Contains an HSM-generated attestation about a key operation. For more information, see [Verifying attestations] (https://cloud.google.com/kms/docs/attest-key).", "id": "KeyOperationAttestation", @@ -3174,6 +3434,20 @@ }, "type": "object" }, +"ListKeyHandlesResponse": { +"description": "Response message for Autokey.ListKeyHandles.", +"id": "ListKeyHandlesResponse", +"properties": { +"keyHandles": { +"description": "Resulting KeyHandles.", +"items": { +"$ref": "KeyHandle" +}, +"type": "array" +} +}, +"type": "object" +}, "ListKeyRingsResponse": { "description": "Response message for KeyManagementService.ListKeyRings.", "id": "ListKeyRingsResponse", @@ -3396,6 +3670,41 @@ }, "type": "object" }, +"Operation": { +"description": "This resource represents a long-running operation that is the result of a network API call.", +"id": "Operation", +"properties": { +"done": { +"description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", +"type": "boolean" +}, +"error": { +"$ref": "Status", +"description": "The error result of the operation in case of failure or cancellation." 
+}, +"metadata": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", +"type": "object" +}, +"name": { +"description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", +"type": "string" +}, +"response": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", +"type": "object" +} +}, +"type": "object" +}, "Policy": { "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. 
For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", @@ -3463,6 +3772,7 @@ "EC_SIGN_P256_SHA256", "EC_SIGN_P384_SHA384", "EC_SIGN_SECP256K1_SHA256", +"EC_SIGN_ED25519", "HMAC_SHA256", "HMAC_SHA1", "HMAC_SHA384", @@ -3500,6 +3810,7 @@ "ECDSA on the NIST P-256 curve with a SHA256 digest. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", +"EdDSA on the Curve25519 in pure mode (taking data as input).", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -3783,6 +4094,44 @@ }, "type": "object" }, +"ShowEffectiveAutokeyConfigResponse": { +"description": "Response message for ShowEffectiveAutokeyConfig.", +"id": "ShowEffectiveAutokeyConfigResponse", +"properties": { +"keyProject": { +"description": "Name of the key project configured in the resource project's folder ancestry.", +"type": "string" +} +}, +"type": "object" +}, +"Status": { +"description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", +"id": "Status", +"properties": { +"code": { +"description": "The status code, which should be an enum value of google.rpc.Code.", +"format": "int32", +"type": "integer" +}, +"details": { +"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", +"items": { +"additionalProperties": { +"description": "Properties of the object. 
Contains field @type with type URL.", +"type": "any" +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", +"type": "string" +} +}, +"type": "object" +}, "TestIamPermissionsRequest": { "description": "Request message for `TestIamPermissions` method.", "id": "TestIamPermissionsRequest", diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json index 4a878211c92..c7903ecf66b 100644 --- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json @@ -254,7 +254,7 @@ } } }, -"revision": "20240423", +"revision": "20240429", "rootUrl": "https://cloudprofiler.googleapis.com/", "schemas": { "CreateProfileRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json index c6fa6d9e612..34ee63c1c81 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json @@ -450,7 +450,7 @@ } } }, -"revision": "20240421", +"revision": "20240428", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json index c99a1dd26da..0cabde5b154 100644 --- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json @@ -374,7 +374,7 @@ } } }, -"revision": "20240422", +"revision": "20240429", "rootUrl": "https://cloudshell.googleapis.com/", "schemas": { "AddPublicKeyMetadata": { 
diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json index bf4b3944768..a5d0d2d6340 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json @@ -552,7 +552,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json index 36172c04748..42f4213874f 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json @@ -415,7 +415,7 @@ "type": "integer" }, "pageToken": { -"description": "Optional. A token identifying the page of results to return. If unspecified, the first page is retrieved.", +"description": "Optional. A token identifying the page of results to return. If unspecified, it retrieves the first page.", "location": "query", "type": "string" }, @@ -619,7 +619,7 @@ } } }, -"revision": "20240428", +"revision": "20240505", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { @@ -1128,7 +1128,7 @@ }, "bodyContent": { "$ref": "TextContent", -"description": "Output only. The full email message body in both plaintext and richtext. The plaintext field of body_content will be elided in a best-effort attempt to remove extraneous reply threads.", +"description": "Output only. The full email message body. A best-effort attempt is made to remove extraneous reply threads.", "readOnly": true }, "ccEmailAddresses": { @@ -1561,15 +1561,11 @@ "type": "object" }, "TextContent": { -"description": "Used for content on cases/comments/etc. 
On input either plain_text or rich_text should be supplied but not both.", +"description": "Stores text attached to a support object.", "id": "TextContent", "properties": { "plainText": { -"description": "Content in this field should be rendered and interpreted as-is. If not provided on input, then rich_text must be provided and this field will contain the plain text extracted from the rich_text input.", -"type": "string" -}, -"richText": { -"description": "Content in this field should be rendered and interpreted as HTML. If not provided on input, then plain_text must be provided and this field will contain the escaped plain text content. Only a subset of HTML tags and styles are allowed on input, all other tags will be stripped/sanitized. Output will always contain safe and valid HTML.", +"description": "Content in this field should be rendered and interpreted as-is.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json index 65be545c2be..e7d247dcb97 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v1.json @@ -257,7 +257,7 @@ } } }, -"revision": "20240419", +"revision": "20240426", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json index 7abceffbd3a..80a2001cf08 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2.json @@ -181,7 +181,7 @@ } } }, -"revision": "20240419", +"revision": "20240426", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json index 
6520d9e3648..144f4b9862e 100644 --- a/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudtrace.v2beta1.json @@ -273,7 +273,7 @@ } } }, -"revision": "20240419", +"revision": "20240426", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json index 1371e037edf..23bccacacf3 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1.json @@ -938,7 +938,7 @@ } } }, -"revision": "20240415", +"revision": "20240426", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json index b3999dd52ce..0f65e7a36ae 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json @@ -994,7 +994,7 @@ } } }, -"revision": "20240415", +"revision": "20240426", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index d3226fbf3d7..e8f965bdd4e 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -41579,7 +41579,7 @@ } } }, -"revision": "20240421", +"revision": "20240430", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -42156,7 +42156,7 @@ false "type": "string" }, "publicPtrDomainName": { -"description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. 
If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.", +"description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for first IP in associated external IPv6 range.", "type": "string" }, "securityPolicy": { @@ -46588,6 +46588,7 @@ false "enum": [ "ACCELERATOR_OPTIMIZED", "ACCELERATOR_OPTIMIZED_A3", +"ACCELERATOR_OPTIMIZED_A3_MEGA", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", "COMPUTE_OPTIMIZED_C3", @@ -46623,6 +46624,7 @@ false "", "", "", +"", "" ], "type": "string" @@ -52042,7 +52044,7 @@ false "type": "string" }, "requestPath": { -"description": "The request path of the HTTP/2 health check request. The default value is /.", +"description": "The request path of the HTTP/2 health check request. The default value is /. Must comply with RFC3986.", "type": "string" }, "response": { @@ -52095,7 +52097,7 @@ false "type": "string" }, "requestPath": { -"description": "The request path of the HTTP health check request. The default value is /.", +"description": "The request path of the HTTP health check request. The default value is /. Must comply with RFC3986.", "type": "string" }, "response": { @@ -52148,7 +52150,7 @@ false "type": "string" }, "requestPath": { -"description": "The request path of the HTTPS health check request. The default value is /.", +"description": "The request path of the HTTPS health check request. The default value is /. Must comply with RFC3986.", "type": "string" }, "response": { @@ -52220,7 +52222,7 @@ false "type": "string" }, "sourceRegions": { -"description": "The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of GCP regions. 
This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. ", +"description": "The list of cloud regions from which health checks are performed. If any regions are specified, then exactly 3 regions should be specified. The region names must be valid names of Google Cloud regions. This can only be set for global health check. If this list is non-empty, then there are restrictions on what other health check fields are supported and what other resources can use this health check: - SSL, HTTP2, and GRPC protocols are not supported. - The TCP request field is not supported. - The proxyHeader field for HTTP, HTTPS, and TCP is not supported. - The checkIntervalSec field must be at least 30. - The health check cannot be used with BackendService nor with managed instance group auto-healing. ", "items": { "type": "string" }, @@ -71429,6 +71431,7 @@ false "TPU_LITE_PODSLICE_V5", "TPU_PODSLICE_V4", "URL_MAPS", +"VARIABLE_IPV6_PUBLIC_DELEGATED_PREFIXES", "VPN_GATEWAYS", "VPN_TUNNELS", "XPN_SERVICE_PROJECTS" @@ -71597,6 +71600,7 @@ false "", "", "", +"", "" ], "type": "string" @@ -78168,6 +78172,11 @@ false "description": "[Output Only] Server-defined URL for this resource with the resource id.", "type": "string" }, +"shortName": { +"description": "User-provided name of the organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is CLOUD_ARMOR. 
The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"type": "string" +}, "type": { "description": "The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. 
This field can be set only at resource creation time.", "enum": [ @@ -78401,6 +78410,20 @@ false "description": "[Output Only] The display name of the security policy of the association.", "type": "string" }, +"excludedFolders": { +"description": "A list of folders to exclude from the security policy.", +"items": { +"type": "string" +}, +"type": "array" +}, +"excludedProjects": { +"description": "A list of projects to exclude from the security policy.", +"items": { +"type": "string" +}, +"type": "array" +}, "name": { "description": "The name for an association.", "type": "string" @@ -78408,6 +78431,10 @@ false "securityPolicyId": { "description": "[Output Only] The security policy ID of the association.", "type": "string" +}, +"shortName": { +"description": "[Output Only] The short name of the security policy of the association.", +"type": "string" } }, "type": "object" @@ -80611,7 +80638,7 @@ false "additionalProperties": { "$ref": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference" }, -"description": "When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are GCS bucket locations.", +"description": "When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are Cloud Storage bucket locations. Only one location can be specified.", "type": "object" }, "policy": { @@ -80624,7 +80651,7 @@ false ], "enumDescriptions": [ "Store snapshot in the same region as with the originating disk. No additional parameters are needed.", -"Store snapshot to the nearest multi region GCS bucket, relative to the originating disk. No additional parameters are needed.", +"Store snapshot in the nearest multi region Cloud Storage bucket, relative to the originating disk. No additional parameters are needed.", "Store snapshot in the specific locations, as specified by the user. 
The list of regions to store must be defined under the `locations` field.", "" ], @@ -80638,7 +80665,7 @@ false "id": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference", "properties": { "name": { -"description": "Name of the location. It should be one of the GCS buckets.", +"description": "Name of the location. It should be one of the Cloud Storage buckets. Only one location can be specified.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/config.v1.json b/googleapiclient/discovery_cache/documents/config.v1.json index 1d1ca998aa4..a810a1b97cc 100644 --- a/googleapiclient/discovery_cache/documents/config.v1.json +++ b/googleapiclient/discovery_cache/documents/config.v1.json @@ -1187,7 +1187,7 @@ } } }, -"revision": "20240410", +"revision": "20240424", "rootUrl": "https://config.googleapis.com/", "schemas": { "ApplyResults": { @@ -1308,7 +1308,7 @@ "additionalProperties": { "type": "string" }, -"description": "Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations.", +"description": "Optional. Arbitrary key-value metadata storage e.g. to help client tools identify deployments during automation. See https://google.aip.dev/148#annotations for details on format and size limitations.", "type": "object" }, "artifactsGcsBucket": { @@ -1996,6 +1996,13 @@ "description": "A preview represents a set of actions Infra Manager would perform to move the resources towards the desired state as specified in the configuration.", "id": "Preview", "properties": { +"annotations": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. Arbitrary key-value metadata storage e.g. to help client tools identifiy preview during automation. 
See https://google.aip.dev/148#annotations for details on format and size limitations.", +"type": "object" +}, "artifactsGcsBucket": { "description": "Optional. User-defined location of Cloud Build logs, artifacts, and in Google Cloud Storage. Format: `gs://{bucket}/{folder}` A default bucket will be bootstrapped if the field is not set or empty Default Bucket Format: `gs://--blueprint-config` Constraints: - The bucket needs to be in the same project as the deployment - The path cannot be within the path of `gcs_source` If omitted and deployment resource ref provided has artifacts_gcs_bucket defined, that artifact bucket is used.", "type": "string" @@ -2124,6 +2131,15 @@ "readOnly": true, "type": "array" }, +"tfVersion": { +"description": "Output only. The current Terraform version set on the preview. It is in the format of \"Major.Minor.Patch\", for example, \"1.3.10\".", +"readOnly": true, +"type": "string" +}, +"tfVersionConstraint": { +"description": "Optional. The user-specified Terraform version constraint. Example: \"=1.3.10\".", +"type": "string" +}, "workerPool": { "description": "Optional. The user-specified Worker Pool resource in which the Cloud Build job will execute. Format projects/{project}/locations/{location}/workerPools/{workerPoolId} If this field is unspecified, the default Cloud Build worker pool will be used. 
If omitted and deployment resource ref provided has worker_pool defined, that worker pool is used.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json index 3a23e1104ec..ad26495f75b 100644 --- a/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json +++ b/googleapiclient/discovery_cache/documents/contactcenterinsights.v1.json @@ -1473,7 +1473,7 @@ } } }, -"revision": "20240420", +"revision": "20240503", "rootUrl": "https://contactcenterinsights.googleapis.com/", "schemas": { "GoogleCloudContactcenterinsightsV1AgentCoachingInstruction": { diff --git a/googleapiclient/discovery_cache/documents/content.v2.1.json b/googleapiclient/discovery_cache/documents/content.v2.1.json index 026c79228b1..07907e00f49 100644 --- a/googleapiclient/discovery_cache/documents/content.v2.1.json +++ b/googleapiclient/discovery_cache/documents/content.v2.1.json @@ -6219,7 +6219,7 @@ } } }, -"revision": "20240428", +"revision": "20240502", "rootUrl": "https://shoppingcontent.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json index aa814a3af2a..b45cfdb622c 100644 --- a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json +++ b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json @@ -1156,149328 +1156,4450 @@ } } }, -"revision": "20240412", +"revision": "20240506", "rootUrl": "https://contentwarehouse.googleapis.com/", "schemas": { -"AbuseiamAbuseType": { -"id": "AbuseiamAbuseType", +"CloudAiPlatformTenantresourceCloudSqlInstanceConfig": { +"description": "The identity to configure a CloudSQL instance provisioned via SLM Terraform.", +"id": "CloudAiPlatformTenantresourceCloudSqlInstanceConfig", "properties": { -"id": { -"enum": [ -"NONE", -"OTHER_ABUSE", -"CHILD_PORN", -"PORNOGRAPHY", 
-"SPAM", -"PHISHING", -"HATE", -"TOS_OTHER", -"MALWARE", -"MALICIOUS_JAVASCRIPT", -"NOT_FAMILY_SAFE", -"IMPERSONATION", -"PEDOPHILIA", -"PERSONAL_INFO", -"COPYRIGHT", -"HIGH_RISK", -"VIOLENCE", -"UNSAFE_RACY", -"UNSAFE_OTHER", -"FAKE_USER", -"NAME_VIOLATION", -"PLUSONE_VIOLATION", -"DEFAMATION", -"TRADEMARK", -"COURT_ORDER", -"GOVERNMENT_ORDER", -"LOCAL_LAWS", -"PRIVACY", -"ES_BLACKLIST", -"ES_COMMENTS_BLACKLIST", -"HARASSMENT", -"COMPROMISED", -"LOW_QUALITY", -"API_VIOLATION", -"REGULATED", -"CAROUSEL_FRAME_BLACKLIST", -"QUOTA_EXCEEDED", -"FOUNTAIN_BLACKLIST", -"COPPA_REGULATED", -"DOXXING", -"SOFT_HATE", -"SOFT_HARASSMENT", -"OBNOXIOUS", -"UNWANTED", -"NOT_UNICORN_SAFE", -"FAKE_ENGAGEMENT", -"COUNTERFEIT", -"CTM" -], -"enumDescriptions": [ -"no abuse", -"generic abuse that isn't enumerated here", -"illegal child porn images", -"", -"", -"", -"", -"To be only used for Multi-Image Review.", -"MALWARE includes viruses, worms, trojans, adware and even potentially unwanted software which may not necessarily be harmful.", -"", -"DEPRECATED - For safe searches, etc.", -"", -"Not illegal, but image or text content", -"promoting or related to young pornography Personally identifiable information", -"", -"High risk reviews for any client.", -"", -"Content that is unsafe because it is sexually", -"suggestive/racy Content that is unsafe for other reasons than", -"being sexually suggestive/racy. Category for users who are not real", -"User's name that violates policy.", -"Violates PLUSONE policy.", -"Defamation claims", -"Trademark violations where Google could be", -"liable Third-party court orders", -"Government request, regardless of reason", -"Other misc country-specific laws", -"Local privacy laws", -"Internal type to check es content against", -"the ocelot blocklist. Internal type.", -"Consistent harassing behaviour directed", -"towards a person. 
Hijacked or Compromised profile.", -"Low quality/Annoying content.", -"Violates APIARY policy.", -"Contains regulated products and services, such as pharmaceuticals, alcohol, tobacco, etc. For details, https://sites.google.com/a/google.com/crt-policy-site/regulated", -"Internal type.", -"Quota exceeded for the action.", -"Internal type.", -"Contains content regulated by COPPA.", -"Revealing personal/protected information.", -"Pattern of behavior verdicts. Used to aggregate verdicts that aren't typically bad enough for one of the above verdicts, but something we want to keep track of in cases of rotten behavior. Hate without incitement", -"Harassment that does meet the harm bar.", -"Obnoxious, Lousy behavior, not against TOS.", -"Content targeted at a person who considered", -"Not safe for kids.", -"Engagements that are not organic.", -"Promotion of counterfeit product claims.", -"Circumvention of Technological measures" -], +"cloudSqlInstanceConnectionName": { +"description": "Output only. The CloudSQL instance connection name.", "type": "string" }, -"subtype": { -"description": "Optional client specific subtype of abuse that is too specific to belong in the above enumeration. For example, some client may want to differentiate nudity from graphic sex, but both are PORNOGRAPHY.", +"cloudSqlInstanceName": { +"description": "Input/Output [Optional]. The CloudSQL instance name within SLM instance. If not set, a random UUIC will be generated as instance name.", "type": "string" -} -}, -"type": "object" -}, -"AbuseiamAgeRestriction": { -"id": "AbuseiamAgeRestriction", -"properties": { -"ageYears": { -"description": "This restriction applies if the user is between [min_age_years, age_years) years old.", -"format": "int32", -"type": "integer" -}, -"minAgeYears": { -"format": "int32", -"type": "integer" -} }, -"type": "object" +"kmsKeyReference": { +"description": "Input [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. 
Only set this field when provisioning new CloudSQL instances. For existing CloudSQL instances, this field will be ignored because CMEK re-encryption is not supported.", +"type": "string" }, -"AbuseiamAndRestriction": { -"id": "AbuseiamAndRestriction", -"properties": { -"child": { -"description": "This restriction applies if all of the children apply.", +"mdbRolesForCorpAccess": { +"description": "Input [Optional]. MDB roles for corp access to CloudSQL instance.", "items": { -"$ref": "AbuseiamUserRestriction" +"type": "string" }, "type": "array" -} }, -"type": "object" -}, -"AbuseiamClient": { -"description": "A client is be a Google product, or subproduct that provides content for AbuseIAm to classify.", -"id": "AbuseiamClient", -"properties": { -"id": { -"enum": [ -"ABUSE_TEAM", -"SEARCH", -"GROUPS", -"PROFILES", -"WRITELY", -"EXPLORER", -"URL_SHORTENER", -"USENET", -"PHOTOS", -"ANDROID_MARKET", -"ES", -"BABEL", -"STRATUS", -"GMAIL", -"GJOBS" -], -"enumDescriptions": [ -"", -"Web Search.", -"", -"", -"Being superseded by KIX soon.", -"Blobs stored by explorer.", -"", -"", -"", -"", -"Emerald Sea.", -"", -"Stratus uses the corpus name as subservice.", -"Gmail.", -"Garage, or G+ Jobs." -], +"slmInstanceName": { +"description": "Output only. The SLM instance's full resource name.", "type": "string" }, -"subservice": { -"description": "The name of the subservice within a client. This subservice can be used to affect the flow of decision script, or selection of backend classifiers. For example, StreetView may want to specify a panel is insufficiently blurred (maybe there is a lisense plate or public sex, etc), which requires manual review then the subservice might be \"blurring\".", +"slmInstanceTemplate": { +"description": "Input [Required]. 
The SLM instance template to provision CloudSQL.", "type": "string" -} -}, -"type": "object" }, -"AbuseiamConstantRestriction": { -"id": "AbuseiamConstantRestriction", -"properties": { -"type": { -"description": "A constant of type TRUE always applies, and of type FALSE never applies.", -"enum": [ -"ALWAYS_TRUE", -"ALWAYS_FALSE" -], -"enumDescriptions": [ -"", -"" -], +"slmInstanceType": { +"description": "Input [Required]. The SLM instance type to provision CloudSQL.", "type": "string" } }, "type": "object" }, -"AbuseiamContentRestriction": { -"description": "Pair of Verdicts used for ProjectR age/geo gating. See http://go/projectr for more information.", -"id": "AbuseiamContentRestriction", +"CloudAiPlatformTenantresourceGcsBucketConfig": { +"description": "The identity to configure a GCS bucket.", +"id": "CloudAiPlatformTenantresourceGcsBucketConfig", "properties": { -"adminVerdict": { -"description": "Takedowns specified by admins via AbuseIAm", +"admins": { "items": { -"$ref": "AbuseiamVerdict" +"type": "string" }, "type": "array" }, -"userVerdict": { -"description": "User-specified takedowns", +"bucketName": { +"description": "Input/Output [Optional]. The name of a GCS bucket with max length of 63 chars. If not set, a random UUID will be generated as bucket name.", +"type": "string" +}, +"entityName": { +"description": "Input/Output [Optional]. Only needed for per-entity tenant GCP resources. During Deprovision API, the on-demand deletion will only cover the tenant GCP resources with the specified entity name.", +"type": "string" +}, +"kmsKeyReference": { +"description": "Input/Output [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. Only set this field when provisioning new GCS bucket. For existing GCS bucket, this field will be ignored because CMEK re-encryption is not supported.", +"type": "string" +}, +"ttlDays": { +"description": "Input/Output [Optional]. 
Only needed when the content in bucket need to be garbage collected within some amount of days.", +"format": "int32", +"type": "integer" +}, +"viewers": { +"description": "Input/Output [Required]. IAM roles (viewer/admin) put on the bucket.", "items": { -"$ref": "AbuseiamVerdict" +"type": "string" }, "type": "array" } }, "type": "object" }, -"AbuseiamEvaluation": { -"description": "Backends return Evaluations to AbuseIAm. One of the things Evaluations are used for is to explain Verdicts.", -"id": "AbuseiamEvaluation", +"CloudAiPlatformTenantresourceIamPolicyBinding": { +"description": "The dynamic IAM bindings to be granted after tenant projects are created.", +"id": "CloudAiPlatformTenantresourceIamPolicyBinding", "properties": { -"abuseType": { -"$ref": "AbuseiamAbuseType" +"members": { +"description": "Input/Output [Required]. The member service accounts with the roles above. Note: placeholders are same as the resource above.", +"items": { +"type": "string" }, -"backend": { -"description": "Who creates this Evaluation. This field is required.", +"type": "array" +}, +"resource": { +"description": "Input/Output [Required]. The resource name that will be accessed by members, which also depends on resource_type. Note: placeholders are supported in resource names. For example, ${tpn} will be used when the tenant project number is not ready.", +"type": "string" +}, +"resourceType": { +"description": "Input/Output [Required]. 
Specifies the type of resource that will be accessed by members.", "enum": [ -"UNKNOWN", -"ADMIN", -"OCELOT", -"SPAMIAM", -"MANUAL_REVIEW", -"PORN_CLASSIFIER", -"GIBBERISH_DETECTOR", -"BADWORD", -"IMAGE_PORN_CLASSIFIER", -"FIFE_IMAGE_FETCHER", -"RULE", -"FOCUS", -"PATTERNLIST", -"WIGGUM", -"BINARY_EXPLORATION", -"QUOTASERVER", -"YOUTUBE", -"POLICY", -"PHOTO_SERVICE", -"ARES" +"RESOURCE_TYPE_UNSPECIFIED", +"PROJECT", +"SERVICE_ACCOUNT", +"GCS_BUCKET", +"SERVICE_CONSUMER", +"AR_REPO" ], "enumDescriptions": [ "", -"Eval directly from the AbuseIAm server.", -"", -"", -"", -"Text-based porn classifier.", -"Language model-based gibberish detector.", -"Deprecated. Please use PATTERNLIST instead.", -"", -"", -"For miscellaneous rules in decision scripts", -"that are not separate backends.", -"Check texts against a list of regex patterns.", -"Does search-by-image using Wiggum Server.", -"Slow backend. See http://go/bineval.", -"Quota server adapter.", -"Video classification from youtube.", -"For evaluation generated according to policy.", -"Adapter that queries Photo Service.", -"For evaluations generated by Ares." +"The value of resource field is the ID or number of a project. Format is ", +"The value of resource field is the resource name of a service account. Format is projects//serviceAccounts/", +"The value of resource field is the name of a GCS bucket (not its resource name). Format is .", +"The value of resource field is the resource name of a service consumer. Format is services//consumers/", +"The value of the resource field is the AR Image Uri which identifies an AR REPO. Allowed formats are: /// ///: ///@" ], "type": "string" }, -"comment": { -"description": "Backends can choose to put some debug info in addition to abuse_type, score, and status.", +"role": { +"description": "Input/Output [Required]. 
The role for members below.", "type": "string" +} }, -"miscData": { -"description": "This field is used to store miscellaneous information that Backend might provide. If you find youself here considering to use this field, please prefer using the repeated feature field below instead. It supports a richer structure for passing complex data back from the backend.", -"items": { -"$ref": "AbuseiamNameValuePair" +"type": "object" }, -"type": "array" +"CloudAiPlatformTenantresourceInfraSpannerConfig": { +"description": "The configuration for a spanner database provisioning. Next ID: 8", +"id": "CloudAiPlatformTenantresourceInfraSpannerConfig", +"properties": { +"createDatabaseOptions": { +"$ref": "CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions", +"description": "Input [Optional]. The options to create a spanner database. Note: give the right options to ensure the right KMS key access audit logging and AxT logging in expected logging category." }, -"processedMicros": { -"description": "When the evaluation was processed by the decision script.", -"format": "int64", +"kmsKeyReference": { +"description": "Input [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. Only set this field when provisioning new Infra Spanner databases. For existing Infra Spanner databases, this field will be ignored because CMEK re-encryption is not supported. For example, projects//locations//keyRings//cryptoKeys/", "type": "string" }, -"region": { -"description": "The list of regions where the evaluation applies.", -"items": { -"$ref": "AbuseiamRegion" -}, -"type": "array" -}, -"score": { -"format": "double", -"type": "number" -}, -"status": { -"enum": [ -"OK", -"ERROR" -], -"enumDescriptions": [ -"", -"The Backend failed to process the target." -], +"sdlBundlePath": { +"description": "Input [Required]. 
The file path to the spanner SDL bundle.", "type": "string" }, -"target": { -"$ref": "AbuseiamTarget" +"spannerBorgServiceAccount": { +"description": "Input [Optional]. The spanner borg service account for delegating the kms key to. For example, spanner-infra-cmek-nonprod@system.gserviceaccount.com, for the nonprod universe.", +"type": "string" }, -"timestampMicros": { -"description": "When the Evaluation was generated.", -"format": "int64", +"spannerLocalNamePrefix": { "type": "string" }, -"userRestriction": { -"$ref": "AbuseiamUserRestriction", -"description": "A boolean expression tree used to define the restrictions where the verdict applies. Please use java/com/google/ccc/abuse/abuseiam/client/TakedownManager.java to evaluate this proto." +"spannerNamespace": { +"type": "string" }, -"version": { -"description": "Version of Backend. For rules, this string is the only way to differentiate between them.", +"spannerUniverse": { +"description": "Input [Required]. Every database in Spanner can be identified by the following path name: /span//:", "type": "string" } }, "type": "object" }, -"AbuseiamGeoRestriction": { -"description": "A node representing a table of regions and restrictions that apply to those regions. This table understands region inclusion and knows to apply the most specific rule, for example, a rule for France would override a rule for the EU for a user in France.", -"id": "AbuseiamGeoRestriction", +"CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions": { +"description": "The options to create a spanner database. KMS key access audit logging and AxT logging will be associated with the given resource name, resource type and service name. 
Please ensure to give right options to enable correct audit logging and AxT logging.", +"id": "CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions", "properties": { -"locale": { -"items": { -"$ref": "AbuseiamGeoRestrictionLocale" +"cmekCloudResourceName": { +"description": "The cloud resource name for the CMEK encryption. For example, projects//locations/", +"type": "string" }, -"type": "array" +"cmekCloudResourceType": { +"description": "The cloud resource type for the CMEK encryption. For example, contentwarehouse.googleapis.com/Location", +"type": "string" +}, +"cmekServiceName": { +"description": "The service name for the CMEK encryption. For example, contentwarehouse.googleapis.com", +"type": "string" } }, "type": "object" }, -"AbuseiamGeoRestrictionLocale": { -"id": "AbuseiamGeoRestrictionLocale", +"CloudAiPlatformTenantresourceServiceAccountIdentity": { +"description": "The identity to configure a service account.", +"id": "CloudAiPlatformTenantresourceServiceAccountIdentity", "properties": { -"location": { -"description": "The location where the restriction applies. Defaults to the \"The world\". See go/iii.", +"serviceAccountEmail": { +"description": "Output only. The service account email that has been created.", "type": "string" }, -"restriction": { -"$ref": "AbuseiamUserRestriction", -"description": "The UserRestriction that applies to this location. If not specified evaluates to true." +"tag": { +"description": "Input/Output [Optional]. The tag that configures the service account, as defined in google3/configs/production/cdpush/acl-zanzibar-cloud-prod/activation_grants/activation_grants.gcl. 
Note: The default P4 service account has the empty tag.", +"type": "string" } }, "type": "object" }, -"AbuseiamHash": { -"description": "Information about various hashes that can be computed on a message ex: simhash, attachment hash, etc", -"id": "AbuseiamHash", +"CloudAiPlatformTenantresourceTenantProjectConfig": { +"description": "The identity to configure a tenant project.", +"id": "CloudAiPlatformTenantresourceTenantProjectConfig", "properties": { -"hash": { -"description": "64 bit hash in the hex form.", +"billingConfig": { +"$ref": "GoogleApiServiceconsumermanagementV1BillingConfig", +"description": "Input/Output [Required]. The billing account properties to create the tenant project." +}, +"folder": { +"description": "Input/Output [Required]. The folder that holds tenant projects and folder-level permissions will be automatically granted to all tenant projects under the folder. Note: the valid folder format is `folders/{folder_number}`.", "type": "string" }, -"type": { +"policyBindings": { +"description": "Input/Output [Required]. The policy bindings that are applied to the tenant project during creation. At least one binding must have the role `roles/owner` with either `user` or `group` type.", +"items": { +"$ref": "GoogleApiServiceconsumermanagementV1PolicyBinding" +}, +"type": "array" +}, +"services": { +"description": "Input/Output [Required]. The API services that are enabled on the tenant project during creation.", +"items": { "type": "string" +}, +"type": "array" } }, "type": "object" }, -"AbuseiamNameValuePair": { -"id": "AbuseiamNameValuePair", +"CloudAiPlatformTenantresourceTenantProjectResource": { +"description": "The tenant project and tenant resources. 
Next ID: 10", +"id": "CloudAiPlatformTenantresourceTenantProjectResource", "properties": { -"name": { -"type": "string" +"cloudSqlInstances": { +"description": "The CloudSQL instances that are provisioned under the tenant project.", +"items": { +"$ref": "CloudAiPlatformTenantresourceCloudSqlInstanceConfig" }, -"nonUtf8Value": { -"format": "byte", +"type": "array" +}, +"gcsBuckets": { +"description": "The GCS buckets that are provisioned under the tenant project.", +"items": { +"$ref": "CloudAiPlatformTenantresourceGcsBucketConfig" +}, +"type": "array" +}, +"iamPolicyBindings": { +"description": "The dynamic IAM bindings that are granted under the tenant project. Note: this should only add new bindings to the project if they don't exist and the existing bindings won't be affected.", +"items": { +"$ref": "CloudAiPlatformTenantresourceIamPolicyBinding" +}, +"type": "array" +}, +"infraSpannerConfigs": { +"description": "The Infra Spanner databases that are provisioned under the tenant project. Note: this is an experimental feature.", +"items": { +"$ref": "CloudAiPlatformTenantresourceInfraSpannerConfig" +}, +"type": "array" +}, +"tag": { +"description": "Input/Output [Required]. The tag that uniquely identifies a tenant project within a tenancy unit. Note: for the same tenant project tag, all tenant manager operations should be idempotent.", "type": "string" }, -"value": { +"tenantProjectConfig": { +"$ref": "CloudAiPlatformTenantresourceTenantProjectConfig", +"description": "The configurations of a tenant project." +}, +"tenantProjectId": { +"description": "Output only. The tenant project ID that has been created.", "type": "string" -} }, -"type": "object" +"tenantProjectNumber": { +"description": "Output only. The tenant project number that has been created.", +"format": "int64", +"type": "string" }, -"AbuseiamNotRestriction": { -"id": "AbuseiamNotRestriction", -"properties": { -"child": { -"description": "This restriction applies if the child does not apply. 
Only one is allowed. \"repeated\" is used to avoid breaking Sawzall (See b/6758277).", +"tenantServiceAccounts": { +"description": "The service account identities (or enabled API service's P4SA) that are expclicitly created under the tenant project (before JIT provisioning during enabled API services).", "items": { -"$ref": "AbuseiamUserRestriction" +"$ref": "CloudAiPlatformTenantresourceTenantServiceAccountIdentity" }, "type": "array" } }, "type": "object" }, -"AbuseiamOrRestriction": { -"id": "AbuseiamOrRestriction", +"CloudAiPlatformTenantresourceTenantResource": { +"description": "A collection of tenant resources.", +"id": "CloudAiPlatformTenantresourceTenantResource", "properties": { -"child": { -"description": "This restriction applies if any of the children apply.", +"p4ServiceAccounts": { +"description": "A list of P4 service accounts (go/p4sa) to provision or deprovision.", +"items": { +"$ref": "CloudAiPlatformTenantresourceServiceAccountIdentity" +}, +"type": "array" +}, +"tenantProjectResources": { +"description": "A list of tenant projects and tenant resources to provision or deprovision.", "items": { -"$ref": "AbuseiamUserRestriction" +"$ref": "CloudAiPlatformTenantresourceTenantProjectResource" }, "type": "array" } }, "type": "object" }, -"AbuseiamRegion": { -"id": "AbuseiamRegion", +"CloudAiPlatformTenantresourceTenantServiceAccountIdentity": { +"description": "The identity of service accounts that have been explicitly created under tenant projects.", +"id": "CloudAiPlatformTenantresourceTenantServiceAccountIdentity", "properties": { -"region": { -"description": "This is a CLDR Region Code: http://wiki/Main/IIIHowTo#using_region It is used to denote the region affected by a verdict.", +"serviceAccountEmail": { +"description": "Output only. The email address of the generated service account.", +"type": "string" +}, +"serviceName": { +"description": "Input/Output [Required]. The service that the service account belongs to. (e.g. 
cloudbuild.googleapis.com for GCB service accounts)", "type": "string" } }, "type": "object" }, -"AbuseiamSpecialRestriction": { -"description": "A SpecialRestriction is a standardized UserRestriction which lives in a table maintained via CDD.", -"id": "AbuseiamSpecialRestriction", +"GoogleApiServiceconsumermanagementV1BillingConfig": { +"description": "Describes the billing configuration for a new tenant project.", +"id": "GoogleApiServiceconsumermanagementV1BillingConfig", "properties": { -"type": { -"enum": [ -"ALCOHOL" -], -"enumDescriptions": [ -"" -], +"billingAccount": { +"description": "Name of the billing account. For example `billingAccounts/012345-567890-ABCDEF`.", "type": "string" } }, "type": "object" }, -"AbuseiamTarget": { -"id": "AbuseiamTarget", +"GoogleApiServiceconsumermanagementV1PolicyBinding": { +"description": "Translates to IAM Policy bindings (without auditing at this level)", +"id": "GoogleApiServiceconsumermanagementV1PolicyBinding", "properties": { -"id": { +"members": { +"description": "Uses the same format as in IAM policy. `member` must include both a prefix and ID. For example, `user:{emailId}`, `serviceAccount:{emailId}`, `group:{emailId}`.", +"items": { "type": "string" }, -"type": { -"enum": [ -"MESSAGE_ID", -"CHUNK_ID", -"IMAGE_URL", -"URL", -"USER_ID", -"IP", -"SITE", -"SITEDOMAIN", -"ENTITY_ID", -"PERFECT_STREAM_ID", -"ACTIVITY_ID", -"COMMENT_ID", -"AD_ID", -"TEXT", -"TEXT_FRAGMENT", -"CLUSTER_MEMBER_ID", -"EMBED_ID", -"ANDROID_ID" -], -"enumDescriptions": [ -"verdict is issued on the whole message", -"verdict is issued on the specified chunk", -"target id is an image url inside chunk content", -"target id is a non-image url inside chunk content", -"abuse happens at gaia user level", -"target id is an IP address", -"target id is a sitechunk", -"target id is a domain", -"The 4 entries below are *only* for the +1 pipeline. Please do not use elsewhere. 
entity id is a generic Zipit ID", -"Obsolete.", -"activity id in ASBE for ES", -"comment id for ASBE comments in ES", -"ad id for ES", -"text target. Used in Ocelot name reputation.", -"text fragment. Ocelot for name reputation.", -"Targets for Cluster reviews. See http://go/cluff-design-doc. Message ID for the targeted cluster member", -"embed_id for ASBE embeds in ES", -"android_id of the device" -], +"type": "array" +}, +"role": { +"description": "Role. (https://cloud.google.com/iam/docs/understanding-roles) For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", "type": "string" } }, "type": "object" }, -"AbuseiamUserNotification": { -"description": "A structure used to configure a notification to a user.", -"id": "AbuseiamUserNotification", +"GoogleCloudContentwarehouseV1AccessControlAction": { +"description": "Represents the action responsible for access control list management operations.", +"id": "GoogleCloudContentwarehouseV1AccessControlAction", "properties": { -"channel": { +"operationType": { +"description": "Identifies the type of operation.", "enum": [ "UNKNOWN", -"BUILT_IN", -"EMAIL", -"GAIA" +"ADD_POLICY_BINDING", +"REMOVE_POLICY_BINDING", +"REPLACE_POLICY_BINDING" ], "enumDescriptions": [ -"", -"Use product built-in notification, such as pop-ups.", -"Email user, e.g., through ANS(http://sites/cat/ans).", -"Show notifications at Gaia level, such as during login." +"The unknown operation type.", +"Adds newly given policy bindings in the existing bindings list.", +"Removes newly given policy bindings from the existing bindings list.", +"Replaces existing policy bindings with the given policy binding list" ], "type": "string" +}, +"policy": { +"$ref": "GoogleIamV1Policy", +"description": "Represents the new policy from which bindings are added, removed or replaced based on the type of the operation. the policy is limited to a few 10s of KB." 
} }, "type": "object" }, -"AbuseiamUserRestriction": { -"description": "Describes restrictions on where the verdict applies. Please use {@code TakedownManager} to evaluate this proto.", -"id": "AbuseiamUserRestriction", +"GoogleCloudContentwarehouseV1Action": { +"description": "Represents the action triggered by Rule Engine when the rule is true.", +"id": "GoogleCloudContentwarehouseV1Action", "properties": { -"ageRestriction": { -"$ref": "AbuseiamAgeRestriction" +"accessControl": { +"$ref": "GoogleCloudContentwarehouseV1AccessControlAction", +"description": "Action triggering access control operations." +}, +"actionId": { +"description": "ID of the action. Managed internally.", +"type": "string" }, -"andRestriction": { -"$ref": "AbuseiamAndRestriction", -"description": "Operators" +"addToFolder": { +"$ref": "GoogleCloudContentwarehouseV1AddToFolderAction", +"description": "Action triggering create document link operation." }, -"constantRestriction": { -"$ref": "AbuseiamConstantRestriction", -"description": "Constant" +"dataUpdate": { +"$ref": "GoogleCloudContentwarehouseV1DataUpdateAction", +"description": "Action triggering data update operations." }, -"geoRestriction": { -"$ref": "AbuseiamGeoRestriction", -"description": "Leaf Nodes" +"dataValidation": { +"$ref": "GoogleCloudContentwarehouseV1DataValidationAction", +"description": "Action triggering data validation operations." }, -"notRestriction": { -"$ref": "AbuseiamNotRestriction" +"deleteDocumentAction": { +"$ref": "GoogleCloudContentwarehouseV1DeleteDocumentAction", +"description": "Action deleting the document." }, -"orRestriction": { -"$ref": "AbuseiamOrRestriction" +"publishToPubSub": { +"$ref": "GoogleCloudContentwarehouseV1PublishAction", +"description": "Action publish to Pub/Sub operation." 
}, -"specialRestriction": { -"$ref": "AbuseiamSpecialRestriction" +"removeFromFolderAction": { +"$ref": "GoogleCloudContentwarehouseV1RemoveFromFolderAction", +"description": "Action removing a document from a folder." } }, "type": "object" }, -"AbuseiamVerdict": { -"description": "Verdict against a target. AbuseIAm generates a verdict based on evaluations. AbuseIAm can send such verdicts to clients for enforcement.", -"id": "AbuseiamVerdict", +"GoogleCloudContentwarehouseV1ActionExecutorOutput": { +"description": "Represents the output of the Action Executor.", +"id": "GoogleCloudContentwarehouseV1ActionExecutorOutput", "properties": { -"client": { -"$ref": "AbuseiamClient", -"description": "Target client of the verdict. It can be used to differentiate verdicts from multiple clients when such verdicts are processed in one common place." -}, -"comment": { -"description": "Additional info regarding the verdict.", -"type": "string" -}, -"decision": { -"enum": [ -"ERROR", -"NO_ACTION", -"GOOD", -"DELETE", -"INTERSTITIAL", -"HIDE", -"BLACK_LIST", -"MARK_AS_SPAM", -"REWRITE_LINKS", -"HIDE_AND_NOTIFY", -"FREEZE_SERVICE", -"SUSPEND_SERVICE", -"SMS_DISABLE", -"NOTIFY" -], -"enumDescriptions": [ -"", -"The content is okay and no action needs to be taken.", -"To mark content/user as good.", -"", -"Give a user an interstial message. E.g., via a pop-up or an email.", -"", -"The product should black list this content itself.", -"", -"There's something bad about the links, so we recommend that the links be 'scrubbed' to go through the GWS redirector to protect against malware and phishing: http://wiki.corp.google.com/twiki/bin/view/Main/SafeRedirector", -"HIDE_AND_NOTIFY is used when the target needs to be hidden, but the UI needs to show an appropriate error message.", -"Freeze a user on service level. E.g., a frozen user can not generate new content.", -"Suspend a user on service level.", -"SMS-disable a user on Gaia level.", -"Notify a user (e.g., warn a user)." 
-], -"type": "string" -}, -"durationMins": { -"description": "Time duration (in minutes) of the verdict.", -"format": "int32", -"type": "integer" -}, -"evaluation": { -"description": "Evaluations relevant to this verdict. Every Verdict should contain at least one Evaluation.", -"items": { -"$ref": "AbuseiamEvaluation" -}, -"type": "array" -}, -"hashes": { -"description": "Details of all the hashes that can be computed on a message, such as simhash and attachment hash", +"ruleActionsPairs": { +"description": "List of rule and corresponding actions result.", "items": { -"$ref": "AbuseiamHash" +"$ref": "GoogleCloudContentwarehouseV1RuleActionsPair" }, "type": "array" +} }, -"isLegalIssued": { -"description": "Is this verdict issued by legal?", -"type": "boolean" -}, -"miscScores": { -"description": "This field is used to pass relevant / necessary scores to our clients. For eg: ASBE propagates these scores to moonshine.", -"items": { -"$ref": "AbuseiamNameValuePair" -}, -"type": "array" +"type": "object" }, -"reasonCode": { -"description": "A short description of the reason why the verdict decision is made.", +"GoogleCloudContentwarehouseV1ActionOutput": { +"description": "Represents the result of executing an action.", +"id": "GoogleCloudContentwarehouseV1ActionOutput", +"properties": { +"actionId": { +"description": "ID of the action.", "type": "string" }, -"region": { -"description": "The regions in which this verdict should be enforced. Absence of this field indicates that the verdict is applicable everywhere.", -"items": { -"$ref": "AbuseiamRegion" -}, -"type": "array" -}, -"restriction": { -"description": "Restrictions on where this verdict applies. If any restriction is met, the verdict is applied there. 
If no restrictions are present, the verdict is considered global.", -"items": { -"$ref": "AbuseiamVerdictRestriction" -}, -"type": "array" -}, -"strikeCategory": { -"description": "Category of the strike if this is a strike verdict.", +"actionState": { +"description": "State of an action.", "enum": [ -"ES", -"CP", -"COPYRIGHT", -"BLOGGER", -"GRANDCENTRAL", -"DRIVE", -"CLOUD", -"SITES" +"UNKNOWN", +"ACTION_SUCCEEDED", +"ACTION_FAILED", +"ACTION_TIMED_OUT", +"ACTION_PENDING" ], "enumDescriptions": [ -"General Emerald Sea.", -"Child porn.", -"Copyright violations.", -"Blogger.", -"Google Voice.", -"Drive.", -"Cloud platform.", -"Google Sites." +"The unknown state.", +"State indicating action executed successfully.", +"State indicating action failed.", +"State indicating action timed out.", +"State indicating action is pending." ], "type": "string" }, -"target": { -"$ref": "AbuseiamTarget" -}, -"targetTimestampMicros": { -"description": "The timestamp of the target. E.g., the time when the target was updated.", -"format": "int64", +"outputMessage": { +"description": "Action execution output message.", "type": "string" +} }, -"timestampMicros": { -"description": "When the verdict is generated", -"format": "int64", -"type": "string" +"type": "object" }, -"userNotification": { -"description": "Extra notification(s) to be delivered to target user or message owner about the verdict.", +"GoogleCloudContentwarehouseV1AddToFolderAction": { +"description": "Represents the action responsible for adding document under a folder.", +"id": "GoogleCloudContentwarehouseV1AddToFolderAction", +"properties": { +"folders": { +"description": "Names of the folder under which new document is to be added. 
Format: projects/{project_number}/locations/{location}/documents/{document_id}.", "items": { -"$ref": "AbuseiamUserNotification" +"type": "string" }, "type": "array" -}, -"version": { -"description": "version of decision script", -"type": "string" } }, "type": "object" }, -"AbuseiamVerdictRestriction": { -"description": "Describes restrictions on where the verdict applies.", -"id": "AbuseiamVerdictRestriction", +"GoogleCloudContentwarehouseV1CloudAIDocumentOption": { +"description": "Request Option for processing Cloud AI Document in CW Document.", +"id": "GoogleCloudContentwarehouseV1CloudAIDocumentOption", "properties": { -"context": { -"description": "For a restriction to apply, all contexts must be satisfied. For example, if context[0] is COUNTRY/'GERMANY' and context[1] is DESTINATION_STREAM/'gplus:SQUARE:knitting_discussion', then the verdict applies only when the 'knitting discussion' square is viewed from inside Germany. Please note that this is present for legacy reasons and users of this field would be migrated to use the user_restriction field defined below.", -"items": { -"$ref": "AbuseiamVerdictRestrictionContext" +"customizedEntitiesPropertiesConversions": { +"additionalProperties": { +"type": "string" }, -"type": "array" +"description": "If set, only selected entities will be converted to properties.", +"type": "object" }, -"userRestriction": { -"$ref": "AbuseiamUserRestriction", -"description": "A boolean expression tree used to define the restrictions where the verdict applies. Please use java/com/google/ccc/abuse/abuseiam/client/TakedownManager.java to evaluate this proto." 
+"enableEntitiesConversions": { +"description": "Whether to convert all the entities to properties.", +"type": "boolean" } }, "type": "object" }, -"AbuseiamVerdictRestrictionContext": { -"description": "Describes a dimension of a context where a verdict applies.", -"id": "AbuseiamVerdictRestrictionContext", +"GoogleCloudContentwarehouseV1CreateDocumentLinkRequest": { +"description": "Request message for DocumentLinkService.CreateDocumentLink.", +"id": "GoogleCloudContentwarehouseV1CreateDocumentLinkRequest", "properties": { -"id": { -"description": "String identifying the context.", -"type": "string" +"documentLink": { +"$ref": "GoogleCloudContentwarehouseV1DocumentLink", +"description": "Required. Document links associated with the source documents (source_document_id)." }, -"type": { -"enum": [ -"UNKNOWN", -"DESTINATION_STREAM" -], -"enumDescriptions": [ -"Dummy default value. Avoid using it explicitly as a Type.", -"DESTINATION_STREAM is used when a verdict only applies to content in a particular indexed stream/channel. Examples include Squares, Blogs, and Youtube videos." -], -"type": "string" +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the document creator, used to enforce access control for the service." } }, "type": "object" }, -"AdsShoppingReportingOffersSerializedSoriId": { -"description": "The serialized form of a SORI id. 
NOTE that this proto is stored in V4/O4 index and that new fields should not be added without getting an agreement from the serving team as well.", -"id": "AdsShoppingReportingOffersSerializedSoriId", +"GoogleCloudContentwarehouseV1CreateDocumentMetadata": { +"description": "Metadata object for CreateDocument request (currently empty).", +"id": "GoogleCloudContentwarehouseV1CreateDocumentMetadata", +"properties": {}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1CreateDocumentRequest": { +"description": "Request message for DocumentService.CreateDocument.", +"id": "GoogleCloudContentwarehouseV1CreateDocumentRequest", "properties": { -"highId": { -"format": "uint64", -"type": "string" +"cloudAiDocumentOption": { +"$ref": "GoogleCloudContentwarehouseV1CloudAIDocumentOption", +"description": "Request Option for processing Cloud AI Document in Document Warehouse. This field offers limited support for mapping entities from Cloud AI Document to Warehouse Document. Please consult with product team before using this field and other available options." }, -"lowId1": { -"format": "uint64", +"createMask": { +"description": "Field mask for creating Document fields. If mask path is empty, it means all fields are masked. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", +"format": "google-fieldmask", "type": "string" }, -"lowId2": { -"format": "uint64", -"type": "string" +"document": { +"$ref": "GoogleCloudContentwarehouseV1Document", +"description": "Required. The document to create." +}, +"policy": { +"$ref": "GoogleIamV1Policy", +"description": "Default document policy during creation. This refers to an Identity and Access (IAM) policy, which specifies access controls for the Document. Conditions defined in the policy will be ignored." 
+}, +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the end user, used to enforce access control for the service." } }, "type": "object" }, -"Anchors": { -"id": "Anchors", +"GoogleCloudContentwarehouseV1CreateDocumentResponse": { +"description": "Response message for DocumentService.CreateDocument.", +"id": "GoogleCloudContentwarehouseV1CreateDocumentResponse", "properties": { -"anchor": { +"document": { +"$ref": "GoogleCloudContentwarehouseV1Document", +"description": "Document created after executing create request." +}, +"longRunningOperations": { +"description": "post-processing LROs", "items": { -"$ref": "AnchorsAnchor" +"$ref": "GoogleLongrunningOperation" }, "type": "array" }, -"homepageAnchorsDropped": { -"description": "The total # of local homepage anchors dropped in AnchorAccumulator.", -"format": "int64", -"type": "string" -}, -"indexTier": { -"description": "The index tier from which the anchors were extracted. Note that this is only valid in the anchor record written by linkextractor. The value can be one of the enum values defined in segindexer/types.h.", -"format": "int32", -"type": "integer" -}, -"localAnchorsDropped": { -"description": "The total # of local non-homepage anchors dropped in AnchorAccumulator.", -"format": "int64", -"type": "string" +"metadata": { +"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", +"description": "Additional information for the API invocation, such as the request tracking id." }, -"nonlocalAnchorsDropped": { -"description": "The total # of non-local anchors dropped in AnchorAccumulator.", -"format": "int64", -"type": "string" +"ruleEngineOutput": { +"$ref": "GoogleCloudContentwarehouseV1RuleEngineOutput", +"description": "Output from Rule Engine recording the rule evaluator and action executor's output. 
Refer format in: google/cloud/contentwarehouse/v1/rule_engine.proto" +} }, -"redundantAnchorsDropped": { -"description": "The *_anchors_dropped fields below are not populated by Alexandria, which uses cdoc.anchor_stats instead. The total # of redundant anchors dropped in linkextractor.", -"format": "int64", -"type": "string" +"type": "object" }, -"redundantanchorinfo": { +"GoogleCloudContentwarehouseV1CustomWeightsMetadata": { +"description": "To support the custom weighting across document schemas.", +"id": "GoogleCloudContentwarehouseV1CustomWeightsMetadata", +"properties": { +"weightedSchemaProperties": { +"description": "List of schema and property name. Allows a maximum of 10 schemas to be specified for relevance boosting.", "items": { -"$ref": "AnchorsRedundantAnchorInfo" +"$ref": "GoogleCloudContentwarehouseV1WeightedSchemaProperty" }, "type": "array" +} }, -"supplementalAnchorsDropped": { -"description": "The total # of supplemental anchors dropped in AnchorAccumulator. ## DEPRECATED.", -"format": "int64", -"type": "string" -}, -"targetDocid": { -"description": "may be implicit", -"format": "uint64", -"type": "string" +"type": "object" }, -"targetSite": { -"description": "HOST_LEVEL site chunking.", +"GoogleCloudContentwarehouseV1DataUpdateAction": { +"description": "Represents the action responsible for properties update operations.", +"id": "GoogleCloudContentwarehouseV1DataUpdateAction", +"properties": { +"entries": { +"additionalProperties": { "type": "string" }, -"targetUrl": { -"description": "This is produced during link extraction but not written out in the linklogs in order to save space.", -"type": "string" +"description": "Map of (K, V) -> (valid name of the field, new value of the field) E.g., (\"age\", \"60\") entry triggers update of field age with a value of 60. If the field is not present then new entry is added. 
During update action execution, value strings will be casted to appropriate types.", +"type": "object" } }, "type": "object" }, -"AnchorsAnchor": { -"id": "AnchorsAnchor", +"GoogleCloudContentwarehouseV1DataValidationAction": { +"description": "Represents the action responsible for data validation operations.", +"id": "GoogleCloudContentwarehouseV1DataValidationAction", "properties": { -"bucket": { -"format": "int32", -"type": "integer" +"conditions": { +"additionalProperties": { +"type": "string" }, -"catfishTags": { -"description": "CATfish tags attached to a link. These are similar to link tags, except the values are created on the fly within Cookbook. See: http://sites/cookbook/exporting/indexing", -"items": { -"format": "int32", -"type": "integer" +"description": "Map of (K, V) -> (field, string condition to be evaluated on the field) E.g., (\"age\", \"age > 18 && age < 60\") entry triggers validation of field age with the given condition. Map entries will be ANDed during validation.", +"type": "object" +} }, -"type": "array" +"type": "object" }, -"compressedImageUrls": { -"description": "If the anchor contained images, these image urls are stored here in compressed form.", +"GoogleCloudContentwarehouseV1DateTimeArray": { +"description": "DateTime values.", +"id": "GoogleCloudContentwarehouseV1DateTimeArray", +"properties": { +"values": { +"description": "List of datetime values. Both OffsetDateTime and ZonedDateTime are supported.", "items": { -"format": "byte", -"type": "string" +"$ref": "GoogleTypeDateTime" }, "type": "array" +} }, -"compressedOriginalTargetUrl": { -"description": "The anchor's original target url, compressed. Available only in Alexandria docjoins when the anchor is forwarded.", -"format": "byte", -"type": "string" -}, -"context": { -"format": "int32", -"type": "integer" -}, -"context2": { -"description": "This is a hash of terms near the anchor. 
(This is a second-generation hash replacing the value stored in the 'context' field.)", -"format": "int32", -"type": "integer" +"type": "object" }, -"creationDate": { -"description": "used for history - the first and last time we have seen this anchor. creation_date also used for Freshdocs Twitter indexing, a retweet is an anchor of the original tweet. This field records the time when a retweet is created.", -"format": "int32", -"type": "integer" +"GoogleCloudContentwarehouseV1DateTimeTypeOptions": { +"description": "Configurations for a date time property.", +"id": "GoogleCloudContentwarehouseV1DateTimeTypeOptions", +"properties": {}, +"type": "object" }, -"deleted": { +"GoogleCloudContentwarehouseV1DeleteDocumentAction": { +"description": "Represents the action responsible for deleting the document.", +"id": "GoogleCloudContentwarehouseV1DeleteDocumentAction", +"properties": { +"enableHardDelete": { +"description": "Boolean field to select between hard vs soft delete options. Set 'true' for 'hard delete' and 'false' for 'soft delete'.", "type": "boolean" +} }, -"deletionDate": { -"format": "int32", -"type": "integer" +"type": "object" }, -"demotionreason": { -"description": "DEPRECATED", -"format": "int32", -"type": "integer" +"GoogleCloudContentwarehouseV1DeleteDocumentLinkRequest": { +"description": "Request message for DocumentLinkService.DeleteDocumentLink.", +"id": "GoogleCloudContentwarehouseV1DeleteDocumentLinkRequest", +"properties": { +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the document creator, used to enforce access control for the service." +} }, -"encodedNewsAnchorData": { -"description": "Encoded data containing information about newsiness of anchor. Populated only if anchor is classified as coming from a newsy, high quality site. 
Encoded data for anchor sources are being stored in googledata/quality/freshness/news_anchors/encoded_news_anchors_data.txt Scores are being computed with quality/freshness/news_anchors/ routines.", -"format": "uint32", -"type": "integer" +"type": "object" }, -"experimental": { -"description": "If true, the anchor is for experimental purposes and should not be used in serving.", -"type": "boolean" +"GoogleCloudContentwarehouseV1DeleteDocumentRequest": { +"description": "Request message for DocumentService.DeleteDocument.", +"id": "GoogleCloudContentwarehouseV1DeleteDocumentRequest", +"properties": { +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the end user, used to enforce access control for the service." +} }, -"expired": { -"description": "true iff exp domain", -"type": "boolean" +"type": "object" }, -"firstseenDate": { -"description": "# days past Dec 31, 1994, 23:00:00 UTC (Unix time @788914800) that this link was first seen. Should never occupy more than 15 bits. NOTE: this is NOT the same as creation_date; firstseen_date is filled during link extraction", -"format": "int32", -"type": "integer" +"GoogleCloudContentwarehouseV1Document": { +"description": "Defines the structure for content warehouse document proto.", +"id": "GoogleCloudContentwarehouseV1Document", +"properties": { +"cloudAiDocument": { +"$ref": "GoogleCloudDocumentaiV1Document", +"description": "Document AI format to save the structured content, including OCR." }, -"firstseenNearCreation": { -"description": "true if we think 'firstseen_date' is an accurate estimate of when the link was actually added to the source page. false if it may have existed for some time before we saw it.", -"type": "boolean" +"contentCategory": { +"description": "Indicates the category (image, audio, video etc.) 
of the original content.", +"enum": [ +"CONTENT_CATEGORY_UNSPECIFIED", +"CONTENT_CATEGORY_IMAGE", +"CONTENT_CATEGORY_AUDIO", +"CONTENT_CATEGORY_VIDEO" +], +"enumDescriptions": [ +"No category is specified.", +"Content is of image type.", +"Content is of audio type.", +"Content is of video type." +], +"type": "string" }, -"fontsize": { -"format": "int32", -"type": "integer" +"createTime": { +"description": "Output only. The time when the document is created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"forwardingTypes": { -"description": "How the anchor is forwarded to the canonical, available only for forwarded anchors (i.e., the field is set). The forwarding types are defined in URLForwardingUtil (segindexer/segment-indexer-util.h). Always use URLForwardingUtil to access this field and use URLForwardingUtil::GetAnchorForwardingReason to get the explanation how the anchor is forwarded to the canonical. NOTE: Use with caution as it is only set for docjoins generated using the urlmap from repository/updater.", -"format": "uint32", -"type": "integer" +"creator": { +"description": "The user who creates the document.", +"type": "string" }, -"fragment": { -"description": "The URL fragment for this anchor (the foo in http://www.google.com#foo)", +"displayName": { +"description": "Required. Display name of the document given by the user. This name will be displayed in the UI. Customer can populate this field with the name of the document. This differs from the 'title' field as 'title' is optional and stores the top heading in the document.", "type": "string" }, -"fullLeftContext": { -"description": "The full context. These are not written out in the linklogs.", -"items": { -"format": "uint64", +"displayUri": { +"description": "Uri to display the document, for example, in the UI.", "type": "string" }, -"type": "array" +"dispositionTime": { +"description": "Output only. 
If linked to a Collection with RetentionPolicy, the date when the document becomes mutable.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"fullRightContext": { -"items": { -"format": "uint64", +"documentSchemaName": { +"description": "The Document schema name. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", "type": "string" }, -"type": "array" +"inlineRawDocument": { +"description": "Raw document content.", +"format": "byte", +"type": "string" }, -"isLocal": { -"description": "The bit ~roughly~ indicates whether an anchor's source and target pages are on the same domain. Note: this plays no role in determining whether an anchor is onsite, ondomain, or offdomain in mustang (i.e., the bit above).", +"legalHold": { +"description": "Output only. Indicates if the document has a legal hold on it.", +"readOnly": true, "type": "boolean" }, -"lastUpdateTimestamp": { -"description": "Used for history and freshness tracking - the timestamp this anchor is updated in indexing.", -"format": "int32", -"type": "integer" +"name": { +"description": "The resource name of the document. Format: projects/{project_number}/locations/{location}/documents/{document_id}. The name is ignored when creating a document.", +"type": "string" }, -"linkAdditionalInfo": { -"$ref": "Proto2BridgeMessageSet", -"description": "Additional information related to the anchor, such as additional anchor text or scores." +"plainText": { +"description": "Other document format, such as PPTX, XLXS", +"type": "string" }, -"linkTags": { -"description": "Contains info on link type, source page, etc.", +"properties": { +"description": "List of values that are user supplied metadata.", "items": { -"format": "int32", -"type": "integer" +"$ref": "GoogleCloudContentwarehouseV1Property" }, "type": "array" }, -"locality": { -"description": "For ranking purposes, the quality of an anchor is measured by its \"locality\" and \"bucket\". 
See quality/anchors/definitions.h for more information.", -"format": "int32", -"type": "integer" -}, -"offset": { -"description": "This is the offset for the first term in the anchor - it can be used as a unique ID for the anchor within the document and compared against all per-tag data. This is measured in bytes from the start of the document. We write this out to the linklogs to recover the original order of links after source/target forwarding. This is necessary for computing the global related data.", -"format": "int32", -"type": "integer" -}, -"origText": { -"description": "Original text, including capitalization and punctuation. Runs of whitespace are collapsed into a single space.", +"rawDocumentFileType": { +"description": "This is used when DocAI was not used to load the document and parsing/ extracting is needed for the inline_raw_document. For example, if inline_raw_document is the byte representation of a PDF file, then this should be set to: RAW_DOCUMENT_FILE_TYPE_PDF.", +"enum": [ +"RAW_DOCUMENT_FILE_TYPE_UNSPECIFIED", +"RAW_DOCUMENT_FILE_TYPE_PDF", +"RAW_DOCUMENT_FILE_TYPE_DOCX", +"RAW_DOCUMENT_FILE_TYPE_XLSX", +"RAW_DOCUMENT_FILE_TYPE_PPTX", +"RAW_DOCUMENT_FILE_TYPE_TEXT", +"RAW_DOCUMENT_FILE_TYPE_TIFF" +], +"enumDescriptions": [ +"No raw document specified or it is non-parsable", +"Adobe PDF format", +"Microsoft Word format", +"Microsoft Excel format", +"Microsoft Powerpoint format", +"UTF-8 encoded text format", +"TIFF or TIF image file format" +], "type": "string" }, -"originalTargetDocid": { -"description": "The docid of the anchor's original target. This field is available if and only if the anchor is forwarded.", -"format": "uint64", +"rawDocumentPath": { +"description": "Raw document file in Cloud Storage path.", "type": "string" }, -"pagerankWeight": { -"description": "Weight to be stored in linkmaps for pageranker", -"format": "float", -"type": "number" +"referenceId": { +"description": "The reference ID set by customers. 
Must be unique per project and location.", +"type": "string" }, -"parallelLinks": { -"description": "The number of additional links from the same source page to the same target domain. Not populated if is_local is true.", -"format": "int32", -"type": "integer" +"textExtractionDisabled": { +"deprecated": true, +"description": "If true, text extraction will not be performed.", +"type": "boolean" }, -"possiblyOldFirstseenDate": { -"description": "DEPRECATED. It used to be set if firstseen_date is not set. It's to indicate that the anchor is possibly old, but we don't have enough information to tell until the linkage map is updated. TODO(hxu) rename it to possibly_old_firstseen_date_DEPRECATED after clean up other dependencies.", +"textExtractionEnabled": { +"description": "If true, text extraction will be performed.", "type": "boolean" }, -"setiPagerankWeight": { -"description": "TEMPORARY", -"format": "float", -"type": "number" +"title": { +"description": "Title that describes the document. This can be the top heading or text that describes the document.", +"type": "string" }, -"source": { -"$ref": "AnchorsAnchorSource" +"updateTime": { +"description": "Output only. The time when the document is last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"sourceType": { -"description": " is to record the quality of the anchor's source page and is correlated with but not identical to the index tier of the source page. In the docjoins built by the indexing pipeline (Alexandria), - Anchors marked TYPE_HIGH_QUALITY are from base documents. - Anchors marked TYPE_MEDIUM_QUALITY are from documents of medium quality (roughly but not exactly supplemental tier documents). - Anchors marked TYPE_LOW_QUALITY are from documents of low quality (roughly but not exactly blackhole documents). 
Note that the source_type can also be used as an importance indicator of an anchor (a lower source_type value indicates a more important anchor), so it is important to enforce that TYPE_HIGH_QUALITY < TYPE_MEDIUM_QUALITY < TYPE_LOW_QUALITY To add a new source type in future, please maintain the proper relationship among the types as well. TYPE_FRESHDOCS, only available in freshdocs indexing, is a special case and is considered the same type as TYPE_HIGH_QUALITY for the purpose of anchor importance in duplicate anchor removal.", -"format": "int32", -"type": "integer" +"updater": { +"description": "The user who lastly updates the document.", +"type": "string" +} }, -"targetUrlEncoding": { -"description": "A given target URL may be found in different encodings in different documents. We store the URL encoding with each source anchor so that we can count them later to find the encoding most likely to be expected by the Web site. Around 0.7% of target URLs are expected to require a non-default value here. The default value 0 is referenced in C++ as webutil::kDefaultUrlEncoding. See also webutil/urlencoding.", -"format": "int32", -"type": "integer" +"type": "object" }, -"text": { -"description": "Space-delimited anchor words. Text that needs segmentation (like CJK or Thai) is unsegmented, since we set FLAGS_segment_during_lexing to false in mr-linkextractor.cc .", +"GoogleCloudContentwarehouseV1DocumentLink": { +"description": "A document-link between source and target document.", +"id": "GoogleCloudContentwarehouseV1DocumentLink", +"properties": { +"createTime": { +"description": "Output only. The time when the documentLink is created.", +"format": "google-datetime", +"readOnly": true, "type": "string" }, -"timestamp": { -"description": "This field is DEPRECATED and no longer filled. For source page crawl timestamp, use Source.crawl_timestamp. 
Next tag id should be 62.", -"format": "int64", +"description": { +"description": "Description of this document-link.", "type": "string" }, -"type": { -"description": "DEPRECATED: Now in link_tags", -"format": "int32", -"type": "integer" +"name": { +"description": "Name of this document-link. It is required that the parent derived form the name to be consistent with the source document reference. Otherwise an exception will be thrown. Format: projects/{project_number}/locations/{location}/documents/{source_document_id}/documentLinks/{document_link_id}.", +"type": "string" }, -"weight": { -"description": "weights are 0-127", -"format": "int32", -"type": "integer" +"sourceDocumentReference": { +"$ref": "GoogleCloudContentwarehouseV1DocumentReference", +"description": "Document references of the source document." +}, +"state": { +"description": "The state of the documentlink. If target node has been deleted, the link is marked as invalid. Removing a source node will result in removal of all associated links.", +"enum": [ +"STATE_UNSPECIFIED", +"ACTIVE", +"SOFT_DELETED" +], +"enumDescriptions": [ +"Unknown state of documentlink.", +"The documentlink has both source and target documents detected.", +"Target document is deleted, and mark the documentlink as soft-deleted." +], +"type": "string" +}, +"targetDocumentReference": { +"$ref": "GoogleCloudContentwarehouseV1DocumentReference", +"description": "Document references of the target document." +}, +"updateTime": { +"description": "Output only. 
The time when the documentLink is last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" }, -"AnchorsAnchorSource": { -"description": "attributes of the source document for the link", -"id": "AnchorsAnchorSource", +"GoogleCloudContentwarehouseV1DocumentQuery": { +"id": "GoogleCloudContentwarehouseV1DocumentQuery", "properties": { -"additionalInfo": { -"$ref": "Proto2BridgeMessageSet", -"description": "Additional information related to the source, such as news hub info." +"customPropertyFilter": { +"deprecated": true, +"description": "This filter specifies a structured syntax to match against the [PropertyDefinition].is_filterable marked as `true`. The syntax for this expression is a subset of SQL syntax. Supported operators are: `=`, `!=`, `<`, `<=`, `>`, and `>=` where the left of the operator is a property name and the right of the operator is a number or a quoted string. You must escape backslash (\\\\) and quote (\\\") characters. Supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Boolean expressions (AND/OR/NOT) are supported up to 3 levels of nesting (for example, \"((A AND B AND C) OR NOT D) AND E\"), a maximum of 100 comparisons or functions are allowed in the expression. The expression must be < 6000 bytes in length. Sample Query: `(LOWER(driving_license)=\"class \\\"a\\\"\" OR EMPTY(driving_license)) AND driving_years > 10`", +"type": "string" }, -"cluster": { -"description": "anchor++ cluster id", -"format": "int32", -"type": "integer" +"customWeightsMetadata": { +"$ref": "GoogleCloudContentwarehouseV1CustomWeightsMetadata", +"description": "To support the custom weighting across document schemas, customers need to provide the properties to be used to boost the ranking in the search request. 
For a search query with CustomWeightsMetadata specified, only the RetrievalImportance for the properties in the CustomWeightsMetadata will be honored." }, -"compressedUrl": { -"description": "compressed source url", -"format": "byte", +"documentCreatorFilter": { +"description": "The exact creator(s) of the documents to search against. If a value isn't specified, documents within the search results are associated with any creator. If multiple values are specified, documents within the search results may be associated with any of the specified creators.", +"items": { "type": "string" }, -"crawlTimestamp": { -"description": "Source page crawl timestamp.", -"format": "int64", -"type": "string" +"type": "array" }, -"docid": { -"description": "The docid field used to be \"required\", but it is now \"optional\" because it is not present when anchors are stored in webtable. When anchors are stored as part of docjoin files in the segment indexer, however, docid should be considered required.", -"format": "uint64", +"documentNameFilter": { +"description": "Search the documents in the list. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", +"items": { "type": "string" }, -"doclength": { -"description": "necessary for anything?", -"format": "int32", -"type": "integer" -}, -"homePageInfo": { -"description": "Information about if the source page is a home page. It can be one of the enum values defined in PerDocData::HomePageInfo (NOT_HOMEPAGE, NOT_TRUSTED, PARTIALLY_TRUSTED, and FULLY_TRUSTED).", -"format": "int32", -"type": "integer" +"type": "array" }, -"indyrank": { -"description": "uint16 scale", -"format": "int32", -"type": "integer" +"documentSchemaNames": { +"description": "This filter specifies the exact document schema Document.document_schema_name of the documents to search against. If a value isn't specified, documents within the search results are associated with any schema. 
If multiple values are specified, documents within the search results may be associated with any of the specified schemas. At most 20 document schema names are allowed.", +"items": { +"type": "string" }, -"ipaddr": { -"description": "DEPRECATED, use packed_ipaddress", -"format": "int32", -"type": "integer" +"type": "array" }, -"language": { -"description": "default -> English", -"format": "int32", -"type": "integer" +"fileTypeFilter": { +"$ref": "GoogleCloudContentwarehouseV1FileTypeFilter", +"description": "This filter specifies the types of files to return: ALL, FOLDER, or FILE. If FOLDER or FILE is specified, then only either folders or files will be returned, respectively. If ALL is specified, both folders and files will be returned. If no value is specified, ALL files will be returned." }, -"linkhash": { -"description": "0 -> no hash", -"format": "uint64", +"folderNameFilter": { +"description": "Search all the documents under this specified folder. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", "type": "string" }, -"localCountryCodes": { -"description": "Countries to which the source page is local/most relevant; stored as III identifiers for country/region codes (see http://go/iii).", +"isNlQuery": { +"description": "Experimental, do not use. If the query is a natural language question. False by default. If true, then the question-answering feature will be used instead of search, and `result_count` in SearchDocumentsRequest must be set. In addition, all other input fields related to search (pagination, histograms, etc.) will be ignored.", +"type": "boolean" +}, +"propertyFilter": { +"description": "This filter specifies a structured syntax to match against the PropertyDefinition.is_filterable marked as `true`. 
The relationship between the PropertyFilters is OR.", "items": { -"format": "int32", -"type": "integer" +"$ref": "GoogleCloudContentwarehouseV1PropertyFilter" }, "type": "array" }, -"nsr": { -"description": "This NSR value has range [0,1000] and is the original value [0.0,1.0] multiplied by 1000 rounded to an integer.", -"format": "uint32", -"type": "integer" -}, -"outdegree": { -"format": "int32", -"type": "integer" -}, -"outsites": { -"description": "approx num of pointed-to sites", -"format": "int32", -"type": "integer" +"query": { +"description": "The query string that matches against the full text of the document and the searchable properties. The query partially supports [Google AIP style syntax](https://google.aip.dev/160). Specifically, the query supports literals, logical operators, negation operators, comparison operators, and functions. Literals: A bare literal value (examples: \"42\", \"Hugo\") is a value to be matched against. It searches over the full text of the document and the searchable properties. Logical operators: \"AND\", \"and\", \"OR\", and \"or\" are binary logical operators (example: \"engineer OR developer\"). Negation operators: \"NOT\" and \"!\" are negation operators (example: \"NOT software\"). Comparison operators: support the binary comparison operators =, !=, <, >, <= and >= for string, numeric, enum, boolean. Also support like operator `~~` for string. It provides semantic search functionality by parsing, stemming and doing synonyms expansion against the input query. To specify a property in the query, the left hand side expression in the comparison must be the property ID including the parent. The right hand side must be literals. For example: \"\\\"projects/123/locations/us\\\".property_a < 1\" matches results whose \"property_a\" is less than 1 in project 123 and us location. The literals and comparison expression can be connected in a single query (example: \"software engineer \\\"projects/123/locations/us\\\".salary > 100\"). 
Functions: supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Support nested expressions connected using parenthesis and logical operators. The default logical operators is `AND` if there is no operators between expressions. The query can be used with other filters e.g. `time_filters` and `folder_name_filter`. They are connected with `AND` operator under the hood. The maximum number of allowed characters is 255.", +"type": "string" }, -"packedIpaddress": { -"description": "string in IPAddress::ToPackedString() format.", -"format": "byte", +"queryContext": { +"description": "For custom synonyms. Customers provide the synonyms based on context. One customer can provide multiple set of synonyms based on different context. The search query will be expanded based on the custom synonyms of the query context set. By default, no custom synonyms wll be applied if no query context is provided. It is not supported for CMEK compliant deployment.", +"items": { "type": "string" }, -"pageTags": { -"description": "Page tags are described by enum PageTag in PerDocData. Page tags are used in anchors to identify properties of the linking page. These are DEPRECATED: in the future, use link_tags instead. 
DEPRECATED", +"type": "array" +}, +"timeFilters": { +"description": "Documents created/updated within a range specified by this filter are searched against.", "items": { -"format": "int32", -"type": "integer" +"$ref": "GoogleCloudContentwarehouseV1TimeFilter" }, "type": "array" +} }, -"pagerank": { -"description": "uint16 scale", -"format": "int32", -"type": "integer" +"type": "object" }, -"pagerankNs": { -"description": "unit16 scale", -"format": "int32", -"type": "integer" +"GoogleCloudContentwarehouseV1DocumentReference": { +"description": "References to the documents.", +"id": "GoogleCloudContentwarehouseV1DocumentReference", +"properties": { +"createTime": { +"description": "Output only. The time when the document is created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"seglanguage": { -"description": "DEPRECATED", -"format": "int32", -"type": "integer" +"deleteTime": { +"description": "Output only. The time when the document is deleted.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"site": { +"displayName": { +"description": "display_name of the referenced document; this name does not need to be consistent to the display_name in the Document proto, depending on the ACL constraint.", "type": "string" }, -"spamrank": { -"description": "uint16 scale", -"format": "int32", -"type": "integer" +"documentIsFolder": { +"description": "The document type of the document being referenced.", +"type": "boolean" }, -"spamscore1": { -"description": "deprecated, to be removed after October 20. 0-127 scale", -"format": "int32", -"type": "integer" +"documentIsLegalHoldFolder": { +"description": "Document is a folder with legal hold.", +"type": "boolean" }, -"spamscore2": { -"description": "0-127 scale", -"format": "int32", -"type": "integer" +"documentIsRetentionFolder": { +"description": "Document is a folder with retention policy.", +"type": "boolean" +}, +"documentName": { +"description": "Required. 
Name of the referenced document.", +"type": "string" +}, +"snippet": { +"description": "Stores the subset of the referenced document's content. This is useful to allow user peek the information of the referenced document.", +"type": "string" }, -"webtableKey": { -"description": "Webtable key of source", +"updateTime": { +"description": "Output only. The time when the document is last updated.", +"format": "google-datetime", +"readOnly": true, "type": "string" } }, "type": "object" }, -"AnchorsRedundantAnchorInfo": { -"description": "NOTE: in docjoins, the following anchor sampling information is only ## available in the first record of a document (under the same docid). The total number of redundant anchors dropped per (domain, text) in linkextractor. If we receive a large number of anchors from a particular domain, then we'll throw out all but a sampling of them from that domain. The data is sorted by the (domain,text) pairs. This field is not populated by Alexandria, which uses cdoc.anchor_stats instead.", -"id": "AnchorsRedundantAnchorInfo", +"GoogleCloudContentwarehouseV1DocumentSchema": { +"description": "A document schema used to define document structure.", +"id": "GoogleCloudContentwarehouseV1DocumentSchema", "properties": { -"anchorsDropped": { -"format": "int64", +"createTime": { +"description": "Output only. The time when the document schema is created.", +"format": "google-datetime", +"readOnly": true, "type": "string" }, -"domain": { +"description": { +"description": "Schema description.", "type": "string" }, -"text": { +"displayName": { +"description": "Required. Name of the schema given by the user. Must be unique per project.", "type": "string" -} }, -"type": "object" +"documentIsFolder": { +"description": "Document Type, true refers the document is a folder, otherwise it is a typical document.", +"type": "boolean" }, -"AppsDynamiteCustomerId": { -"description": "Represents a GSuite customer ID. 
Obfuscated with CustomerIdObfuscator.", -"id": "AppsDynamiteCustomerId", -"properties": { -"customerId": { +"name": { +"description": "The resource name of the document schema. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}. The name is ignored when creating a document schema.", "type": "string" -} }, -"type": "object" +"propertyDefinitions": { +"description": "Document details.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1PropertyDefinition" }, -"AppsDynamiteSharedOrganizationInfo": { -"description": "Contains info about the entity that something is, or is owned by.", -"id": "AppsDynamiteSharedOrganizationInfo", -"properties": { -"consumerInfo": { -"$ref": "AppsDynamiteSharedOrganizationInfoConsumerInfo" +"type": "array" }, -"customerInfo": { -"$ref": "AppsDynamiteSharedOrganizationInfoCustomerInfo" +"updateTime": { +"description": "Output only. The time when the document schema is last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" }, -"AppsDynamiteSharedOrganizationInfoConsumerInfo": { -"description": "Intentionally empty. 
Used to disambiguate consumer and customer use cases in oneof below.", -"id": "AppsDynamiteSharedOrganizationInfoConsumerInfo", -"properties": {}, -"type": "object" -}, -"AppsDynamiteSharedOrganizationInfoCustomerInfo": { -"id": "AppsDynamiteSharedOrganizationInfoCustomerInfo", +"GoogleCloudContentwarehouseV1EnumArray": { +"description": "Enum values.", +"id": "GoogleCloudContentwarehouseV1EnumArray", "properties": { -"customerId": { -"$ref": "AppsDynamiteCustomerId" +"values": { +"description": "List of enum values.", +"items": { +"type": "string" +}, +"type": "array" } }, "type": "object" }, -"AppsDynamiteSharedSegmentedMembershipCount": { -"description": "Contains info on membership count for member types: HUMAN_USER, APP_USER & ROSTER_MEMBER different states: INVITED, JOINED", -"id": "AppsDynamiteSharedSegmentedMembershipCount", +"GoogleCloudContentwarehouseV1EnumTypeOptions": { +"description": "Configurations for an enum/categorical property.", +"id": "GoogleCloudContentwarehouseV1EnumTypeOptions", "properties": { -"memberType": { -"enum": [ -"MEMBER_TYPE_UNSPECIFIED", -"HUMAN_USER", -"ROSTER_MEMBER" -], -"enumDescriptions": [ -"default value", -"member is a human user", -"member is a roster" -], +"possibleValues": { +"description": "Required. List of possible enum values.", +"items": { "type": "string" }, -"membershipCount": { -"description": "count of members with given type and state", -"format": "int32", -"type": "integer" +"type": "array" }, -"membershipState": { -"enum": [ -"MEMBER_UNKNOWN", -"MEMBER_INVITED", -"MEMBER_JOINED", -"MEMBER_NOT_A_MEMBER", -"MEMBER_FAILED" -], -"enumDescriptions": [ -"Default state, do not use", -"An invitation to the space has been sent", -"User has joined the space", -"User is not a member", -"This state should never be stored in Spanner. It is a state for responses to the clients to indicate that membership mutations have failed and the member is in its previous state." 
-], -"type": "string" +"validationCheckDisabled": { +"description": "Make sure the Enum property value provided in the document is in the possile value list during document creation. The validation check runs by default.", +"type": "boolean" } }, "type": "object" }, -"AppsDynamiteSharedSegmentedMembershipCounts": { -"id": "AppsDynamiteSharedSegmentedMembershipCounts", +"GoogleCloudContentwarehouseV1EnumValue": { +"description": "Represents the string value of the enum field.", +"id": "GoogleCloudContentwarehouseV1EnumValue", "properties": { "value": { -"items": { -"$ref": "AppsDynamiteSharedSegmentedMembershipCount" -}, -"type": "array" +"description": "String value of the enum field. This must match defined set of enums in document schema using EnumTypeOptions.", +"type": "string" } }, "type": "object" }, -"AppsPeopleActivityBackendDestinationStream": { -"description": "A DestinationStream is a /namespace/id[0]/id[1]/.../id[n] that represents a collection of Activities. Example destinations: -The Profile Stream on http://plus.google.com/+JohnDoe/posts -A Square Stream on http://plus.google.com/squares/123 -A \"comment Stream\" (Fountain) on http://www.youtube.com/watch?id=123 It's possible for a single Activity to show in each of these destinations - and it might behave/look slightly differently for each one. Destinations can have their own business logic associated with them at both write-time and read-time server-side (these are documented below). Each DestinationStream is indexed and can be retrieved using the GetDestinationStreamRequest. For the repeated ID space indexing happens at all levels, e.g. if you have: /square/123/abc /square/123/efd /square/456 You can fetch /square/123/abc directly or /square/123 (which includes all Activities in both /square/123/abc and /square/123/efd), or even /square which retrieves all Activities in the Square namespace (visible for that user). 
On the storage layer, we represent DestinationStream as Channel (http://cs/#google3/social/common/channel/channel.proto), since the storage does not have the concept of a Stream. Both terms are used interchangeably within the service layer, but client of Social Infrastructure should use the term DestinationStream. Next ID: 3", -"id": "AppsPeopleActivityBackendDestinationStream", +"GoogleCloudContentwarehouseV1ExportToCdwPipeline": { +"description": "The configuration of exporting documents from the Document Warehouse to CDW pipeline.", +"id": "GoogleCloudContentwarehouseV1ExportToCdwPipeline", "properties": { -"id": { -"description": "The hierarchy of IDs. Each individual ID is \"flat\" and the repeated list defines the hierarchy. Namespaces define the \"validity\" of this hierachy (depth, naming convention, etc) and the server will reject invalid IDs.", +"docAiDataset": { +"description": "Optional. The CDW dataset resource name. This field is optional. If not set, the documents will be exported to Cloud Storage only. Format: projects/{project}/locations/{location}/processors/{processor}/dataset", +"type": "string" +}, +"documents": { +"description": "The list of all the resource names of the documents to be processed. 
Format: projects/{project_number}/locations/{location}/documents/{document_id}.", "items": { "type": "string" }, "type": "array" }, -"namespace": { -"enum": [ -"UNKNOWN_DESTINATION_NAMESPACE", -"SQUARES", -"FOUNTAIN", -"PROFILE", -"COLLEXIONS", -"TEST", -"HIGHLIGHT", -"SOCIETY", -"MEMEGEN", -"PHOTOS", -"SUPPLY_CHAIN_CENTRAL", -"PAISA", -"SOCIETY_CHAT", -"PLUS_ENTERPRISE_LOG", -"SEARCH_UGC", -"LOUPE", -"MINDSEARCH", -"SOS_LIVE_COMMENTS", -"SBE_LOADTEST", -"SYSTEM1", -"G_PLUS", -"YOUTUBE", -"EVENTS", -"DEPRECATED_COLLECTIONS", -"REVIEWS", -"BACKSTAGE", -"SPACES" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true +"exportFolderPath": { +"description": "The Cloud Storage folder path used to store the exported documents before being sent to CDW. Format: `gs:///`.", +"type": "string" +}, +"trainingSplitRatio": { +"description": "Ratio of training dataset split. When importing into Document AI Workbench, documents will be automatically split into training and test split category with the specified ratio. This field is required if doc_ai_dataset is set.", +"format": "float", +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1FetchAclRequest": { +"description": "Request message for DocumentService.FetchAcl", +"id": "GoogleCloudContentwarehouseV1FetchAclRequest", +"properties": { +"projectOwner": { +"description": "For Get Project ACL only. Authorization check for end user will be ignored when project_owner=true.", +"type": "boolean" +}, +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the end user, used to enforce access control for the service." 
+} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1FetchAclResponse": { +"description": "Response message for DocumentService.FetchAcl.", +"id": "GoogleCloudContentwarehouseV1FetchAclResponse", +"properties": { +"metadata": { +"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", +"description": "Additional information for the API invocation, such as the request tracking id." +}, +"policy": { +"$ref": "GoogleIamV1Policy", +"description": "The IAM policy." +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1FileTypeFilter": { +"description": "Filter for the specific types of documents returned.", +"id": "GoogleCloudContentwarehouseV1FileTypeFilter", +"properties": { +"fileType": { +"description": "The type of files to return.", +"enum": [ +"FILE_TYPE_UNSPECIFIED", +"ALL", +"FOLDER", +"DOCUMENT", +"ROOT_FOLDER" ], "enumDescriptions": [ -"To prevent issues with out-of-sync clients sending unexpected values.", -"Use for G+ community. The ID is the square gaia id. If this is a square substream, second ID is the substream stream ID. Write-time logic: -Verify that the AUTHOR is a member of this Square. Read-time logic: -Fetch the name of the Square and substream.", -"For Blogger Fountain, the ID is the canonical URL of the site. For Distiller, the ID is \"youtube\"/channel_id/video_id. For Distiller Discussion Page, the ID is \"youtube\"/channel_id/\"channel\". For Distiller Private Msg, the ID is \"youtube\"/channel_id/\"private\". For Backstage, the ID is \"youtube\"/channel_id/\"backstage\". For Live, the ID is \"youtube\"/channel_id/\"live\"/\"chat\" and \"youtube\"/channel_id/video_id/\"live\"/\"chat\".", -"Used for Profile Stream fetches. The ID is the (decimal) Gaia ID of the user whose profile is being fetched.", -"Used for Collexions feature (go/g+c) ID is the Collexion ID to which the post is assigned. These IDs are allocated from their own id space maintained via UniqueId. 
They are NOT gaia ids, nor do they need to be used in conjunction with a gaia id when identifying a collection. In general a post will be assigned to at most 1 collexion, however there do exist corner cases where multi-inclusion can occur.", -"Used for test data for clients to prototype with. This should only ever be used in the SBE_PLAYGROUND corpus (go/sbe-playground), or in tests.", -"Used for Search Highlight (go/seng-highlight). The ID is the account id to which a highlight belongs.", -"Used for Society (go/society) ID is an s2 cell id (go/s2), optionally with an additional ID of a \"topic\" string within that cell.", -"Used for Memegen integration with SBE (go/memegen-sbe-onboarding) The ID is \"meme\" followed by the meme id to which a comment belongs.", -"Used for Photos integration with SBE (go/envelope-comments-design-doc)", -"Used for Supply Chain Central commenting capability (go/scc-comment). ID format: - For purchase order app: resource/purchase_order//line_item/ - For data exchange app: application/data_exchange/report/", -"Used for Paisa integration with SBE (go/paisa-stanza-plan)", -"Use for Society Chat (http://go/society-chat). The ID format is /society/users/private/.", -"Use for G+ Enterprise Log API Event API (http://go/gplus_interaction_event_sync_queue) The ID format is /enterprise//", -"Used for UGC for knowledge (go/knowledge-ugc). The ID format is /livestream/sports/", -"Loupe go/sbe-loupe-requirements.", -"Mindsearch go/india-qa.", -"Used for SOS Live comments (go/live-comments-sos-alerts). The ID format is /livestream/sos/", -"Used for SBE loadtests.", -"Used for System 1 (go/sys1-sbe)", -"Use if you want the Activity to show in the regular G+ product, i.e. the main Stream, user's profile streams, the G+ search stream, etc. No ID should be used with this DestinationNamespace.", -"Used to serve consumption stream on Youtube homepage. 
The stream is essentially a user's consumption stream with restricts set during write-time indexing such that only activities with Youtube video are retrieved during read-time. Deprecated - Do not use.", -"DEPRECATED - once used for the G+ Events feature, but never launched.", -"It is deprecated and replaced by COLLEXIONS.", -"Used for Review Stream fetches. The ID is the canonical representation of the entity being reviewed (for place reviews, it is cluster_id).", -"Deprecated - do not use. Backstage uses FOUNTAIN DestinationNamespace.", -"Deprecated. Used for Spaces (go/spaces) ID is the space ID in which the post is made. Posts will reside in exactly one space." +"Default document type. If set, disables the filter.", +"Returns all document types, including folders.", +"Returns only folders.", +"Returns only non-folder documents.", +"Returns only root folders" ], "type": "string" } }, "type": "object" }, -"AppsPeopleActivityStreamqualityDistillerEngagements": { -"description": "Stores the number of different kind of user engagement actions. Abuse Report is also consider an engagement. 
Currently we only have abuse report engagements but in future we might add other types of engagements as well.", -"id": "AppsPeopleActivityStreamqualityDistillerEngagements", +"GoogleCloudContentwarehouseV1FloatArray": { +"description": "Float values.", +"id": "GoogleCloudContentwarehouseV1FloatArray", "properties": { -"reportCompromised": { -"description": "Corresponds on \"This account might be compromised or hacked\" reporting action.", -"format": "int64", -"type": "string" +"values": { +"description": "List of float values.", +"items": { +"format": "float", +"type": "number" }, -"reportHarassment": { -"description": "Corresponds on \"Harassment or bullying\" reporting action.", -"format": "int64", -"type": "string" +"type": "array" +} }, -"reportHate": { -"description": "Corresponds on \"Hate speach or graphic violence\" reporting action.", -"format": "int64", +"type": "object" +}, +"GoogleCloudContentwarehouseV1FloatTypeOptions": { +"description": "Configurations for a float property.", +"id": "GoogleCloudContentwarehouseV1FloatTypeOptions", +"properties": {}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1GcsIngestPipeline": { +"description": "The configuration of the Cloud Storage Ingestion pipeline.", +"id": "GoogleCloudContentwarehouseV1GcsIngestPipeline", +"properties": { +"inputPath": { +"description": "The input Cloud Storage folder. All files under this folder will be imported to Document Warehouse. Format: `gs:///`.", "type": "string" }, -"reportPorn": { -"description": "Corresponds on \"Pornography or sexually explicit material\" reporting action.", -"format": "int64", +"pipelineConfig": { +"$ref": "GoogleCloudContentwarehouseV1IngestPipelineConfig", +"description": "Optional. The config for the Cloud Storage Ingestion pipeline. It provides additional customization options to run the pipeline and can be skipped if it is not applicable." +}, +"processorType": { +"description": "The Doc AI processor type name. 
Only used when the format of ingested files is Doc AI Document proto format.", "type": "string" }, -"reportSpam": { -"description": "Corresponds on \"Unwanted commercial content or spam\" reporting action.", -"format": "int64", +"schemaName": { +"description": "The Document Warehouse schema resource name. All documents processed by this pipeline will use this schema. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", "type": "string" }, -"serveCount": { -"deprecated": true, -"description": "Number of times this activity was served out of asbe/stanza.", -"format": "int64", +"skipIngestedDocuments": { +"description": "The flag whether to skip ingested documents. If it is set to true, documents in Cloud Storage contains key \"status\" with value \"status=ingested\" in custom metadata will be skipped to ingest.", +"type": "boolean" +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1GcsIngestWithDocAiProcessorsPipeline": { +"description": "The configuration of the Cloud Storage Ingestion with DocAI Processors pipeline.", +"id": "GoogleCloudContentwarehouseV1GcsIngestWithDocAiProcessorsPipeline", +"properties": { +"extractProcessorInfos": { +"description": "The extract processors information. One matched extract processor will be used to process documents based on the classify processor result. If no classify processor is specified, the first extract processor will be used.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo" +}, +"type": "array" +}, +"inputPath": { +"description": "The input Cloud Storage folder. All files under this folder will be imported to Document Warehouse. Format: `gs:///`.", "type": "string" }, -"timeSec": { -"description": "Timestamp in seconds for which time this record is valid.", -"format": "int64", +"pipelineConfig": { +"$ref": "GoogleCloudContentwarehouseV1IngestPipelineConfig", +"description": "Optional. 
The config for the Cloud Storage Ingestion with DocAI Processors pipeline. It provides additional customization options to run the pipeline and can be skipped if it is not applicable." +}, +"processorResultsFolderPath": { +"description": "The Cloud Storage folder path used to store the raw results from processors. Format: `gs:///`.", "type": "string" }, -"ytThumbsDown": { -"description": "Corresponds on Distiller comment thumbs down action.", -"format": "int64", +"skipIngestedDocuments": { +"description": "The flag whether to skip ingested documents. If it is set to true, documents in Cloud Storage contains key \"status\" with value \"status=ingested\" in custom metadata will be skipped to ingest.", +"type": "boolean" +}, +"splitClassifyProcessorInfo": { +"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo", +"description": "The split and classify processor information. The split and classify result will be used to find a matched extract processor." +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1GetDocumentRequest": { +"description": "Request message for DocumentService.GetDocument.", +"id": "GoogleCloudContentwarehouseV1GetDocumentRequest", +"properties": { +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the end user, used to enforce access control for the service." +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1HistogramQuery": { +"description": "The histogram request.", +"id": "GoogleCloudContentwarehouseV1HistogramQuery", +"properties": { +"filters": { +"$ref": "GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter", +"description": "Optional. Filter the result of histogram query by the property names. It only works with histogram query count('FilterableProperties'). It is an optional. It will perform histogram on all the property names for all the document schemas. Setting this field will have a better performance." 
+}, +"histogramQuery": { +"description": "An expression specifies a histogram request against matching documents for searches. See SearchDocumentsRequest.histogram_queries for details about syntax.", "type": "string" +}, +"requirePreciseResultSize": { +"description": "Controls if the histogram query requires the return of a precise count. Enable this flag may adversely impact performance. Defaults to true.", +"type": "boolean" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAbout": { -"id": "AppsPeopleOzExternalMergedpeopleapiAbout", +"GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter": { +"id": "GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter", "properties": { -"contentType": { +"documentSchemas": { +"description": "This filter specifies the exact document schema(s) Document.document_schema_name to run histogram query against. It is optional. It will perform histogram for property names for all the document schemas if it is not set. At most 10 document schema names are allowed. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", +"items": { +"type": "string" +}, +"type": "array" +}, +"propertyNames": { +"description": "It is optional. It will perform histogram for all the property names if it is not set. The properties need to be defined with the is_filterable flag set to true and the name of the property should be in the format: \"schemaId.propertyName\". The property needs to be defined in the schema. Example: the schema id is abc. 
Then the name of property for property MORTGAGE_TYPE will be \"abc.MORTGAGE_TYPE\".", +"items": { +"type": "string" +}, +"type": "array" +}, +"yAxis": { +"description": "By default, the y_axis is HISTOGRAM_YAXIS_DOCUMENT if this field is not set.", "enum": [ -"TEXT_PLAIN", -"TEXT_HTML" +"HISTOGRAM_YAXIS_DOCUMENT", +"HISTOGRAM_YAXIS_PROPERTY" ], "enumDescriptions": [ -"", -"" +"Count the documents per property name.", +"Count the properties per property name." ], "type": "string" +} }, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" +"type": "object" +}, +"GoogleCloudContentwarehouseV1HistogramQueryResult": { +"description": "Histogram result that matches HistogramQuery specified in searches.", +"id": "GoogleCloudContentwarehouseV1HistogramQueryResult", +"properties": { +"histogram": { +"additionalProperties": { +"format": "int64", +"type": "string" }, -"safeHtmlValue": { -"$ref": "WebutilHtmlTypesSafeHtmlProto", -"description": "Sanitized HTML value that is only populated when the SANITIZE_ABOUT_HTML extension is requested." +"description": "A map from the values of the facet associated with distinct values to the number of matching entries with corresponding value. The key format is: * (for string histogram) string values stored in the field.", +"type": "object" }, -"value": { +"histogramQuery": { +"description": "Requested histogram expression.", "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedData": { -"description": "Extension data for use in AboutMe.", -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedData", +"GoogleCloudContentwarehouseV1IngestPipelineConfig": { +"description": "The ingestion pipeline config.", +"id": "GoogleCloudContentwarehouseV1IngestPipelineConfig", "properties": { -"nameDisplayOptions": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataNameDisplayOptions" +"cloudFunction": { +"description": "The Cloud Function resource name. 
The Cloud Function needs to live inside consumer project and is accessible to Document AI Warehouse P4SA. Only Cloud Functions V2 is supported. Cloud function execution should complete within 5 minutes or this file ingestion may fail due to timeout. Format: `https://{region}-{project_id}.cloudfunctions.net/{cloud_function}` The following keys are available the request json payload. * display_name * properties * plain_text * reference_id * document_schema_name * raw_document_path * raw_document_file_type The following keys from the cloud function json response payload will be ingested to the Document AI Warehouse as part of Document proto content and/or related information. The original values will be overridden if any key is present in the response. * display_name * properties * plain_text * document_acl_policy * folder", +"type": "string" }, -"photosCompareData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareData" +"documentAclPolicy": { +"$ref": "GoogleIamV1Policy", +"description": "The document level acl policy config. This refers to an Identity and Access (IAM) policy, which specifies access controls for all documents ingested by the pipeline. The role and members under the policy needs to be specified. The following roles are supported for document level acl control: * roles/contentwarehouse.documentAdmin * roles/contentwarehouse.documentEditor * roles/contentwarehouse.documentViewer The following members are supported for document level acl control: * user:user-email@example.com * group:group-email@example.com Note that for documents searched with LLM, only single level user or group acl check is supported." }, -"profileEditability": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileEditability" +"enableDocumentTextExtraction": { +"description": "The document text extraction enabled flag. 
If the flag is set to true, DWH will perform text extraction on the raw document.", +"type": "boolean" }, -"profileNameModificationHistory": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileNameModificationHistory" +"folder": { +"description": "Optional. The name of the folder to which all ingested documents will be linked during ingestion process. Format is `projects/{project}/locations/{location}/documents/{folder_id}`", +"type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataNameDisplayOptions": { -"description": "See NameDisplayOptions in //depot/google3/focus/backend/proto/backend.proto. See also go/nickname-mess.", -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataNameDisplayOptions", +"GoogleCloudContentwarehouseV1InitializeProjectRequest": { +"description": "Request message for projectService.InitializeProject", +"id": "GoogleCloudContentwarehouseV1InitializeProjectRequest", "properties": { -"nicknameOption": { +"accessControlMode": { +"description": "Required. The access control mode for accessing the customer data", "enum": [ -"UNKNOWN_NICKNAME_OPTION", -"QUOTED_NICKNAME", -"PAREN_NICKNAME" +"ACL_MODE_UNKNOWN", +"ACL_MODE_UNIVERSAL_ACCESS", +"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_BYOID", +"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_GCI" ], "enumDescriptions": [ -"", -"Include the nickname, in quotes, in the display name. In English, the nickname appears between the given and family names. Example: Victor \"Pug\" Henry.", -"Include the nickname, in parentheses, in the display name. In English, the nickname appears after the given and family names. Example: Victor Henry (Pug)." +"This value is required by protobuf best practices", +"Universal Access: No document level access control.", +"Document level access control with customer own Identity Service.", +"Document level access control using Google Cloud Identity." 
], "type": "string" -} }, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareData": { -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareData", -"properties": { -"diffData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareDataDiffData" +"databaseType": { +"description": "Required. The type of database used to store customer data", +"enum": [ +"DB_UNKNOWN", +"DB_INFRA_SPANNER", +"DB_CLOUD_SQL_POSTGRES" +], +"enumDeprecated": [ +false, +false, +true +], +"enumDescriptions": [ +"This value is required by protobuf best practices", +"Internal Spanner", +"Cloud Sql with a Postgres Sql instance" +], +"type": "string" }, -"highResUrl": { +"documentCreatorDefaultRole": { +"description": "Optional. The default role for the person who create a document.", +"enum": [ +"DOCUMENT_CREATOR_DEFAULT_ROLE_UNSPECIFIED", +"DOCUMENT_ADMIN", +"DOCUMENT_EDITOR", +"DOCUMENT_VIEWER" +], +"enumDescriptions": [ +"Unspecified, will be default to document admin role.", +"Document Admin, same as contentwarehouse.googleapis.com/documentAdmin.", +"Document Editor, same as contentwarehouse.googleapis.com/documentEditor.", +"Document Viewer, same as contentwarehouse.googleapis.com/documentViewer." +], "type": "string" }, -"inconsistentPhoto": { -"description": "True if photo diff is greater than 0.01 on any color band, or if the user has a low res photo but no high res photo. This field is primarily for use in About Me and for other uses it's recommended to use the DiffData values directly instead. The cutoff is based on a heuristic determined in go/comparing-profile-photos", +"enableCalUserEmailLogging": { +"description": "Optional. Whether to enable CAL user email logging.", "type": "boolean" }, -"lowResData": { -"description": "Only present if the photo diff is greater than 0.01 on any color band.", -"format": "byte", +"kmsKey": { +"description": "Optional. The KMS key used for CMEK encryption. 
It is required that the kms key is in the same region as the endpoint. The same key will be used for all provisioned resources, if encryption is available. If the kms_key is left empty, no encryption will be enforced.", "type": "string" +} }, -"lowResUrl": { -"type": "string" +"type": "object" }, -"monogramUrl": { +"GoogleCloudContentwarehouseV1InitializeProjectResponse": { +"description": "Response message for projectService.InitializeProject", +"id": "GoogleCloudContentwarehouseV1InitializeProjectResponse", +"properties": { +"message": { +"description": "The message of the project initialization process.", "type": "string" }, -"privateLowResAcl": { -"description": "True if the low-res photo has a private ACL set.", -"type": "boolean" +"state": { +"description": "The state of the project initialization process.", +"enum": [ +"STATE_UNSPECIFIED", +"SUCCEEDED", +"FAILED", +"CANCELLED", +"RUNNING" +], +"enumDescriptions": [ +"Clients should never see this.", +"Finished project initialization without error.", +"Finished project initialization with an error.", +"Client canceled the LRO.", +"Ask the customer to check the operation for results." 
+], +"type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareDataDiffData": { -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataPhotosCompareDataDiffData", +"GoogleCloudContentwarehouseV1IntegerArray": { +"description": "Integer values.", +"id": "GoogleCloudContentwarehouseV1IntegerArray", "properties": { -"blueDiff": { -"format": "float", -"type": "number" +"values": { +"description": "List of integer values.", +"items": { +"format": "int32", +"type": "integer" }, -"greenDiff": { -"format": "float", -"type": "number" +"type": "array" +} }, -"redDiff": { -"format": "float", -"type": "number" +"type": "object" +}, +"GoogleCloudContentwarehouseV1IntegerTypeOptions": { +"description": "Configurations for an integer property.", +"id": "GoogleCloudContentwarehouseV1IntegerTypeOptions", +"properties": {}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1InvalidRule": { +"description": "A triggered rule that failed the validation check(s) after parsing.", +"id": "GoogleCloudContentwarehouseV1InvalidRule", +"properties": { +"error": { +"description": "Validation error on a parsed expression.", +"type": "string" +}, +"rule": { +"$ref": "GoogleCloudContentwarehouseV1Rule", +"description": "Triggered rule." } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileEditability": { -"description": "See UserEditedLockedMask in //depot/google3/focus/backend/proto/backend.proto.", -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileEditability", +"GoogleCloudContentwarehouseV1ListDocumentSchemasResponse": { +"description": "Response message for DocumentSchemaService.ListDocumentSchemas.", +"id": "GoogleCloudContentwarehouseV1ListDocumentSchemasResponse", "properties": { -"lockedField": { -"description": "Read-only set of zero or more field paths that are locked for update on this person, such as \"person.name\", \"person.email\", etc. 
The set of fields is only populated for the requester's profile. Fields in the set cannot be edited, added, or deleted from the profile. Attempting to update any of these fields will result in an exception.", +"documentSchemas": { +"description": "The document schemas from the specified parent.", "items": { -"type": "string" +"$ref": "GoogleCloudContentwarehouseV1DocumentSchema" }, "type": "array" +}, +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileNameModificationHistory": { -"description": "See ABUSE_NAME_LAST_MODIFIED in //depot/google3/focus/backend/proto/backend.proto which maps to //depot/google3/focus/proto/profileattribute.proto", -"id": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedDataProfileNameModificationHistory", +"GoogleCloudContentwarehouseV1ListLinkedSourcesRequest": { +"description": "Response message for DocumentLinkService.ListLinkedSources.", +"id": "GoogleCloudContentwarehouseV1ListLinkedSourcesRequest", "properties": { -"computedNameChangesRemaining": { -"description": "The number of name changes remaining at RPC request time. This can be more than name_changes_remaining, if user hasn't changed name for some time and accrued quota since last change.", +"pageSize": { +"description": "The maximum number of document-links to return. The service may return fewer than this value. If unspecified, at most 50 document-links will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "type": "integer" }, -"computedNicknameChangesRemaining": { -"description": "The number of nickname changes remaining at RPC request time. 
This can be more than nickname_changes_remaining, if user hasn't changed nickname for some time and accrued quota since last change.", -"format": "int32", -"type": "integer" +"pageToken": { +"description": "A page token, received from a previous `ListLinkedSources` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListLinkedSources` must match the call that provided the page token.", +"type": "string" }, -"nameChangesRemaining": { -"description": "The number of name changes remaining at the time the name was last modified.", -"format": "int32", -"type": "integer" +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the document creator, used to enforce access control for the service." +} }, -"nameLastModified": { -"description": "The last time the profile name was modified in milliseconds UTC.", -"format": "int64", -"type": "string" +"type": "object" }, -"nicknameChangesRemaining": { -"description": "The number of nickname changes remaining at the time the nickname was last modified.", -"format": "int32", -"type": "integer" +"GoogleCloudContentwarehouseV1ListLinkedSourcesResponse": { +"description": "Response message for DocumentLinkService.ListLinkedSources.", +"id": "GoogleCloudContentwarehouseV1ListLinkedSourcesResponse", +"properties": { +"documentLinks": { +"description": "Source document-links.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1DocumentLink" }, -"nicknameLastModified": { -"description": "The last time the profile nickname was modified in milliseconds UTC.", -"format": "int64", -"type": "string" +"type": "array" }, -"quotaEnforcementStatus": { -"enum": [ -"UNKNOWN_QUOTA_ENFORCEMENT_STATUS", -"ENFORCED", -"NOT_ENFORCED", -"NOT_APPLICABLE" -], -"enumDescriptions": [ -"", -"Name change quota is enforced.", -"Name change quota exists but is not enforced. 
This is used for users suspended due to abusive names, where users are allowed to change their names back to non-abusive state without being charged against the quota.", -"Name change quota does not apply. This is used for premium dasher users." -], +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAccountEmail": { -"id": "AppsPeopleOzExternalMergedpeopleapiAccountEmail", +"GoogleCloudContentwarehouseV1ListLinkedTargetsRequest": { +"description": "Request message for DocumentLinkService.ListLinkedTargets.", +"id": "GoogleCloudContentwarehouseV1ListLinkedTargetsRequest", "properties": { -"email": { -"type": "string" +"requestMetadata": { +"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", +"description": "The meta information collected about the document creator, used to enforce access control for the service." } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAdditionalContainerInfo": { -"description": "Additional information about a container. 
TO BE DELETED: replaced by DeviceContactInfo.", -"id": "AppsPeopleOzExternalMergedpeopleapiAdditionalContainerInfo", +"GoogleCloudContentwarehouseV1ListLinkedTargetsResponse": { +"description": "Response message for DocumentLinkService.ListLinkedTargets.", +"id": "GoogleCloudContentwarehouseV1ListLinkedTargetsResponse", "properties": { -"rawDeviceContactInfo": { -"deprecated": true, -"description": "When the container is a DEVICE_CONTACT, this list provides account information from the raw contact which is the source of this field.", +"documentLinks": { +"description": "Target document-links.", "items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRawDeviceContactInfo" +"$ref": "GoogleCloudContentwarehouseV1DocumentLink" }, "type": "array" +}, +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAddress": { -"id": "AppsPeopleOzExternalMergedpeopleapiAddress", +"GoogleCloudContentwarehouseV1ListRuleSetsResponse": { +"description": "Response message for RuleSetService.ListRuleSets.", +"id": "GoogleCloudContentwarehouseV1ListRuleSetsResponse", "properties": { -"country": { +"nextPageToken": { +"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" }, -"countryCode": { -"type": "string" +"ruleSets": { +"description": "The rule sets from the specified parent.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1RuleSet" }, -"encodedPlaceId": { -"description": "FeatureId associated with the address. 
The format is the same as that used for ids in PLACE containers in SourceIdentity.", -"type": "string" +"type": "array" +} }, -"extendedAddress": { -"type": "string" +"type": "object" }, -"formatted": { +"GoogleCloudContentwarehouseV1ListSynonymSetsResponse": { +"description": "Response message for SynonymSetService.ListSynonymSets.", +"id": "GoogleCloudContentwarehouseV1ListSynonymSetsResponse", +"properties": { +"nextPageToken": { +"description": "A page token, received from a previous `ListSynonymSets` call. Provide this to retrieve the subsequent page.", "type": "string" }, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" +"synonymSets": { +"description": "The synonymSets from the specified parent.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1SynonymSet" }, -"locality": { -"type": "string" +"type": "array" +} }, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" +"type": "object" }, -"poBox": { +"GoogleCloudContentwarehouseV1LockDocumentRequest": { +"description": "Request message for DocumentService.LockDocument.", +"id": "GoogleCloudContentwarehouseV1LockDocumentRequest", +"properties": { +"collectionId": { +"description": "The collection the document connects to.", "type": "string" }, -"pointSpec": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPointSpec" +"lockingUser": { +"$ref": "GoogleCloudContentwarehouseV1UserInfo", +"description": "The user information who locks the document." +} }, -"postalCode": { -"type": "string" +"type": "object" }, -"region": { -"type": "string" +"GoogleCloudContentwarehouseV1MapProperty": { +"description": "Map property value. 
Represents a structured entries of key value pairs, consisting of field names which map to dynamically typed values.", +"id": "GoogleCloudContentwarehouseV1MapProperty", +"properties": { +"fields": { +"additionalProperties": { +"$ref": "GoogleCloudContentwarehouseV1Value" }, -"streetAddress": { -"type": "string" +"description": "Unordered map of dynamically typed values.", +"type": "object" +} }, -"type": { -"description": "The type of the address. The type can be free form or one of these predefined values: * `home` * `work` * `other`", -"type": "string" +"type": "object" +}, +"GoogleCloudContentwarehouseV1MapTypeOptions": { +"description": "Configurations for a Map property.", +"id": "GoogleCloudContentwarehouseV1MapTypeOptions", +"properties": {}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1MergeFieldsOptions": { +"description": "Options for merging updated fields.", +"id": "GoogleCloudContentwarehouseV1MergeFieldsOptions", +"properties": { +"replaceMessageFields": { +"description": "When merging message fields, the default behavior is to merge the content of two message fields together. If you instead want to use the field from the source message to replace the corresponding field in the destination message, set this flag to true. When this flag is set, specified submessage fields that are missing in source will be cleared in destination.", +"type": "boolean" +}, +"replaceRepeatedFields": { +"description": "When merging repeated fields, the default behavior is to append entries from the source repeated field to the destination repeated field. If you instead want to keep only the entries from the source repeated field, set this flag to true. 
If you want to replace a repeated field within a message field on the destination message, you must set both replace_repeated_fields and replace_message_fields to true, otherwise the repeated fields will be appended.", +"type": "boolean" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAffinity": { -"description": "Similar to social.graph.storage.Affinity, but pared down to what the clients of the People API are interested in.", -"id": "AppsPeopleOzExternalMergedpeopleapiAffinity", +"GoogleCloudContentwarehouseV1ProcessWithDocAiPipeline": { +"description": "The configuration of processing documents in Document Warehouse with DocAi processors pipeline.", +"id": "GoogleCloudContentwarehouseV1ProcessWithDocAiPipeline", "properties": { -"affinityMetadata": { -"$ref": "SocialGraphWireProtoPeopleapiAffinityMetadata", -"description": "Contains extra ranking information returned by DAS." +"documents": { +"description": "The list of all the resource names of the documents to be processed. 
Format: projects/{project_number}/locations/{location}/documents/{document_id}.", +"items": { +"type": "string" }, -"affinityType": { -"enum": [ -"AFFINITY_TYPE_UNKNOWN", -"EMAIL_AUTOCOMPLETE", -"CONTACTS_PLUS_FREQUENTLY_CONTACTED", -"CHAT_AUTOCOMPLETE", -"GPLUS_AUTOCOMPLETE", -"GLASS_AFFINITY", -"PEOPLE_AUTOCOMPLETE_SOCIAL", -"FIELD_AUTOCOMPLETE_SOCIAL", -"CONTACTS_PLUS_EMAIL", -"PHOTOS_PEOPLE_TO_SHARE_WITH_SUGGESTIONS", -"PHOTOS_FIELDS_TO_SHARE_WITH_SUGGESTIONS", -"INBOX_AFFINITY", -"DYNAMITE_AFFINITY", -"PHOTOS_SUGGESTIONS_AFFINITY", -"PHOTOS_SUGGESTED_TARGETS", -"PHOTOS_ASSISTANT_SUGGESTIONS_AFFINITY", -"DRIVE_AUTOCOMPLETE", -"WALLET_PEOPLE_TO_PAY_SUGGESTIONS", -"CONTACTS_PLUS_CONTACT_CENTRIC", -"POMEROY_AFFINITY", -"CALENDAR_AFFINITY", -"SPACES_APP_PEOPLE_AFFINITY", -"HOMEROOM_AFFINITY", -"PEOPLE_PLAYGROUND_AFFINITY", -"FAMILY_AFFINITY", -"CONTACTS_ASSISTANT_SUGGESTED_CONTACTS", -"TRIPS_AFFINITY", -"GOOGLE_VOICE_AFFINITY", -"PHOTOS_FACE_AFFINITY", -"G3DOC_AUTOCOMPLETE", -"LOUPE_SUGGESTIONS_AFFINITY", -"MAPS_SHARING_AFFINITY", -"CLOUD_SEARCH_AFFINITY", -"YOUTUBE_UNPLUGGED", -"JAM_AFFINITY", -"ITEM_SUGGEST_AFFINITY", -"ISSUE_TRACKER_AFFINITY", -"APPS_ASSISTANT_AFFINITY", -"APDL_CONTACT_CENTRIC_DEFAULT_AFFINITY", -"APDL_PROFILE_CENTRIC_DEFAULT_AFFINITY", -"SOCIAL_RECOVERY", -"TEZ_AFFINITY", -"NEWS_AFFINITY", -"ALLO_AFFINITY", -"GPLUS_PEOPLE_RECOMMENDATIONS", -"GPLUS_PEOPLE_RECOMMENDATIONS_SAME_DOMAIN", -"DRIVE_AFFINITY", -"PODIUM_AFFINITY", -"ZOOM_SIGHTS_EMAIL_AFFINITY", -"AIRDROME_AFFINITY", -"HANGOUTS_MEET_AFFINITY", -"GALLERY_AFFINITY", -"AGSA_AFFINITY", -"PAY_AFFINITY", -"SAVES_AFFINITY", -"JASPER_AFFINITY", -"GOOGLE_HOME_APP_AFFINITY", -"TOPAZ_TEAMS_AFFINITY", -"DYNAMITE_OUT_OF_DOMAIN_AFFINITY", -"GOOGLE_VOICE_SIRI_EXTENSION_AFFINITY", -"COURSE_KIT_AFFINITY", -"FORMS_AFFINITY", -"NOVITAS_AFFINITY", -"GTI_PEER_INTERACTIONS_AFFINITY", -"ANDROID_EMERGENCY_AFFINITY", -"DATA_STUDIO_AFFINITY", -"SPUR_AFFINITY", -"PLAY_GAMES_SERVICES_AFFINITY", 
-"GROUPS_ADD_MEMBER_AFFINITY", -"DUO_AFFINITY", -"MY_BUSINESS_AFFINITY", -"GMAIL_COMPOSE", -"NON_GPLUS_AFFINITY", -"ABUSE_AFFINITY", -"ABUSE_AFFINITY_LITE", -"CALENDAR_PEEK_AFFINITY", -"HUB_CALL_AFFINITY", -"GSUITE_WORKFLOW_AFFINITY", -"VR_POLY_PRO_AFFINITY", -"TASKS_AFFINITY", -"GOOGLE_ONE_AFFINITY", -"TRAVEL_AFFINITY", -"GEO_DISCOVERY_FOLLOW_AFFINITY", -"GMAIL_WEB_AFFINITY", -"ASSISTANT_SETTINGS_WEB_UI_AFFINITY", -"ARTIFEX_AFFINITY", -"CONTACT_STORE_DEFAULT_AFFINITY", -"CONTACT_STORE_SELF_EXCLUSIVE", -"PHOTOS_FACE_STALE_AFFINITY", -"LANDSPEEDER_AFFINITY", -"GOOGLE_FI_AFFINITY", -"CONTACTS_PLUS_DOMAIN_ONLY", -"PHOTOS_SUGGESTED_TARGETS_IN_APP_ONLY", -"SOCIETY_AFFINITY", -"NANDHI_TEST_SCHEDULER_AFFINITY", -"HIJACKING_HIGH_RISK_AFFINITY", -"TRUECOLOURS_AFFINITY", -"ESPRESSO_AFFINITY", -"TAG_AFFINITY", -"CORPBOT_AFFINITY", -"SHOPPING_LIST_AFFINITY", -"INTEGRATION_PLATFORM_AFFINITY", -"HOT_ORDERS_UI_AFFINITY", -"TELLY_MOBILE_APP_AFFINITY", -"NGA_SUGGESTION_RESOLUTION_AFFINITY", -"DUC_COMPANION_AFFINITY", -"TOG_AFFINITY", -"ANDROID_SYSTEM_INTELLIGENCE_AFFINITY", -"EARTH_AFFINITY", -"SHORTCUT_AFFINITY", -"CHROME_OS_SCALING_AFFINITY", -"SHOWTIME_AFFINITY", -"PLAY_GAMES_SERVICES_EXPERIMENTAL", -"GUPPEEPS_AFFINITY", -"NEST_AFFINITY", -"BLOGGER_AFFINITY", -"INDIVIDUAL_OUTGOING_INTERACTIONS_RECENCY_RANK", -"ASSISTANT_TOOLCHAIN_AFFINITY", -"CHAT_CONSERVER_FAVORITE_CONTACTS_AFFINITY", -"CHAT_CONSERVER_INVITEE_AFFINITY", -"GANTRY_AFFINITY", -"KINTARO_AFFINITY", -"KEEP_AFFINITY", -"INCIDENTFLOW_AFFINITY", -"DRIVE_MENTION_AFFINITY", -"DRIVE_LOOKUP_AFFINITY", -"PODCASTS_MANAGER_AFFINITY", -"EMAIL_AUTOCOMPLETE_GG", -"ONE_REVIEWER_TOOL_AFFINITY", -"ASSISTANT_FAMILY_VERTICAL_AFFINITY", -"STADIA_AFFINITY", -"ATLAS_AFFINITY", -"CONSTELLATION_AFFINITY", -"CORONADO_AFFINITY", -"WALLET_GOLDEN_GATE_AFFINITY", -"PUMICE_AFFINITY", -"DEMO_AFFINITY_DEFAULT_ALGO", -"DEMO_AFFINITY_DEFAULT_ALGO_DOMAIN_ONLY", -"DEMO_AFFINITY_EMAIL_ALGO", -"DEMO_AFFINITY_EMAIL_ALGO_DOMAIN_ONLY", 
-"BACKLIGHT_AFFINITY", -"DYNAMITE_GROUPS_AFFINITY", -"DYNAMITE_OUT_OF_DOMAIN_GROUPS_AFFINITY", -"GLOSSARY_MANAGER_AFFINITY", -"ONEDEV_WORKFLOW_AFFINITY", -"GSUITE_HUB_CALL_AFFINITY", -"AVALANCHE_AFFINITY", -"SANDTROUT_DEVICE_CONTACTS_AFFINITY", -"DYNAMITE_ROOM_AFFINITY", -"DESKBOOKING_AFFINITY", -"TEZ_EXTENDED_AFFINITY", -"DRIVE_PROFILE_ONLY_AFFINITY", -"OFFSEC_AFFINITY", -"GOOGLE_HOME_FAMILY_AFFINITY", -"ONEMARKET_CALENDAR_AFFINITY", -"GPAY_MERCHANT_CONSOLE_AFFINITY", -"WORDFLOW_AFFINITY", -"YOUTUBE_CREATOR_STUDIO_AFFINITY", -"BRICKS_AFFINITY", -"BUG_OBSERVER_AFFINITY", -"ALPHASCHEDULE_AFFINITY", -"BURROW_AFFINITY", -"TEAMSPACES_AFFINITY", -"GMAIL_SMARTADDRESS_REPLACE_AFFINITY", -"GMAIL_SMARTADDRESS_EXPAND_AFFINITY", -"ASSISTANT_OPA_AFFINITY", -"POLYGLOT_AFFINITY", -"TRANSLATION_MEMORY_MANAGER_AFFINITY", -"THREADIT_AFFINITY", -"RESOURCE_SYMPHONY_AFFINITY", -"HOUSEHOLD_CONTACTS_PICKER_AFFINITY", -"L10N_INFRA_SHARED_AFFINITY", -"WORK_TRACKER_AFFINITY", -"ARIANE_AFFINITY", -"DRIVE_ROOM_AFFINITY", -"MOMA_SEARCH_AFFINITY", -"COLAB_INTERNAL_AFFINITY", -"COLAB_EXTERNAL_AFFINITY", -"TALENT_GROW_AFFINITY", -"SOCIAL_CONNECTION_CHECKER_AFFINITY", -"GMS_PEOPLE_AFFINITY", -"ROCKET_LABS_AFFINITY", -"DYNAMITE_ROOM_AND_INDIVIDUAL_ONLY_AFFINITY", -"TEZ_PHONE_SEARCH_AFFINITY", -"MY_GOOGLE_FAMILIES_AFFINITY", -"DYNAMITE_UNIFIED_AFFINITY", -"SHORTCUT_SERVER_AFFINITY", -"LEGAL_CONTRACTS_AFFINITY", -"CALENDAR_WEB_AFFINITY", -"DATA_CATALOG_AFFINITY", -"BRIEF_API_AFFINITY", -"HARDWARE_MFG_DATA_VENUS_AFFINITY", -"BETTERBUG_AFFINITY", -"DCMS_AFFINITY", -"PLAY_BOOKS_PUBENG_AFFINITY", -"YAQS_AFFINITY", -"RESPONSIBLE_FEATURE_ACCESS_AFFINITY", -"PROSPER_AFFINITY", -"PEOPLE_TO_ADD_BIRTHDAY_FOR_AFFINITY", -"FLOURISH_AFFINITY", -"CAMPAIGN_MANAGEMENT_TOOL_AFFINITY", -"RECORDER_AFFINITY", -"PERSONAL_SUGGEST_FIRST_HOP_SOCIAL_VICINITY", -"PERSONAL_SUGGEST_EMAIL_AUTOCOMPLETE_SCORE", -"CLASSROOM_SEARCH_AFFINITY", -"HIRING_AFFINITY", -"DATACENTER_SOFTWARE_AFFINITY", -"PHOTOS_INVITE_AFFINITY", 
-"PHOTOS_PARTNER_SHARING_AFFINITY", -"MARKETING_WORKFLOWS_AFFINITY", -"INTROSPECT_AFFINITY", -"YOUTUBE_PARENT_TOOLS_AFFINITY", -"RELIABILITY_INSIGHTS_PST_AFFINITY", -"GMAIL_ANDROID_AFFINITY", -"CUSTOMER_CARE_PORTAL_AFFINITY", -"MOMAHOME_3C_AFFINITY", -"DIGITAL_CAR_KEY_AFFINITY", -"PLAY_BOOKS_DISTRIBUTION_AFFINITY", -"GOOGLE_ASSIGNMENTS_AFFINITY", -"TEST_FUSION_AFFINITY", -"PRODUCTION2020_UIE_AFFINITY", -"SPEAKEASY_AFFINITY", -"DOCS_TASKS_AFFINITY", -"DYNAMITE_SEARCH_AFFINITY", -"GPAY_RELEASE_OPS_AFFINITY", -"VOICE_PBX_AFFINITY", -"VOICE_WEB_AFFINITY", -"SKILLSSTACK_AFFINITY", -"WHOSTORY_AFFINITY", -"PHOTOS_PARTNER_SHARING_EMAIL_ONLY", -"MEMORIZE_AFFINITY", -"BETTANY_AFFINITY", -"BASECAMP_AFFINITY", -"DRIVE_SEARCH_FILTER_AFFINITY", -"CULTURE_EVENTS_CALENDAR_AFFINITY", -"DATABRIDGE_CONSOLE_AFFINITY", -"COMMSTAR_AFFINITY", -"CDDB_AFFINITY", -"DATA_STUDIO_SPACES_AFFINITY", -"SOJI_AFFINITY", -"PLAY_MOVIES_ANDROID_AFFINITY", -"DATA_STUDIO_DOMAIN_ONLY_AFFINITY", -"MONOSPACE_AFFINITY", -"MY_ACCOUNT_AFFINITY", -"NUDGEIT_CAMPAIGN_MANAGER_AFFINITY", -"LEGAL_CONTRACTS_EXTERNAL_AFFINITY", -"CONTACTS_TO_STAR_AFFINITY", -"DECS_AFFINITY", -"GSOX_MOCHI_AFFINITY", -"MEET_AFFINITY", -"PMW_TI_AFFINITY", -"DRIVE_SEARCH_FILTER_PERSON_ONLY", -"ACCESSIBILITY_TRACKER_AFFINITY", -"PLX_DATASOURCE_AFFINITY", -"DUCKIEWEB_AFFINITY", -"MEET_CALLING_AFFINITY", -"MATTERSPACE_AFFINITY", -"TRUSTED_CONTACTS_OOBE_AFFINITY", -"REFERRALS_AFFINITY", -"WAYMO_TRIAGE_TOOLING_AFFINITY", -"DATA_STUDIO_GAIA_ONLY_AFFINITY", -"TWENTYPERCENT_JOBPOSTINGS_AFFINITY", -"ENGAGEMENTS_AFFINITY", -"TRUSTED_CONTACTS_FL_AFFINITY", -"CALENDAR_WEB_TEAM_MEMBERS_AFFINITY", -"CLOUDCONNECT_AFFINITY", -"PERSONAL_AGENT_AFFINITY", -"MOBILE_HARNESS_AFFINITY", -"LOOKER_STUDIO_PRO_AFFINITY", -"SUPPORT_CLASSIFICATION_UI_AFFINITY", -"NOTEBOOKLM_AFFINITY", -"PLAYSPACE_LABS_AFFINITY", -"ZOMBIE_CLOUD_AFFINITY", -"RELATIONSHIPS_AFFINITY", -"APPS_WORKFLOW_AFFINITY", -"FLEETSCOPE_AFFINITY", -"CLOUD_SALES_GCLM_AFFINITY", 
-"DRIVE_NOTIFICATIONS_AFFINITY", -"TS_TOOL_INTAKE_AFFINITY", -"DYNAMITE_NEW_CONTACTS_AFFINITY", -"GENESIS_IOS_AFFINITY", -"BANKROLL_PROD_AFFINITY", -"CALENDAR_WEB_ROOM_AFFINITY", -"CHROME_MULTIPLAYER_AFFINITY", -"GEO_DATA_PORTAL_AFFINITY" +"type": "array" +}, +"exportFolderPath": { +"description": "The Cloud Storage folder path used to store the exported documents before being sent to CDW. Format: `gs:///`.", +"type": "string" +}, +"processorInfo": { +"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo", +"description": "The CDW processor information." +}, +"processorResultsFolderPath": { +"description": "The Cloud Storage folder path used to store the raw results from processors. Format: `gs:///`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1ProcessorInfo": { +"description": "The DocAI processor information.", +"id": "GoogleCloudContentwarehouseV1ProcessorInfo", +"properties": { +"documentType": { +"description": "The processor will process the documents with this document type.", +"type": "string" +}, +"processorName": { +"description": "The processor resource name. Format is `projects/{project}/locations/{location}/processors/{processor}`, or `projects/{project}/locations/{location}/processors/{processor}/processorVersions/{processorVersion}`", +"type": "string" +}, +"schemaName": { +"description": "The Document schema resource name. All documents processed by this processor will use this schema. 
Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudContentwarehouseV1ProjectStatus": { +"description": "Status of a project, including the project state, dbType, aclMode and etc.", +"id": "GoogleCloudContentwarehouseV1ProjectStatus", +"properties": { +"accessControlMode": { +"description": "Access control mode.", +"enum": [ +"ACL_MODE_UNKNOWN", +"ACL_MODE_UNIVERSAL_ACCESS", +"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_BYOID", +"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_GCI" +], +"enumDescriptions": [ +"This value is required by protobuf best practices", +"Universal Access: No document level access control.", +"Document level access control with customer own Identity Service.", +"Document level access control using Google Cloud Identity." +], +"type": "string" +}, +"databaseType": { +"description": "Database type.", +"enum": [ +"DB_UNKNOWN", +"DB_INFRA_SPANNER", +"DB_CLOUD_SQL_POSTGRES" ], "enumDeprecated": [ false, -true, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false +true ], "enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Will eventually be deprecated in favor of PHOTOS_SUGGESTED_TARGETS.", -"Will eventually be deprecated in favor of PHOTOS_SUGGESTED_TARGETS.", -"", -"", -"Will eventually be deprecated in favour of PHOTOS_SUGGESTED_TARGETS.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", 
-"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" +"This value is required by protobuf best practices", +"Internal Spanner", +"Cloud Sql with a Postgres Sql instance" ], "type": "string" }, -"containerId": { -"description": "The ID of the container", +"documentCreatorDefaultRole": { +"description": "The default role for the person who create a document.", +"type": "string" +}, +"location": { +"description": "The location of the queried project.", "type": "string" }, -"containerType": { -"description": "The type of container to which this affinity applies", +"qaEnabled": { +"description": "If the qa is enabled on this project.", +"type": "boolean" +}, +"state": { +"description": "State of the project.", "enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false +"PROJECT_STATE_UNSPECIFIED", +"PROJECT_STATE_PENDING", +"PROJECT_STATE_COMPLETED", +"PROJECT_STATE_FAILED", +"PROJECT_STATE_DELETING", +"PROJECT_STATE_DELETING_FAILED", +"PROJECT_STATE_DELETED", +"PROJECT_STATE_NOT_FOUND" ], "enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. 
This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." +"Default status, required by protobuf best practices.", +"The project is in the middle of a provision process.", +"All dependencies have been provisioned.", +"A provision process was previously initiated, but failed.", +"The project is in the middle of a deletion process.", +"A deleting process was initiated, but failed.", +"The project is deleted.", +"The project is not found." ], "type": "string" -}, -"loggingId": { -"description": "Used to log events for this affinity value, for disco diagnostic-purposes. See go/disco-diagnostics.", -"type": "string" -}, -"value": { -"description": "Affinity value. Frequently represented as an inverse ranking, sometimes with additional data encoded. If data_formats.affinity_formats.score_format is set to RAW_SCORE then the value will be the score returned by DAS.", -"format": "double", -"type": "number" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiAgeRangeType": { -"description": "Please read go/people-api-howto:age on how to get age data. Message for the `Person.age_range_repeated` field. Replaces the existing `Person.age_range` field.", -"id": "AppsPeopleOzExternalMergedpeopleapiAgeRangeType", +"GoogleCloudContentwarehouseV1Property": { +"description": "Property of a document.", +"id": "GoogleCloudContentwarehouseV1Property", "properties": { -"ageInYears": { -"description": "Please read go/people-api-howto:age on how to get age data. Age of the user. The field is set based on profile storage fields such as account birthday. 
If the source fields are not present, `age_in_years` will be left unset.", -"format": "int32", -"type": "integer" -}, -"ageOfConsentStatus": { -"deprecated": true, -"description": "Deprecated. Use go/supervised-accounts#capabilities-for-child-accounts instead. Denotes whether the user is under the region based Age of Consent. The user's region is based on ClientUserInfo.GlobalTos.AgreedLocation The age is inferred from Birthday field or CertifiedBornBefore field. The region based AoC is specified at go/aoc.", -"enum": [ -"AOC_STATUS_UNKNOWN", -"UNDER_AOC", -"AT_OR_ABOVE_AOC" -], -"enumDescriptions": [ -"We do not have enough information to determine the user's age. e.g. user has no declared age or region is not known.", -"The user is under the Age of Consent of the region the user is in.", -"The user is at or older than the Age of Consent of the region the user is in." -], -"type": "string" +"dateTimeValues": { +"$ref": "GoogleCloudContentwarehouseV1DateTimeArray", +"description": "Date time property values. It is not supported by CMEK compliant deployment." }, -"ageRange": { -"deprecated": true, -"description": "Deprecated. Please read go/people-api-howto:age on how to get age data. Age range is populated based on `account_birthday` and `certified_born_before`, which may not be set for dasher users.", -"enum": [ -"UNKNOWN", -"LESS_THAN_EIGHTEEN", -"TWENTY_ONE_OR_OLDER", -"EIGHTEEN_TO_TWENTY" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" +"enumValues": { +"$ref": "GoogleCloudContentwarehouseV1EnumArray", +"description": "Enum property values." }, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} +"floatValues": { +"$ref": "GoogleCloudContentwarehouseV1FloatArray", +"description": "Float property values." }, -"type": "object" +"integerValues": { +"$ref": "GoogleCloudContentwarehouseV1IntegerArray", +"description": "Integer property values." 
}, -"AppsPeopleOzExternalMergedpeopleapiAppUniqueInfo": { -"description": "Store all app unique info that are needed for app action fulfillment.", -"id": "AppsPeopleOzExternalMergedpeopleapiAppUniqueInfo", -"properties": { -"appUniqueId": { -"description": "Store the app unique id endpoint. This will be passed over to app to fulfill the action. For example, app_unique_id for Whatsapp will be \"11234567890@s.whatsapp.net\"", -"type": "string" +"mapProperty": { +"$ref": "GoogleCloudContentwarehouseV1MapProperty", +"description": "Map property values." }, -"contactInfoSource": { -"description": "Where this contact info was retrieved from. Note: only added for Assistant usage, and will not be populated by PAPI. This is due to the coupling between Assistant Proto, and PAPI proto. (//depot/google3/quality/qrewrite/servlets/proto/focus_name.proto)", -"enum": [ -"SOURCE_UNKNOWN", -"APP_SEARCH" -], -"enumDescriptions": [ -"", -"The contact info came from AppSearch." -], +"name": { +"description": "Required. Must match the name of a PropertyDefinition in the DocumentSchema.", "type": "string" }, -"displayAppUniqueId": { -"description": "Store third party endpoint that is displayed to users. For example, display_app_unique_id for Whatsapp will be \"Message +11234567890\".", -"type": "string" +"propertyValues": { +"$ref": "GoogleCloudContentwarehouseV1PropertyArray", +"description": "Nested structured data property values." }, -"label": { -"description": "Store third party endpoint label. For example, \"HOME\", \"WORK\"", -"type": "string" +"textValues": { +"$ref": "GoogleCloudContentwarehouseV1TextArray", +"description": "String/text property values." }, -"mimetype": { -"description": "Store mimetype of this endpoint. We will use this as the differentiator for Assistant to know whether to use the RawContact for messaging, call or video call. 
For example, send message mimetype for whatsapp: \"vnd.android.cursor.item/vnd.com.whatsapp.profile\" voice call mimetype for whatsapp: \"vnd.android.cursor.item/vnd.com.whatsapp.voip.call\"", -"type": "string" +"timestampValues": { +"$ref": "GoogleCloudContentwarehouseV1TimestampArray", +"description": "Timestamp property values. It is not supported by CMEK compliant deployment." } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiBestDisplayName": { -"description": "The best suggested name to use for the Person from the available source fields, which may include FileAs, Name, Org, Email, IM, Phone, ... Rough source container priority order is Contact, then Profile, then Place.", -"id": "AppsPeopleOzExternalMergedpeopleapiBestDisplayName", +"GoogleCloudContentwarehouseV1PropertyArray": { +"description": "Property values.", +"id": "GoogleCloudContentwarehouseV1PropertyArray", "properties": { -"containerType": { -"description": "The container the suggested name was sourced from", -"enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". 
In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"displayName": { -"description": "The display name. This name is intended to be the best name to display for this Person. It may be built from a variety of fields, even if those fields are not explicitly requested in the request mask. 
Generally, the display name is formatted in 'first last' format. If the name appears to be a CJK name (as determined by a heuristic), the 'last first' format will be used. There may be other cases that the 'last first' format is used which are not documented here. See the code at: http://google3/java/com/google/focus/backend/client/DisplayNameFormatter.java?l=659&rcl=351360938", -"type": "string" +"properties": { +"description": "List of property values.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1Property" }, -"displayNameLastFirst": { -"description": "The display name, always in 'last first' format. This field does not depend on the format of `display_name` and will always be in 'last first' format.", -"type": "string" +"type": "array" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiBirthday": { -"description": "IMPORTANT NOTES: - Requesting person.birthday requires membership in the purpose limited data ACL group sgbe-ac-d-birthday-(read|mutate). Contact people-api-eng@ for assistance with initial setup. - The birthday field should not be used to calculate the requester's age! To determine the requester's age, use person.age_range_repeated. - For more details about age see go/peopleapi-howto/age Birthday value may not be present: - Consumer users generally required to have account birthday set (required at account creation), though some users created via legacy flows may not have birthday present. - Dasher users generally don't require birthday, but could optionally have it set by users. - Any other types of accounts (e.g. robot, service) do not have birthdays. - Account Birthday field may be present but without birthday value set for grace period birthday (provisional new birthday). For users that do have birthday data: - \"Profile Birthday\" (person.birthday.metadata.container is PROFILE) may not have a year set if user \"hides\" the year. 
- \"Account Birthday\" (see api-specific notes below) will only be returned for the requester's own profile. - People API (go/peopleapi): * Account birthday is only supported in GetPeople for PeopleAPI. * If account birthday is needed, use a request mask with: `include_field { paths: \"person.birthday\" }` `include_container: ACCOUNT` - People API++ (go/peopleapi++): * Account birthday is supported for most apis in PeopleAPI++. * If account birthday is needed, use a request mask with: `include_field { paths: \"person.account_birthday\" }` `include_container: PROFILE` (note: it will also need `include_container: DOMAIN_PROFILE` because they must be requested together: go/people-api-masks#profile-domain_profile) - See go/papi-vs-papi++#birthday for more details.", -"id": "AppsPeopleOzExternalMergedpeopleapiBirthday", +"GoogleCloudContentwarehouseV1PropertyDefinition": { +"description": "Defines the metadata for a schema property.", +"id": "GoogleCloudContentwarehouseV1PropertyDefinition", "properties": { -"ageDisableGracePeriod": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiBirthdayAgeDisableGracePeriod", -"description": "Only supported for PROFILE/DOMAIN_PROFILE/ACCOUNT container." -}, -"birthdayDecoration": { -"$ref": "SocialGraphApiProtoBirthdayDecoration", -"description": "Whether the user has opted in to display their birthday via photo decorations. Only supported for PROFILE/DOMAIN_PROFILE container." +"dateTimeTypeOptions": { +"$ref": "GoogleCloudContentwarehouseV1DateTimeTypeOptions", +"description": "Date time property. It is not supported by CMEK compliant deployment." }, -"birthdayResolution": { -"description": "Only supported for PROFILE/DOMAIN_PROFILE/ACCOUNT container.", -"enum": [ -"FULL", -"MONTH_AND_APPROXIMATED_YEAR", -"APPROXIMATED_YEAR" -], -"enumDescriptions": [ -"Birthday includes year, month, and day.", -"Birthday includes approximate year and month.", -"Birthday includes an approximate year." 
-], +"displayName": { +"description": "The display-name for the property, used for front-end.", "type": "string" }, -"calendarDay": { -"$ref": "GoogleTypeDate", -"description": "Birthdays are more accurately represented as a calendar day that does not depend on a timestamp representation at all. When given a timestamp, there are lots of opportunities to make mistakes, so a CalendarDay proto is replacing timestamps. Currently this is always returned by PeopleApi on reads that include birthday fields. New clients should write using calendar_day. Clients that were already writing via date_ms are allowlisted such that writes use that field. Old callers should migrate to writing BOTH date_ms and calendar_day values. If those are consistent, they may be removed from the 'legacy_timestamp_event_write_behavior_enabled' capability." +"enumTypeOptions": { +"$ref": "GoogleCloudContentwarehouseV1EnumTypeOptions", +"description": "Enum/categorical property." }, -"dateMs": { -"deprecated": true, -"description": "Birthdays are currently represented as timestamp values, although the interpretation of these timestamp values is a calendar date. Clients are recommended to read the calendar_day field, which is easier to work with than date_ms. New clients writing to PeopleApi must set calendar_day instead of date_ms. There are a few important details about how this value should be mapped to a calendar date that should be consistent among all clients. 1. Epoch - The epoch or calendar date equivalent to 0 ms is chosen to be 1970-01-01 UTC. 2. Timezone - All of the conversions to calendars should occur in the UTC timezone. We don't typically think of someones birthday changing when they travel, so clients should not use local times. 3. Calendar - The calendar used for the dates should be a Gregorian proleptic calendar. Proleptic means that the rules of the Gregorian calendar are retrofitted to before its adoption. 
It is easy to get this wrong, particularly with the java GregorianCalendar class, which by default is a mixed Gregorian/Julian calendar. Joda Time makes this easy, but if it's not an option, look into GregorianCalendar.setGregorianChange(). 4. Omitted years - Clients have chosen to represent birthdays or events without years as timestamps within the year zero. When the computed date has a year of 0, it means the client did not specify a year. Note that a year 0 does not exist in a chronology like the familiar Anno Domini (A.D. and B.C.); clients must agree on year numbering. 5. Year Numbering - The chronology used to map dates to the calendar should use Astronomical Year Numbering so that the year 0 is defined and dates before it have a negative year. If libraries only provide Anno Domini, then the year of 1 BC corresponds to year zero and an omitted user provided year. Other BC values are presumed rare, but clients should still not ignore the era and interpret the year as an A.D. value, especially if writing values back to PeopleApi.", -"format": "int64", -"type": "string" +"floatTypeOptions": { +"$ref": "GoogleCloudContentwarehouseV1FloatTypeOptions", +"description": "Float property." }, -"dateMsAsNumber": { -"deprecated": true, -"description": "date_ms_as_number contains the same data as date_ms, but has a different type in generated javascript bindings. Non javascript clients can ignore it.", -"format": "int64", -"type": "string" +"integerTypeOptions": { +"$ref": "GoogleCloudContentwarehouseV1IntegerTypeOptions", +"description": "Integer property." }, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" +"isFilterable": { +"description": "Whether the property can be filtered. If this is a sub-property, all the parent properties must be marked filterable.", +"type": "boolean" }, -"prompt": { -"$ref": "SocialGraphApiProtoPrompt", -"description": "People Prompts settings for contact birthday data. Only supported for CONTACT container." 
+"isMetadata": { +"description": "Whether the property is user supplied metadata. This out-of-the box placeholder setting can be used to tag derived properties. Its value and interpretation logic should be implemented by API user.", +"type": "boolean" }, -"value": { -"description": "Actual value entered. Allows unstructured values.", -"type": "string" -} +"isRepeatable": { +"description": "Whether the property can have multiple values.", +"type": "boolean" }, -"type": "object" +"isRequired": { +"description": "Whether the property is mandatory. Default is 'false', i.e. populating property value can be skipped. If 'true' then user must populate the value for this property.", +"type": "boolean" }, -"AppsPeopleOzExternalMergedpeopleapiBirthdayAgeDisableGracePeriod": { -"description": "Whether this field is set or not determines whether an account is in the grace period. While in the grace period, the user is unable to change their birthday on their own, and will be age-disabled if they don't act in a limited amount of time. Applies only to ServiceData Birthday. Users enter the grace period if they choose a birthday below the Age of Consent (go/aoc). After the grace period ends, the account will be age disabled. See go/age-disable-grace-period-dd.", -"id": "AppsPeopleOzExternalMergedpeopleapiBirthdayAgeDisableGracePeriod", -"properties": { -"calendarDay": { -"$ref": "GoogleTypeDate", -"description": "Provisional birthday `, `>=`, and `~~` where the left of the operator is a property name and the right of the operator is a number or a quoted string. You must escape backslash (\\\\) and quote (\\\") characters. `~~` is the LIKE operator. The right of the operator must be a string. The only supported property data type for LIKE is text_values. It provides semantic search functionality by parsing, stemming and doing synonyms expansion against the input query. It matches if the property contains semantic similar content to the query. 
It is not regex matching or wildcard matching. For example, \"property.company ~~ \\\"google\\\"\" will match records whose property `property.compnay` have values like \"Google Inc.\", \"Google LLC\" or \"Google Company\". Supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Boolean expressions (AND/OR/NOT) are supported up to 3 levels of nesting (for example, \"((A AND B AND C) OR NOT D) AND E\"), a maximum of 100 comparisons or functions are allowed in the expression. The expression must be < 6000 bytes in length. Only properties that are marked filterable are allowed (PropertyDefinition.is_filterable). Property names do not need to be prefixed by the document schema id (as is the case with histograms), however property names will need to be prefixed by its parent hierarchy, if any. For example: top_property_name.sub_property_name. Sample Query: `(LOWER(driving_license)=\"class \\\"a\\\"\" OR EMPTY(driving_license)) AND driving_years > 10` CMEK compliant deployment only supports: * Operators: `=`, `<`, `<=`, `>`, and `>=`. * Boolean expressions: AND and OR.", "type": "string" }, -"url": { +"documentSchemaName": { +"description": "The Document schema name Document.document_schema_name. 
Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedData": { -"id": "AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedData", +"GoogleCloudContentwarehouseV1PropertyTypeOptions": { +"description": "Configurations for a nested structured data property.", +"id": "GoogleCloudContentwarehouseV1PropertyTypeOptions", "properties": { -"callerIdSource": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedDataCallerIdSource", -"description": "Indicates which data source was used to populate the caller ID result" +"propertyDefinitions": { +"description": "Required. List of property definitions.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1PropertyDefinition" +}, +"type": "array" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedDataCallerIdSource": { -"id": "AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedDataCallerIdSource", +"GoogleCloudContentwarehouseV1PublishAction": { +"description": "Represents the action responsible for publishing messages to a Pub/Sub topic.", +"id": "GoogleCloudContentwarehouseV1PublishAction", "properties": { -"sourceType": { -"enum": [ -"UNKNOWN_SOURCE_TYPE", -"PLACE", -"SCOOBY_MANUAL", -"SCOOBY_GOOGLE_VOICE", -"SCOOBY_CSA", -"SCOOBY_KNOWLEDGE_GRAPH" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], +"messages": { +"description": "Messages to be published.", +"items": { "type": "string" -} }, -"type": "object" +"type": "array" }, -"AppsPeopleOzExternalMergedpeopleapiCertifiedBornBefore": { -"description": "Information related to domain administrator (or authority) certification of a users age.", -"id": "AppsPeopleOzExternalMergedpeopleapiCertifiedBornBefore", -"properties": { -"bornBefore": { -"description": "Indicates that the user was born at or before this time.", -"format": "google-datetime", +"topicId": { +"description": "The topic id in the 
Pub/Sub service for which messages will be published to.", "type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiChannelData": { -"id": "AppsPeopleOzExternalMergedpeopleapiChannelData", +"GoogleCloudContentwarehouseV1QAResult": { +"description": "Additional result info for the question-answering feature.", +"id": "GoogleCloudContentwarehouseV1QAResult", "properties": { -"channelId": { -"description": "Unique ID that corresponds to a Youtube channel.", -"type": "string" -}, -"commentCount": { -"description": "Number of comments for a given Youtube channel.", -"format": "int64", -"type": "string" -}, -"description": { -"description": "Description of the channel.", -"type": "string" -}, -"playlistCount": { -"format": "int64", -"type": "string" -}, -"profilePictureUrl": { -"description": "A FIFE URL pointing to the channel's profile image (go/avatar-fife-urls) with default fife url options. Also refer to go/people-api-concepts:photos for People API's FIFE best practices. The image could be up to a couple of days stale, though it is much fresher in practice. If a fresh image is required, contact the YouTubeAccountProfileService. The URL itself expires ~30 days after generation.", -"type": "string" -}, -"profileUrl": { -"description": "URL of user's Youtube channel profile.", -"type": "string" -}, -"subscriberCount": { -"description": "Number of subscribers for a given Youtube channel.", -"format": "int64", -"type": "string" -}, -"title": { -"description": "Title of the YouTube channel", -"type": "string" +"confidenceScore": { +"description": "The calibrated confidence score for this document, in the range [0., 1.]. 
This represents the confidence level for whether the returned document and snippet answers the user's query.", +"format": "float", +"type": "number" }, -"usesYoutubeNames": { -"description": "Whether or not the channel's profile has a title/avatar that is canonical in YouTube. Used to determine if the product profile card should be part of the core persona or have their own persona.", -"type": "boolean" +"highlights": { +"description": "Highlighted sections in the snippet.", +"items": { +"$ref": "GoogleCloudContentwarehouseV1QAResultHighlight" }, -"videoCount": { -"description": "Number of videos uploaded in a given Youtube channel.", -"format": "int64", -"type": "string" +"type": "array" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiCircleMembership": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore. See b/329513077.", -"id": "AppsPeopleOzExternalMergedpeopleapiCircleMembership", +"GoogleCloudContentwarehouseV1QAResultHighlight": { +"description": "A text span in the search text snippet that represents a highlighted section (answer context, highly relevant sentence, etc.).", +"id": "GoogleCloudContentwarehouseV1QAResultHighlight", "properties": { -"circleId": { -"description": "The circle that the person belongs to.", -"type": "string" +"endIndex": { +"description": "End index of the highlight, exclusive.", +"format": "int32", +"type": "integer" }, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" +"startIndex": { +"description": "Start index of the highlight.", +"format": "int32", +"type": "integer" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiClientData": { -"description": "Arbitrary client data that is populated based on the client", -"id": "AppsPeopleOzExternalMergedpeopleapiClientData", +"GoogleCloudContentwarehouseV1RemoveFromFolderAction": { +"description": "Represents the action responsible for remove a document from a specific folder.", +"id": 
"GoogleCloudContentwarehouseV1RemoveFromFolderAction", "properties": { -"key": { -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"namespace": { +"condition": { +"description": "Condition of the action to be executed.", "type": "string" }, -"value": { +"folder": { +"description": "Name of the folder under which new document is to be added. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiCommunicationEmail": { -"description": "Email for Google product communication with the user. This is only allowed in ServiceData. It is purely synthesized and read-only, and contains at most one field. It proxies from RawCommunicationEmail and only includes the primary field if exists. Otherwise if RawCommunicationEmail does not have primary, this includes a field synthesized from valid Gaia primary account email. Otherwise if Gaia primary account email is invalid, this field is empty. See go/comm-email-use for more details.", -"id": "AppsPeopleOzExternalMergedpeopleapiCommunicationEmail", +"GoogleCloudContentwarehouseV1RequestMetadata": { +"description": "Meta information is used to improve the performance of the service.", +"id": "GoogleCloudContentwarehouseV1RequestMetadata", "properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" +"userInfo": { +"$ref": "GoogleCloudContentwarehouseV1UserInfo", +"description": "Provides user unique identification and groups information." +} }, -"value": { +"type": "object" +}, +"GoogleCloudContentwarehouseV1ResponseMetadata": { +"description": "Additional information returned to client, such as debugging information.", +"id": "GoogleCloudContentwarehouseV1ResponseMetadata", +"properties": { +"requestId": { +"description": "A unique id associated with this call. 
This id is logged for tracking purpose.", "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiConnectionReminder": { -"description": "Contact-level people-prompts settings and contact-level connection reminders. Part of go/people-prompts.", -"id": "AppsPeopleOzExternalMergedpeopleapiConnectionReminder", +"GoogleCloudContentwarehouseV1Rule": { +"description": "Represents the rule for a content warehouse trigger.", +"id": "GoogleCloudContentwarehouseV1Rule", "properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"prompt": { -"description": "Contact-level \"reminder to connect\" prompts for this contact.", +"actions": { +"description": "List of actions that are executed when the rule is satisfied.", "items": { -"$ref": "SocialGraphApiProtoPrompt" +"$ref": "GoogleCloudContentwarehouseV1Action" }, "type": "array" -} }, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiContactCreateContextInfo": { -"description": "Contact creation timestamps and related metadata. See go/contact-edit-history. This message is a pure wrapper of the shared ContactCreactionContext message so that it can be a top-level person field. No other fields should be added to the message.", -"id": "AppsPeopleOzExternalMergedpeopleapiContactCreateContextInfo", -"properties": { -"contactCreateContext": { -"$ref": "SocialGraphApiProtoContactCreateContext" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiContactEditContextInfo": { -"description": "Contact edit timestamps and related metadata. See go/contact-edit-history. This message is a pure wrapper of the shared ContactCreactionContext message so that it can be a top-level person field. 
No other fields should be added to the message.", -"id": "AppsPeopleOzExternalMergedpeopleapiContactEditContextInfo", -"properties": { -"contactEditContext": { -"$ref": "SocialGraphApiProtoContactEditContext" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiContactGroupMembership": { -"description": "A membership that the person has. The person can be a member of multiple circles and multiple contact-groups. A circle membership is created by adding a person to a circle by person-id or by email. A contact-group membership is created by adding a contact to a contact-group.", -"id": "AppsPeopleOzExternalMergedpeopleapiContactGroupMembership", -"properties": { -"contactGroupId": { -"description": "The contact-group that the person belong to. The id can be either a hex-formatted id or a camel-cased SystemContactGroup predefined group name. The id will be predefined group name iff the system_contact_group_id has a value.", -"type": "string" -}, -"delegatedGroupInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDelegatedGroupInfo", -"description": "Information related to delegated group that this contact belongs to." -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"systemContactGroupId": { -"description": "This field will be populated when the membership is in a system-reserved contact-group.", -"enum": [ -"UNKNOWN", -"MY_CONTACTS", -"STARRED", -"FRIENDS", -"FAMILY", -"COWORKERS" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiContactPromptSettingsInfo": { -"description": "Contact level People Prompt settings. This message is a pure wrapper of the shared ContactPromptSettings message so that it can be a top-level person field. 
No other fields should be added to the message.", -"id": "AppsPeopleOzExternalMergedpeopleapiContactPromptSettingsInfo", -"properties": { -"contactPromptSettings": { -"$ref": "SocialGraphApiProtoContactPromptSettings" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiContactStateInfo": { -"description": "Contact state and related metadata. See go/fbs-contacts-trash. This message is a pure wrapper of the shared ContactState message so that it can be a top-level person field. No other fields should be added to the message.", -"id": "AppsPeopleOzExternalMergedpeopleapiContactStateInfo", -"properties": { -"contactState": { -"$ref": "SocialGraphApiProtoContactState" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiCoverPhoto": { -"description": "CoverPhoto is the long banner photo (also called full bleed photo) at the top of G+ profile page.", -"id": "AppsPeopleOzExternalMergedpeopleapiCoverPhoto", -"properties": { -"imageHeight": { -"format": "int32", -"type": "integer" -}, -"imageId": { -"type": "string" -}, -"imageUrl": { -"type": "string" -}, -"imageWidth": { -"format": "int32", -"type": "integer" -}, -"isAnimated": { -"type": "boolean" -}, -"isDefault": { -"type": "boolean" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiCustomSchemaField": { -"description": "Custom fields associated with a person, from the custom schema defined on the domain. See go/custompeopleapi and go/customfocus. 
NOTE: these are only updatable via Cloud Directory (go/cd).", -"id": "AppsPeopleOzExternalMergedpeopleapiCustomSchemaField", -"properties": { -"fieldDisplayName": { -"type": "string" -}, -"fieldId": { -"type": "string" -}, -"fieldType": { -"enum": [ -"CUSTOM_FIELD_TYPE_UNKNOWN", -"STRING", -"INT64", -"BOOL", -"DOUBLE", -"EMAIL", -"PHONE", -"DATE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"multiValued": { -"type": "boolean" -}, -"schemaDisplayName": { -"type": "string" -}, -"schemaId": { -"type": "string" -}, -"type": { -"description": "The type of the custom schema field. The type can be free form or one of these predefined values: * `home` * `other` * `work`", -"type": "string" -}, -"value": { -"description": "String representation of the value, based on FieldType", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiCustomerInfo": { -"description": "Contains customer data for profile owner proxied from D3.", -"id": "AppsPeopleOzExternalMergedpeopleapiCustomerInfo", -"properties": { -"customerId": { -"deprecated": true, -"description": "DEPRECATED. Use obfuscated_customer_id instead. If result has a GSuite Customer ID, this field will continue to be populated with -1 to indicate the presence of a value for backwards compatibility with clients in the wild. See b/144596193.", -"format": "int64", -"type": "string" -}, -"customerName": { -"description": "Customer organization name for dasher user.", -"type": "string" -}, -"obfuscatedCustomerId": { -"description": "Obfuscated FlexOrgs customer ID for Dasher user. 
See cs/symbol:CustomerIdObfuscator.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiDedupedContainerInfo": { -"description": "Container information for deduping. When two fields have the same value and only differ by field.metadata a service implementation can choose to avoid duplicating the fields and instead set field.metadata.other_deduped_containers This type can include information on the dedupe type (for example, strict value match vs. lenient value match)", -"id": "AppsPeopleOzExternalMergedpeopleapiDedupedContainerInfo", -"properties": { -"containerType": { -"description": "See SourceIdentity.container_type", -"enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. 
An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"id": { -"description": "See SourceIdentity.id", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiDelegatedGroupInfo": { -"description": "Information related to delegated group that this contact belongs to.", -"id": "AppsPeopleOzExternalMergedpeopleapiDelegatedGroupInfo", -"properties": { -"delegatedGroupId": { -"$ref": "SocialGraphApiProtoDelegatedGroupId", -"description": "Required. The additional id specifically for a delegated group." 
-} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiDeviceContactExtraMetadata": { -"description": "Extra metadata for an aggregated or raw device contact.", -"id": "AppsPeopleOzExternalMergedpeopleapiDeviceContactExtraMetadata", -"properties": { -"attributes": { -"description": "Attributes for this device contact.", -"items": { -"enum": [ -"ATTRIBUTE_UNKNOWN", -"STARRED" -], -"enumDescriptions": [ -"", -"This contact is starred." -], -"type": "string" -}, -"type": "array" -}, -"usageInfo": { -"description": "Usage info for this device contact.", -"items": { -"$ref": "SocialGraphApiProtoUsageInfo" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiDeviceContactId": { -"description": "Unique id for an aggregated device contact.", -"id": "AppsPeopleOzExternalMergedpeopleapiDeviceContactId", -"properties": { -"contactId": { -"description": "Aggregated device contact id on the source device.", -"format": "int64", -"type": "string" -}, -"deviceId": { -"description": "Source device id (go/client-instance-id) of this device contact.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiDeviceContactInfo": { -"description": "Device contact information.", -"id": "AppsPeopleOzExternalMergedpeopleapiDeviceContactInfo", -"properties": { -"deviceContactMetadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDeviceContactExtraMetadata", -"description": "Metadata for this device contact." -}, -"hasCrossDeviceData": { -"description": "Output only. True if any of the contact's phone, email or address fields can be used on devices other than the one it originated from. Note that there can be other fields, typically name, and metadata such as some of the raw_contact_infos that can be used on other devices. Assigned by the server.", -"type": "boolean" -}, -"id": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDeviceContactId", -"description": "Id of the device contact." 
-}, -"lastClientUpdateTime": { -"description": "Last time a device contact was updated on device.", -"format": "google-datetime", -"type": "string" -}, -"lookupKey": { -"description": "An opaque value used by the device to look up this contact if its row id changed as a result of a sync or aggregation. See: https://developer.android.com/reference/android/provider/ContactsContract.ContactsColumns.html#LOOKUP_KEY", -"type": "string" -}, -"rawContactInfo": { -"description": "Info about the raw device contacts that make up this device contact.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRawDeviceContactInfo" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfo": { -"id": "AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfo", -"properties": { -"containerId": { -"description": "The container ID of the entity this field creates a join to. See `SourceIdentity.id`.", -"type": "string" -}, -"containerType": { -"description": "The type of container that this edge points to. See `SourceIdentity.container_type`.", -"enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". 
In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"extendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfoExtensionData", -"description": "Data that is added to the proto by peopleapi read extensions." 
-}, -"materialized": { -"description": "True indicates this edge links this source to a container represented by this person object. Note: Except for certain legacy clients, EdgeKeyInfo is only created for for edges to an entity in this person and this will always be true.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfoExtensionData": { -"id": "AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfoExtensionData", -"properties": { -"gdataCompatibilityExtensionId": { -"description": "The GDataCompatibilityExtension will (temporarily) return mobile_owner_id for profile containers.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmail": { -"id": "AppsPeopleOzExternalMergedpeopleapiEmail", -"properties": { -"certificate": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmailCertificate" -}, -"type": "array" -}, -"classification": { -"enum": [ -"EMAIL_CLASSIFICATION_UNKNOWN", -"SIGNUP_EMAIL" -], -"enumDescriptions": [ -"", -"This is the email user provided when signing up with Google (ie. this email is stored in Gaia). There may be more than 1 signup email. This is set for an Email of container type PROFILE." -], -"type": "string" -}, -"contactGroupPreference": { -"description": "To read or update, use the CONTACT_GROUP_PREFERENCE mask field.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmailContactGroupPreference" -}, -"type": "array" -}, -"displayName": { -"type": "string" -}, -"extendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmailExtendedData" -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. 
See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"signupEmailMetadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmailSignupEmailMetadata" -}, -"type": { -"description": "The type of the email address. The type can be free form or one of these predefined values: * `home` * `work` * `other`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmailCertificate": { -"description": "Represents a S/MIME certificate config for use with Gmail. See //caribou/smime/proto/certificate_status.proto. There can be zero or more certificates associated with an email address, be it profile email or contact email.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmailCertificate", -"properties": { -"configurationName": { -"description": "The name of this certificate configuration. Examples could be \"High security level\" or \"For domain emails only\".", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata", -"description": "It is conceivable that certificates could be ACLed. We also need to indicate which certificate is the default. The PersonFieldMetadata can accomplish both of these." -}, -"status": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmailCertificateCertificateStatus" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmailCertificateCertificateStatus": { -"description": "Minimal S/MIME certificate status i.e. two fields per email address.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmailCertificateCertificateStatus", -"properties": { -"notAfterSec": { -"description": "The certificate expiration timestamp in seconds.", -"format": "int64", +"condition": { +"description": "Represents the conditional expression to be evaluated. Expression should evaluate to a boolean result. 
When the condition is true actions are executed. Example: user_role = \"hsbc_role_1\" AND doc.salary > 20000", "type": "string" }, -"statusCode": { -"description": "Current status of the email's certificate chain.", -"enum": [ -"UNKNOWN", -"CERTIFICATE_VALID", -"CERTIFICATE_MISSING", -"CERTIFICATE_EXPIRED", -"CERTIFICATE_REVOKED" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], +"description": { +"description": "Short description of the rule and its context.", "type": "string" -} -}, -"type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiEmailContactGroupPreference": { -"description": "Preferred email addresses for contact groups.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmailContactGroupPreference", -"properties": { -"contactGroupId": { +"ruleId": { +"description": "ID of the rule. It has to be unique across all the examples. This is managed internally.", "type": "string" }, -"isSynthetic": { -"description": "If the Preference was implicitly set by PeopleApi/Contacts Service. A preference with this bit will not be saved to storage. See go/contact-group-email-preference-papi-problem for more info.", -"type": "boolean" -}, -"type": { +"triggerType": { +"description": "Identifies the trigger type for running the policy.", "enum": [ "UNKNOWN", -"GMAIL" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmailExtendedData": { -"description": "Extension data for a person email.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmailExtendedData", -"properties": { -"internalExternal": { -"$ref": "PeoplestackFlexorgsProtoInternalExternal", -"description": "For use with the CUSTOMER_INFO_ADDITIONAL_DATA extension. This includes information on whether the given email is internal to or external to the requesting user's domain." 
-}, -"isPlaceholder": { -"description": "For ListPeoplebyKnownId to indicate an email is sythesized from a lookup email.", -"type": "boolean" -}, -"smtpServerSupportsTls": { -"description": "For use with the TLS extension. Whether the SMTP server that handles delivery for this email address supports TLS encryption.", -"type": "boolean" -}, -"usesConfusingCharacters": { -"description": "For use with the Gmail Homograph Warning extension. Whether the email contains mixed character sets that could be used to decieve users. This field is populated by the GMAIL_SECURITY_DATA extension.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmailSignupEmailMetadata": { -"description": "Additional metadata for a signup email. This will only be set if the email's classification is SIGNUP_EMAIL.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmailSignupEmailMetadata", -"properties": { -"primary": { -"description": "This is considered to be the primary signup email. At most 1 signup email will have this set.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiEmergencyInfo": { -"description": "Emergency info for Person. See go/emergency-trusted-contacts-papi.", -"id": "AppsPeopleOzExternalMergedpeopleapiEmergencyInfo", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"pomeroyId": { -"description": "Opaque id from Pomeroy (go/pomeroy). Non-empty pomeroy_id means that this contact has the potential to become trusted contact or it's already trusted contact. Trust is eventually gaia<->gaia link, but when the trust link is initiated gaia might not be known. Until gaia is discovered, pomeroy_id is used to identify the contact uniquely. 
If trust_level is missing or set to TRUST_LEVEL_UNSPECIFIED pomeroy_id must be empty.", -"type": "string" -}, -"trustLevel": { -"enum": [ -"TRUST_LEVEL_UNSPECIFIED", -"TRUST_LEVEL_EMERGENCY_CONTACT" +"ON_CREATE", +"ON_UPDATE", +"ON_CREATE_LINK", +"ON_DELETE_LINK" ], "enumDescriptions": [ -"The person does not have a specified trust level.", -"The person is an Emergency Contact." +"Trigger for unknown action.", +"Trigger for create document action.", +"Trigger for update document action.", +"Trigger for create link action.", +"Trigger for delete link action." ], "type": "string" } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiEvent": { -"id": "AppsPeopleOzExternalMergedpeopleapiEvent", -"properties": { -"calendarDay": { -"$ref": "GoogleTypeDate", -"description": "Event are more accurately represented as a calendar day that does not depend on a timestamp representation at all. When given a timestamp, there are lots of opportunities to make mistakes, so a CalendarDay proto is replacing timestamps. PeopleApi will return these values on reads, and unless the client is a legacy caller in the legacy_timestamp_event_write_behavior_enabled capability allowlist, this value is what is used for Person writes." -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"prompt": { -"$ref": "SocialGraphApiProtoPrompt", -"description": "People Prompts settings for contact event data." -}, -"timestampMillis": { -"deprecated": true, -"description": "Clients are recommended to read the calendar_day field instead of timestamp_millis. When writing events, new clients must set calendar_day instead of timestamp_millis. Events are currently represented as timestamp values, although the interpretation of these timestamp values is a calendar date. 
There are a few important details about how this value should be mapped to a calendar date that should be consistent among all clients. For detailed information, see Birthday.date_ms.", -"format": "int64", -"type": "string" -}, -"type": { -"description": "The type of the event. The type can be free form or one of these predefined values: * `anniversary` * `other`", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiExternalId": { -"description": "External identifier associated with the person.", -"id": "AppsPeopleOzExternalMergedpeopleapiExternalId", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"description": "The type of the external ID. The type can be free form or one of these predefined values: * `account` * `customer` * `loginId` * `network` * `organization`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAcl": { -"description": "The field ACL. Currently only populated on profile fields for the profile owner. A Person field ACL; see http://go/peopleapi-acl", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAcl", +"GoogleCloudContentwarehouseV1RuleActionsPair": { +"description": "Represents a rule and outputs of associated actions.", +"id": "GoogleCloudContentwarehouseV1RuleActionsPair", "properties": { -"aclEntry": { -"description": "A custom type of field ACL entry. The set of all ACL entries includes those listed in acl_entry as well as predefined_acl_entry.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntry" -}, -"type": "array" -}, -"authorizedViewers": { -"description": "Set of users that will be authorized to view the field by this field ACL. 
If the ACL is public, this will only contain ALL_USERS. This field is synthesized, read-only, and currently only used for profile photos. It's populated under \"person.photo.metadata.field_acl\" for the current photo ACL and \"person.photo.metadata.acl_choices\" for available photo ACL choices. Note: The set of authorized viewers for a given FieldAcl may depend on the user's account type and domain configuration. For example, a PRIVATE_READ FieldAcl could have any of the following authorized viewers: Consumer user: [IDENTITY_ACL_ESTABLISHED] Dasher user without domain contact sharing: [IDENTITY_ACL_ESTABLISHED] Unicorn user: [SAME_UNICORN_FAMILY] Hafez user: []", -"items": { -"enum": [ -"AUTHORIZED_VIEWER_UNSPECIFIED", -"IDENTITY_ACL_ESTABLISHED", -"SAME_ORGANIZATION", -"SAME_UNICORN_FAMILY", -"ALL_USERS" -], -"enumDescriptions": [ -"", -"Users with an identity ACL established with the field owner through user-to-user interactions.", -"Users in the same enterprise organization as the field owner.", -"Users in the same Unicorn family as the field owner.", -"All users, including anonymous viewers." -], -"type": "string" -}, -"type": "array" -}, -"predefinedAclEntry": { -"description": "A common type of field ACL entry. A predefined ACL entry is a shortcut for a commonly occurring case of role and scope. For example, PUBLIC_READ is the same as an AclEntry with role = READER and scope.all_users = true. 
The set of all ACL entries includes those listed in acl_entry as well as predefined_acl_entry.", +"actionOutputs": { +"description": "Outputs of executing the actions associated with the above rule.", "items": { -"enum": [ -"UNKNOWN", -"OWNER", -"PUBLIC_READ", -"DOMAIN_READ", -"YOUR_CIRCLES_READ", -"EXTENDED_CIRCLES_READ", -"PRIVATE_READ" -], -"enumDescriptions": [ -"", -"Equivalent to role = OWNER and scope.person.person_id = requester's person ID.", -"Equivalent to role = READER and scope.all_users = true.", -"Equivalent to role = READER and scope.domain_users = true.", -"Equivalent to role = READER and scope.membership.circle.circle_set = YOUR_CIRCLES.", -"Equivalent to role = READER and scope.membership.circle.circle_set = EXTENDED_CIRCLES.", -"This is under implementation and not populated in PeopleAPI yet. Equivalent to role = READER and scope.person.person_id = requester's person ID. Presence of this entry means the field is private and is not visible to others." -], -"type": "string" +"$ref": "GoogleCloudContentwarehouseV1ActionOutput" }, "type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntry": { -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntry", -"properties": { -"role": { -"enum": [ -"UNKNOWN", -"READER", -"WRITER", -"OWNER" -], -"enumDescriptions": [ -"", -"", -"", -"owner can change ACL" -], -"type": "string" -}, -"scope": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScope" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScope": { -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScope", -"properties": { -"allUsers": { -"description": "Indicates that the field is accessible to all users including unauthenticated users. For some fields this means \"to everyone except blocked users\".", -"type": "boolean" -}, -"domainUsers": { -"description": "This is a \"synthetic\" field. In reality domains are treated as gaia- groups. 
This field will be 'true' when the field is ACLed to the gaia-group of the requester's domain.", -"type": "boolean" -}, -"membership": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAcl" -}, -"person": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopePersonAcl", -"description": "Indicates that the field is accessible to a person." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAcl": { -"description": "Used when the field is accessible to a membership that the person has.", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAcl", -"properties": { -"circle": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclCircleAcl" -}, -"contactGroup": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclContactGroupAcl" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclCircleAcl": { -"description": "Used when a field is accessible to a circle.", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclCircleAcl", -"properties": { -"circleId": { -"type": "string" -}, -"circleSet": { -"enum": [ -"UNKNOWN", -"YOUR_CIRCLES", -"EXTENDED_CIRCLES" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"displayName": { -"description": "Equivalent to Circle.display_name for the circle_id. Included when FieldAclOption.FULL_ACL_WITH_DETAILS is requested. This field is read-only and ignored on update.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclContactGroupAcl": { -"description": "Used when a field is accessible to a legacy contact group. Contact groups are discouraged and may be deprecated soon. ContactGroupAcls are read-only. 
If they are included as part of an ACL on an Update, an exception is thrown.", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopeMembershipAclContactGroupAcl", -"properties": { -"contactGroupId": { -"description": "A contact group ID. This is either a user-defined contact group hex ID, or it is the string name of the enum constant in Group.PredefinedId in FBS backend.proto for predefined groups. Common values for the predefined name include, but are not limited to: all, myContacts, starred, chatBuddies, friends, family, coworkers, and blocked.", -"type": "string" -}, -"displayName": { -"description": "The localized display name for the predefined group, if known; or, the display name for the user-defined contact group. Included when FieldAclOption.FULL_ACL_WITH_DETAILS is requested.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopePersonAcl": { -"description": "Used when a field is accessible to a person. NOTE: ACLs to a circle or to a non-self person are no longer supported, so this can only be applied to the requester self.", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldAclAclEntryScopePersonAcl", -"properties": { -"displayName": { -"deprecated": true, -"description": "DEPRECATED. This is not different than reading from person.name for a self-read; ACLs to a circle or to a non-self person are no longer supported. Equivalent to Name.display_name for the person_id profile. Included when the profile Name is ACLed to the requester and FieldAclOption.FULL_ACL_WITH_DETAILS is requested. This field is read-only and ignored on update.", -"type": "string" -}, -"personId": { -"type": "string" }, -"photoUrl": { -"deprecated": true, -"description": "DEPRECATED. This is not different than reading from person.photo for a self-read; ACLs to a circle or to a non-self person are no longer supported. Equivalent to Photo.url for the person_id profile. 
Included when the profile Photo is ACLed to the requester and FieldAclOption.FULL_ACL_WITH_DETAILS is requested. This field is read-only and ignored on update.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFieldEmergencyInfo": { -"description": "Emergency information for Person field, such as Phone or Email. See go/emergency-trusted-contacts-papi.", -"id": "AppsPeopleOzExternalMergedpeopleapiFieldEmergencyInfo", -"properties": { -"emergencyLevel": { -"enum": [ -"EMERGENCY_LEVEL_UNSPECIFIED", -"EMERGENCY_LEVEL_PRIMARY" -], -"enumDescriptions": [ -"The field (such as phone) is not designated for emergency communication.", -"The field (such as phone) is designated for emergency communication." -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiFileAs": { -"description": "The FileAs field in Contacts is used to override the DisplayName of a Contact for that User. ", -"id": "AppsPeopleOzExternalMergedpeopleapiFileAs", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" +"rule": { +"$ref": "GoogleCloudContentwarehouseV1Rule", +"description": "Represents the rule." } }, "type": "object" }, -"AppsPeopleOzExternalMergedpeopleapiGPayExtendedData": { -"description": "Extension data for use in GPay Product Profile. go/gpay-product-profile-1-pager Contact: profiles-eng-fe@google.com", -"id": "AppsPeopleOzExternalMergedpeopleapiGPayExtendedData", -"properties": { -"failure": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProductProfileFailure", -"description": "Failure type if there is an error when fetching product profile data." -}, -"internationalNumber": { -"deprecated": true, -"description": "A number in international format including the country code that is made user readable by including formatting such as spaces. 
Example: \"+41 44 668 1800\" DEPRECATED: A user's phone number should be masked and not in an international format", -"type": "string" -}, -"maskedNumber": { -"description": "The masked string of a user's phone number The number will be obfucsated with * except the last 4 digits. Refer to: //java/com/google/nbu/paisa/common/PhoneNumberMasker.java", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiGender": { -"description": "Gender in PeopleApi has some odd semantics about writing and reading that are not obvious from the proto definition. First, the `type` string, when read, always maps to the constrained domain of \"male\", \"female\", and \"other\", aside from a pathological case that PeopleApi would like to fix. There are two typical patterns: 1. `type` is either \"male\" or \"female\" and `custom_type` and `address_me_as` are exactly as specified by an update to PeopleApi, although they are most often absent for \"male\" and \"female\" writes. 2. `type` is \"other\" and `custom_type` is set to a freeform string from the request. `address_me_as` is equal to whatever was provided at write time. When writing, the free-form string for `custom_type` can come from either `custom_type` if the field is present on the request, or if `custom_type` is absent, the string value of `type` will be copied into it. Any value in `type` will be coerced to \"other\" and the free-form value will be copied into `custom_type`, even if `type` is exactly \"other\". Prefer to explicitly set `custom_type` and set type to \"other\" instead of setting type to a free-form value. There are weird edge cases when the value is \"unknown\". Consider the behavior for `type` == \"unknown\" unspecified. Clients reading the gender should use the value from `formatted_type` if `type` is \"male\" or \"female\". If `type` is \"other\", `formatted_type` will be \"Other\" (or some translation) and clients should read `custom_type` for more specificity. 
", -"id": "AppsPeopleOzExternalMergedpeopleapiGender", -"properties": { -"addressMeAs": { -"description": "Preferred pronoun choice. It's unclear whether this value is constrained to a finite domain by UIs. `address_me_as` may be populated regardless of whether `type` is \"male\", \"female\", or \"other\", although most writers only set it if `type` is \"other\".", -"type": "string" -}, -"customType": { -"description": "A free-form string indicating what the user entered as their gender. `custom_type` may exist even if the type is \"male\" or \"female\", although most writers do not set it unless `type` is \"other\".", -"type": "string" -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"description": "The gender. \"male\", \"female\", or \"other\". If \"other\", typically, additional fields will have additional information.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiGplusExtendedData": { -"id": "AppsPeopleOzExternalMergedpeopleapiGplusExtendedData", -"properties": { -"contentRestriction": { -"enum": [ -"UNKNOWN", -"PUBLIC", -"DISCOVERY", -"WALLED_GARDEN" -], -"enumDescriptions": [ -"", -"Can send and consume public content", -"Restricted to only sending content internally, cannot mutate external content in any manner", -"Restricted to sending content internally, no public content naturally surfaces" -], -"type": "string" -}, -"isEnterpriseUser": { -"description": "Equivalent to having the DASHER_POLICY bit in the REGISTERED state.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiHangoutsExtendedData": { -"description": "Extension data for use in Hangouts.", -"id": "AppsPeopleOzExternalMergedpeopleapiHangoutsExtendedData", -"properties": { 
-"hadPastHangoutState": { -"enum": [ -"UNKNOWN_PAST_HANGOUT_STATE", -"HAD_PAST_HANGOUT", -"NO_PAST_HANGOUT" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"invitationStatus": { -"description": "Populated for all contacts. Only set if had_past_hangout_state == HAD_PAST_HANGOUT. INVITATION_NEEDED is not a valid value because there already is a past hangout, which means either the invitation is still pending or it\u2019s been accepted.", -"enum": [ -"UNKNOWN_INVITATION_STATUS", -"PENDING_INVITATION", -"ACCEPTED_INVITATION", -"INVITATION_NEEDED" -], -"enumDescriptions": [ -"", -"The invitation is still pending, or was declined", -"The other user accepted the invitation", -"An invitation is needed to chat with this user" -], -"type": "string" -}, -"isBot": { -"description": "True if this is a Hangouts bot.", -"type": "boolean" -}, -"isDismissed": { -"type": "boolean" -}, -"isFavorite": { -"type": "boolean" -}, -"isPinned": { -"type": "boolean" -}, -"userType": { -"enum": [ -"UNKNOWN_USER_TYPE", -"INVALID", -"GAIA", -"OFF_NETWORK_PHONE", -"MALFORMED_PHONE_NUMBER", -"UNKNOWN_PHONE_NUMBER", -"ANONYMOUS_PHONE_NUMBER" -], -"enumDescriptions": [ -"", -"No entity found.", -"Regular GAIA users.", -"Phone numbers represented as GAIA ids.", -"Malformed number. Those are numbers that don't comfirm to an E.164 format that we get with phone calls/voicemails (e.g. \"HELLO\").", -"Unknown number. This is a number that the caller has chosen not to broadcast.", -"Phone number that Google knows about, but the caller chose not to reveal to the recipient." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiIdentityInfo": { -"id": "AppsPeopleOzExternalMergedpeopleapiIdentityInfo", -"properties": { -"originalLookupToken": { -"description": "Original lookup token from the request that resulted in this person or one of its containers.", -"items": { -"type": "string" -}, -"type": "array" -}, -"previousPersonId": { -"description": "Any former IDs this person may have had, in the case that their ID may have changed. Populated only for sync requests. Examples of such changes include adding an edge to a contact that links to a profile. The ID will change from being contact-oriented to being profile-oriented. To be used to clear out old versions of a person.", -"items": { -"type": "string" -}, -"type": "array" -}, -"sourceIds": { -"description": "A list of sources contributing to the merged person, including profiles (with gaia-id), contacts and synthetic-contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSourceIdentity" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiIm": { -"id": "AppsPeopleOzExternalMergedpeopleapiIm", -"properties": { -"formattedProtocol": { -"description": "The `protocol` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"protocol": { -"description": "The protocol of the IM. The protocol can be free form or one of these predefined values: * `aim` * `msn` * `yahoo` * `skype` * `qq` * `googleTalk` * `icq` * `jabber` * `netMeeting`", -"type": "string" -}, -"type": { -"description": "The type of the IM. 
The type can be free form or one of these predefined values: * `home` * `work` * `other`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInAppNotificationTarget": { -"description": "How and where to send notifications to this person in other apps, and why the requester can do so. See go/reachability for more info. \"How\" and \"where\" identify the recipient in a P2P Bridge (glossary/p2p bridge), and \"why\" may be helpful in a UI to disambiguate which of several ways may be used to contact the recipient. How: Via a Google profile or a reachable-only phone number that the requester has access to. Specified in the target \"type\" and \"value\". Where: Apps in which the profile/phone number owner may receive notifications. Specified in the repeated \"app\". Why: Which fields in, e.g., a contact associated with this person make the notification target info visible to the requester. Specified in the repeated originating_field param. Example: Alice has a contact Bob, with: Email 0 = bob@gmail.com Phone 0 = +12223334444 Phone 1 = +15556667777 Email 0 and Phone 0 let Alice see Bob's public profile (obfuscated gaia ID = 123). Public profiles are visible by email by default, and Bob has explicitly made it visible via Phone 0. Bob says people can send notifications to his public profile in YouTube. Phone 2 is associated with another Google profile that Bob owns, but he doesn't want others to see it. He is okay with people sending notifications to him in Who's Down if they have this phone number, however. There will be separate InAppNotificationTargets: one for Bob's public Google profile, and one for the second phone number, which is in his private profile. 
IANT #1 - targeting Bob's public profile (visible via Email 0 and Phone 0): app = [YOUTUBE] type = OBFUSCATED_GAIA_ID value = 123 originating_field: [ { field_type = EMAIL, field_index = 0 } // For Email 0 { field_type = PHONE, field_index = 0 } // For Phone 0 ] IANT #2 - targeting Bob's private profile phone number Phone 1: app = [WHOS_DOWN] type = PHONE value = +15556667777 originating_field: [ { field_type = PHONE, field_index = 1 } // For Phone 1 ]", -"id": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTarget", -"properties": { -"app": { -"items": { -"enum": [ -"UNKNOWN", -"BABEL", -"YOUTUBE", -"WHOS_DOWN", -"YOUTUBE_MANGO", -"PHOTOS", -"GOOGLE_ASSISTANT", -"KABOO", -"COMMERCE_PLATFORM", -"SPACES", -"MAPS", -"LOUPE_UNUSED", -"POMEROY", -"LOUPE", -"PEOPLE_PLAYGROUND", -"NEWS_360", -"DUO", -"MEET" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"Should never be stored in practice", -"Aka Hangouts", -"Aka Reactr", -"Aka Who's Down", -"Aka YouTube for Emerging Markets", -"Aka Google Photos", -"", -"Aka PAISA, aka Tez", -"Aka Naksha", -"Aka Google Spaces", -"Aka Google Map", -"This field is deprecated.", -"Aka Trusted Contacts", -"Aka Camera by Google Photos", -"See go/people-playground-design.", -"Also mdb/social-magazines", -"Aka Taychon", -"Aka Google Meet" -], -"type": "string" -}, -"type": "array" -}, -"clientData": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetClientData" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"originatingField": { -"description": "There may be more than one field from which this IANT originates, as in the case of Bob's public profile.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetOriginatingField" -}, -"type": "array" -}, -"type": { -"enum": [ 
-"UNKNOWN_KEY_TYPE", -"PHONE", -"OBFUSCATED_GAIA_ID", -"EMAIL" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"value": { -"description": "The value of the target, used for delivery. E.g., the obfuscated gaia ID for a visible profile.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetClientData": { -"description": "Client-specific data pertaining to app reachability. No PII data or user content should be stored in this blob.", -"id": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetClientData", -"properties": { -"app": { -"description": "The app to which this client data applies.", -"enum": [ -"UNKNOWN", -"BABEL", -"YOUTUBE", -"WHOS_DOWN", -"YOUTUBE_MANGO", -"PHOTOS", -"GOOGLE_ASSISTANT", -"KABOO", -"COMMERCE_PLATFORM", -"SPACES", -"MAPS", -"LOUPE_UNUSED", -"POMEROY", -"LOUPE", -"PEOPLE_PLAYGROUND", -"NEWS_360", -"DUO", -"MEET" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"Should never be stored in practice", -"Aka Hangouts", -"Aka Reactr", -"Aka Who's Down", -"Aka YouTube for Emerging Markets", -"Aka Google Photos", -"", -"Aka PAISA, aka Tez", -"Aka Naksha", -"Aka Google Spaces", -"Aka Google Map", -"This field is deprecated.", -"Aka Trusted Contacts", -"Aka Camera by Google Photos", -"See go/people-playground-design.", -"Also mdb/social-magazines", -"Aka Taychon", -"Aka Google Meet" -], -"type": "string" -}, -"byteValue": { -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetOriginatingField": { -"description": "Info for identifying the specific field in this person that lets the requester send them notifications. These are typically fields added to a contact (e.g., email). 
There will not always be in originating field, typically in the case that whatever permits the requester to see this target info is not something that can be used on its own for contacting this person.", -"id": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTargetOriginatingField", -"properties": { -"fieldIndex": { -"description": "The index of the relevant field in the merged person", -"format": "int32", -"type": "integer" -}, -"fieldType": { -"enum": [ -"UNKNOWN_FIELD_TYPE", -"PHONE", -"EMAIL" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"value": { -"description": "The value of the origin field", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInAppReachability": { -"description": "This is deprecated in PEOPLE_API/SHARPEN, and should only be used for PROFILES. Clients should use InAppNotificationTarget field instead. Which apps the person has indicated they are reachable at for the requester. See go/d13y and com.google.focus.proto.InAppReachability.", -"id": "AppsPeopleOzExternalMergedpeopleapiInAppReachability", -"properties": { -"appType": { -"enum": [ -"UNKNOWN", -"BABEL", -"YOUTUBE", -"WHOS_DOWN", -"YOUTUBE_MANGO", -"PHOTOS", -"KABOO", -"COMMERCE_PLATFORM", -"SPACES", -"GOOGLE_ASSISTANT", -"PEOPLE_PLAYGROUND", -"MAPS", -"LOUPE_UNUSED", -"POMEROY", -"LOUPE", -"NEWS_360", -"DUO", -"MEET" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"should never be stored in practice", -"aka Hangouts", -"aka Reactr", -"aka Who's Down?", -"aka YouTube for Emerging Markets", -"aka Google Photos", -"aka PAISA", -"aka Naksha", -"", -"", -"See go/people-playground-design.", -"Aka Google Map", -"This field is deprecated.", -"aka Trusted Contacts", -"Aka Camera by Google Photos", -"Also mdb/social-magazines", -"aka Taychon", -"Aka Google Meet" -], -"type": "string" -}, 
-"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"reachabilityKey": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInAppReachabilityReachabilityKey" -}, -"status": { -"enum": [ -"UNKNOWN_REACHABLE_STATUS", -"REACHABLE", -"NOT_REACHABLE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInAppReachabilityReachabilityKey": { -"description": "Information pertaining to how this reachable state was established.", -"id": "AppsPeopleOzExternalMergedpeopleapiInAppReachabilityReachabilityKey", -"properties": { -"keyType": { -"enum": [ -"UNKNOWN_KEY_TYPE", -"PHONE", -"OBFUSCATED_GAIA_ID" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"keyValue": { -"description": "The value of the key by which the user said they may be reachable. E.g., the phone number.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInteractionSettings": { -"deprecated": true, -"description": "Defines interactions that are allowed or disallowed with this person.", -"id": "AppsPeopleOzExternalMergedpeopleapiInteractionSettings", -"properties": { -"allowed": { -"type": "boolean" -}, -"interaction": { -"enum": [ -"UNKNOWN", -"INCOMING_CIRCLE_MEMBERSHIP", -"INCOMING_SOCIAL_EDGE", -"INVITE_TO_EMAIL" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiInterest": { -"id": "AppsPeopleOzExternalMergedpeopleapiInterest", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiLanguage": { -"description": "The value can either by a language code conforming to the IETF BCP 47 specification or a custom freeform value. 
By default the returned value is proxied from FBS Profile.Language. If `include_account_locale` is set on the `MergePersonSourceOptions` the language from go/uls is preferred and returned as primary along with a secondary language from FBS.", -"id": "AppsPeopleOzExternalMergedpeopleapiLanguage", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiLatLng": { -"id": "AppsPeopleOzExternalMergedpeopleapiLatLng", -"properties": { -"lat": { -"format": "double", -"type": "number" -}, -"lng": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiLegacyFields": { -"description": "Fields used in legacy applications. Useful for joining legacy and new data streams. Most applications should not care about these fields.", -"id": "AppsPeopleOzExternalMergedpeopleapiLegacyFields", -"properties": { -"mobileOwnerId": { -"description": "Mobile obfuscated gaia id. 
This is the same gaia id in metadata.owner_id, but obfuscated with the legacy mobile obfuscator.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiLimitedProfileSettingsField": { -"id": "AppsPeopleOzExternalMergedpeopleapiLimitedProfileSettingsField", -"properties": { -"limitedProfileSettings": { -"$ref": "SocialGraphApiProtoLimitedProfileSettings" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiLocation": { -"id": "AppsPeopleOzExternalMergedpeopleapiLocation", -"properties": { -"buildingId": { -"type": "string" -}, -"buildingName": { -"description": "The building_name field is only filled if the DESK_LOCATION_ADDITIONAL_DATA extension is active.", -"type": "string" -}, -"current": { -"type": "boolean" -}, -"deskCode": { -"description": "Most specific textual description of individual desk location.", -"type": "string" -}, -"floorName": { -"type": "string" -}, -"floorSection": { -"type": "string" -}, -"lastUpdateTime": { -"description": "Indicates the time this location was added or last edited.", -"format": "google-datetime", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"source": { -"description": "Value indicates the origin of this location information.", -"enum": [ -"UNKNOWN", -"EXPLICIT", -"INFERRED" -], -"enumDescriptions": [ -"", -"The location was directly set by the user or admin.", -"The location was inferred from available signals (e.g. past rooms usage)." -], -"type": "string" -}, -"type": { -"description": "Describes the type of location. E.g. Grew_up, Desk. 
Corresponds to FBS backend.proto Location.StandardTag", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiManagementUpchain": { -"id": "AppsPeopleOzExternalMergedpeopleapiManagementUpchain", -"properties": { -"indirectManager": { -"description": "List of managers in the chain. If user has manager email \"abc@google.com\" and manager's manager has email \"xyz@google.com\" then the list will be: [0]: { email: \"abc@google.com\" } [1]: { email: \"xyz@google.com\" }", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiManagementUpchainIndirectManager" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"status": { -"enum": [ -"UNKNOWN", -"OK", -"PARTIAL", -"LOOP" -], -"enumDescriptions": [ -"", -"", -"An error occurred while retrieving next manager so resulting management upchain is partial up to manager that was not retrieved.", -"A loop was found in management upchain so resulting upchain contains all managers retrieved in first iteration of the loop." -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiManagementUpchainIndirectManager": { -"id": "AppsPeopleOzExternalMergedpeopleapiManagementUpchainIndirectManager", -"properties": { -"email": { -"type": "string" -}, -"personId": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMapsExtendedData": { -"description": "Extension data for use in Maps Product Profile.", -"id": "AppsPeopleOzExternalMergedpeopleapiMapsExtendedData", -"properties": { -"failure": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProductProfileFailure", -"description": "Failure type if there is an error when fetching product profile data." 
-}, -"followeeCount": { -"description": "Number of people the user is following.", -"format": "int64", -"type": "string" -}, -"followerCount": { -"description": "Number of people who are following the user.", -"format": "int32", -"type": "integer" -}, -"numContributions": { -"description": "Sum of creators contributions i.e. reviews, rating, questions, etc.", -"format": "int64", -"type": "string" -}, -"profilePhotoUrl": { -"description": "The user's profile photo that might have a badge rendered at the corner if the user is eligible for a badge.", -"type": "string" -}, -"tagline": { -"description": "A user's bio, or tagline.", -"type": "string" -}, -"topicExpertise": { -"description": "A topic that creator has expertise in. This will be in the format: emoji associated with the topic, display name of the topic, topic score", -"items": { -"type": "string" -}, -"type": "array" -}, -"userCaption": { -"description": "A user's caption displayed under the user name on their profile page i.e. 'Local Guide Level 8'", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMapsProfile": { -"description": "Maps Profile Data. See go/product-profiles-backend-api.", -"id": "AppsPeopleOzExternalMergedpeopleapiMapsProfile", -"properties": { -"fieldRestriction": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMapsProfileFieldRestriction" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"tagline": { -"type": "string" -}, -"websiteLink": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMapsProfileUrlLink", -"description": "A link to the profile owner's website to be displayed in profile." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMapsProfileFieldRestriction": { -"id": "AppsPeopleOzExternalMergedpeopleapiMapsProfileFieldRestriction", -"properties": { -"clientData": { -"description": "Opaque data associated with this restriction e.g. 
abuse status.", -"format": "byte", -"type": "string" -}, -"type": { -"enum": [ -"TYPE_UNSPECIFIED", -"HIDE_TAGLINE" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMapsProfileUrlLink": { -"id": "AppsPeopleOzExternalMergedpeopleapiMapsProfileUrlLink", -"properties": { -"anchorText": { -"description": "Anchor text to be displayed as clickable link. If not present, the URL should be displayed directly.", -"type": "string" -}, -"url": { -"description": "The URL to be linked to.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMatchInfo": { -"description": "Represents the matching information for a field when there is a query.", -"id": "AppsPeopleOzExternalMergedpeopleapiMatchInfo", -"properties": { -"match": { -"description": "The list of matches ordered by most relevant matching for autocomplete coming first.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMatchInfoLookupTokenMatch" -}, -"type": "array" -}, -"query": { -"description": "The query token we are matching against.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMatchInfoLookupTokenMatch": { -"description": "All the substring that were matched for the given query against the current field. Represents a substring of another string.", -"id": "AppsPeopleOzExternalMergedpeopleapiMatchInfoLookupTokenMatch", -"properties": { -"endIndex": { -"description": "Index right after the last character that matches the query. length = end-start, we have substring = [start, end).", -"format": "int32", -"type": "integer" -}, -"startIndex": { -"description": "Index of the first unicode character that matches the query.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMembership": { -"deprecated": true, -"description": "A membership that the person has. 
The person can be a member of multiple circles and multiple contact-groups. A circle membership is created by adding a person to a circle by person-id or by email. A contact-group membership is created by adding a contact to a contact-group.", -"id": "AppsPeopleOzExternalMergedpeopleapiMembership", -"properties": { -"circleId": { -"description": "A circle that the person belongs to.", -"type": "string" -}, -"contactGroupId": { -"description": "A contact-group that the person belong to. The id can be either a hex-formatted id or a camel-cased SystemContactGroup predefined group name. The id will be predefined group name iff the system_contact_group_id has a value.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata", -"description": "The metadata field can be used to determine which container generated the membership. For example, when the membership has a contact_group_id, the metadata.container will be CONTACT and the container_id will be the contact Id." -}, -"systemContactGroupId": { -"description": "The membership has a contact_group_id, this field will be populated when the membership is in a system-reserved contact-group.", -"enum": [ -"UNKNOWN", -"MY_CONTACTS", -"STARRED", -"FRIENDS", -"FAMILY", -"COWORKERS" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiMission": { -"id": "AppsPeopleOzExternalMergedpeopleapiMission", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiName": { -"description": "See go/people-api-howto:names for an overview of name usage in PeopleAPI. The `unstructured_name` field contains a free form name value. The `given_name`, `middle_name`, `family_name`, etc, fields contain the structured name. For CONTACT mutates, (i.e. 
when Name.metadata.container is CONTACT), it is recommended for clients to set either the `unstructured_name` or the set of structured name fields, not both. * When only the `unstructured_name` is set, it is parsed to produce a best-guess set of structured name values for the `given_name`, `family_name`, etc. * When only the structured name fields are set, the various values are combined to produce an `unstructured_name`. * When both are set, the `unstructured_name` is saved as-is and the structured name fields are saved as-is. This may be confusing as they might not \"match\". For PROFILE mutates, (i.e. when Name.metadata.container is PROFILE), it is _required_ for clients to use the structured name fields as the unstructured field value is ignored on write. The unstructured name fields are generated for convenience on read. For DEVICE_CONTACTS, see b/156020778.", -"id": "AppsPeopleOzExternalMergedpeopleapiName", -"properties": { -"displayName": { -"description": "Read-only. A name synthesized based on `unstructured_name` and the structured name fields. Example: \"John Smith\" If a language code is passed in the side channel using http://cs/symbol:framework.rpc.DeprecatedPropagatedLanguageCode.value or http://cs/symbol:google.rpc.context.OriginContext.accept_language and the name does not have `honorific_prefix`, `middle_name`, or `honorific_suffix` set, the language code will be used to format `display_name`. If `include_account_locale` is set on the `MergePersonSourceOptions` and a language code is not passed in the side channel. The language code from go/uls will be used as the language code for formatting `display_name`.", -"type": "string" -}, -"displayNameLastFirst": { -"description": "Read-only. A name synthesized based on `unstructured_name` and the structured name fields with the last name first. Example: \"Smith, John\"", -"type": "string" -}, -"displayNameSource": { -"$ref": "SocialGraphApiProtoDisplayNameSource", -"description": "Read-only. 
The source of the display name." -}, -"familyName": { -"type": "string" -}, -"formattedName": { -"deprecated": true, -"description": "DEPRECATED(b/70571931). Use `unstructured_name` instead.", -"type": "string" -}, -"givenName": { -"type": "string" -}, -"honorificPrefix": { -"type": "string" -}, -"honorificSuffix": { -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"middleName": { -"type": "string" -}, -"pronunciations": { -"$ref": "SocialGraphApiProtoPronunciations", -"description": "This field is stored in contact annotations and merged at read-time. It is available with CONTACT_ANNOTATION container type at read time." -}, -"shortDisplayName": { -"description": "NOTE: this is currently NOT IMPLEMENTED due to changed priorities. Clients usually rely on \"first name\" instead, when a short name is needed. Read-only. A possibly shorter version of the user's name. - The purpose of this field is to address the needs of UIs where a full display name might be too large to fit. Instead of relying on `first_name`, which might not be present, `short_display_name` is preferred. - This is only available for PROFILE and DOMAIN_PROFILE container types. - About the actual content in this field: will be the first name when it's visible to the requester, or the same as `display_name`, otherwise. A sample scenario where the first name may not be visible is when the limited profile is returned. For more info, see: http://shortn/_9iV7TJ33la", -"type": "string" -}, -"unstructuredName": { -"description": "The free form name value. 
For contact mutates it is recommended for clients to set either the `unstructured_name` or the set of structured name fields, not both.", -"type": "string" -}, -"yomiFamilyName": { -"type": "string" -}, -"yomiFullName": { -"type": "string" -}, -"yomiGivenName": { -"type": "string" -}, -"yomiHonorificPrefix": { -"type": "string" -}, -"yomiHonorificSuffix": { -"type": "string" -}, -"yomiMiddleName": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiNamePronunciationAudioMetadataInfo": { -"description": "Pronunciation audio metadata info. See go/name-pronunciation-backend. The metadata itself tracks the state of a user's name pronunciation audio.", -"id": "AppsPeopleOzExternalMergedpeopleapiNamePronunciationAudioMetadataInfo", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"namePronunciationAudioMetadata": { -"$ref": "SocialGraphApiProtoNamePronunciationAudioMetadata", -"description": "Actual metadata proto, shared with FBS backends." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiNickname": { -"id": "AppsPeopleOzExternalMergedpeopleapiNickname", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"enum": [ -"NICKNAME_UNKNOWN", -"DEFAULT", -"OTHER_NAME", -"MAIDEN_NAME", -"SHORT_NAME", -"INITIALS", -"ALTERNATE_NAME" -], -"enumDeprecated": [ -false, -false, -true, -true, -true, -true, -false -], -"enumDescriptions": [ -"", -"", -"Used in Profiles UI", -"", -"", -"", -"The nickname or alternate name, incorporated into Display Name." 
-], -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOccupation": { -"id": "AppsPeopleOzExternalMergedpeopleapiOccupation", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOpeningHours": { -"description": "The periods that this place is open during the week. The periods are in chronological order, starting with today in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.", -"id": "AppsPeopleOzExternalMergedpeopleapiOpeningHours", -"properties": { -"openNow": { -"description": "Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.", -"type": "boolean" -}, -"periods": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOpeningHoursPeriod" -}, -"type": "array" -}, -"weekdayTexts": { -"description": "Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: \"Sun: 18:00-06:00\"", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOpeningHoursEndpoint": { -"id": "AppsPeopleOzExternalMergedpeopleapiOpeningHoursEndpoint", -"properties": { -"day": { -"description": "A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.", -"format": "uint32", -"type": "integer" -}, -"time": { -"description": "A time in 24-hour \"hhmm\" format (i.e. 
range is 0000 to 2359).", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOpeningHoursPeriod": { -"id": "AppsPeopleOzExternalMergedpeopleapiOpeningHoursPeriod", -"properties": { -"close": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOpeningHoursEndpoint" -}, -"open": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOpeningHoursEndpoint" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOrganization": { -"id": "AppsPeopleOzExternalMergedpeopleapiOrganization", -"properties": { -"assignment": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOrganizationAssignment" -}, -"type": "array" -}, -"certification": { -"type": "string" -}, -"costCenter": { -"type": "string" -}, -"current": { -"type": "boolean" -}, -"department": { -"type": "string" -}, -"description": { -"type": "string" -}, -"domain": { -"type": "string" -}, -"endCalendarDay": { -"$ref": "GoogleTypeDate", -"description": "Start and End Dates are better represented as calendar entities. The intention is to replace timestamps. Not set if no value exists. Clients can choose whether to use has* semantics or default value semantics. For writes, the default proto and an absent message are equivalent. Legacy callers in the legacy_timestamp_event_write_behavior_enabled capability allowlist should write to PeopleApi via end_ms and migrate to setting both so they can be removed from the whitelist." -}, -"endMs": { -"deprecated": true, -"description": "Clients are encouraged to read the end_calendar_day instead. PeopleApi writes will still use end_ms for legacy callers that are in the legacy_timestamp_event_write_behavior_enabled capability allowlist. New writers must use the calendar_day fields.", -"format": "int64", -"type": "string" -}, -"endMsAsNumber": { -"deprecated": true, -"format": "int64", -"type": "string" -}, -"formattedStringType": { -"description": "The `string_type` translated and formatted in the request locale. 
See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"fteMilliPercent": { -"format": "int32", -"type": "integer" -}, -"importance": { -"format": "float", -"type": "number" -}, -"location": { -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"name": { -"type": "string" -}, -"project": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOrganizationProject" -}, -"type": "array" -}, -"startCalendarDay": { -"$ref": "GoogleTypeDate", -"description": "Start and End Dates are better represented as calendar entities. The intention is to replace timestamps. Not set if no value exists. Clients can choose whether to use has* semantics or default value semantics. For writes, the default proto and an absent message are equivalent. Legacy callers in the legacy_timestamp_event_write_behavior_enabled capability allowlist should write to PeopleApi via start_ms and migrate to setting both so they can be removed from the allowlist." -}, -"startMs": { -"deprecated": true, -"description": "Clients are encouraged to read the start_calendar_day instead. PeopleApi writes will still use start_ms for legacy callers that are in the legacy_timestamp_event_write_behavior_enabled capability allowlist. New writers must use the calendar_day fields.", -"format": "int64", -"type": "string" -}, -"startMsAsNumber": { -"deprecated": true, -"format": "int64", -"type": "string" -}, -"stringType": { -"description": "The type of the organization. The type can be free form or one of these predefined values: * `work` * `school`", -"type": "string" -}, -"symbol": { -"type": "string" -}, -"title": { -"type": "string" -}, -"type": { -"enum": [ -"UNKNOWN", -"WORK", -"SCHOOL", -"DOMAIN_ONLY" -], -"enumDescriptions": [ -"", -"", -"", -"Used for domains inferred from verified email addresses. May be deprecated when/if we start mapping SCHOOL and WORK Names to Domain." 
-], -"type": "string" -}, -"yomiName": { -"deprecated": true, -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOrganizationAssignment": { -"id": "AppsPeopleOzExternalMergedpeopleapiOrganizationAssignment", -"properties": { -"name": { -"type": "string" -}, -"url": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOrganizationProject": { -"id": "AppsPeopleOzExternalMergedpeopleapiOrganizationProject", -"properties": { -"description": { -"type": "string" -}, -"name": { -"type": "string" -}, -"role": { -"type": "string" -}, -"type": { -"description": "Mapped from StandardProjectTag / CustomProjectTag", -"type": "string" -}, -"url": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiOtherKeyword": { -"id": "AppsPeopleOzExternalMergedpeopleapiOtherKeyword", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"source": { -"enum": [ -"SOURCE_UNKNOWN", -"OUTLOOK", -"CUSTOM" -], -"enumDescriptions": [ -"", -"Following field is added to support Outlook schema. This field is a general storage for Outlook string data that shall be encoded as: \"name:\" + value strings where name of the Outlook field cannot contain colons. Multiple Outlook fields shall be encoded as multiple Keywords.", -"" -], -"type": "string" -}, -"type": { -"description": "The type of the event. The type depends on the `OtherKeyword.source`. `OUTLOOK` source fields must be one of: * `billing_information` * `directory_server` * `keyword` * `mileage` * `sensitivity` * `user` * `subject` All other fields are treated as a `CUSTOM` source field. 
The value can be free form or one of these predefined values: * `home` * `other` * `work`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPerson": { -"description": "Merged-person combines multiple sources of data like contacts and profiles. See go/people-api and go/understanding-merged-person NOTE: Why are all the fields repeated? See go/people-api-concepts#repeated", -"id": "AppsPeopleOzExternalMergedpeopleapiPerson", -"properties": { -"about": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAbout" -}, -"type": "array" -}, -"address": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAddress" -}, -"type": "array" -}, -"ageRange": { -"deprecated": true, -"description": "Deprecated. If age is needed use `person.age_range_repeated` instead. Please see go/people-api-howto:age on how to correctly get age data.", -"enum": [ -"UNKNOWN", -"LESS_THAN_EIGHTEEN", -"TWENTY_ONE_OR_OLDER", -"EIGHTEEN_TO_TWENTY" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"ageRangeRepeated": { -"description": "Data on the person's age range, adult status, and age of consent. 
NOTE: Please read go/people-api-howto:age on how to correctly get age data.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAgeRangeType" -}, -"type": "array" -}, -"birthday": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiBirthday" -}, -"type": "array" -}, -"braggingRights": { -"description": "Used only by contacts, no data will be returned for profiles.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiBraggingRights" -}, -"type": "array" -}, -"calendar": { -"description": "b/145671020: Deprecated for Profiles, but not for Contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCalendar" -}, -"type": "array" -}, -"certifiedBornBefore": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCertifiedBornBefore" -}, -"type": "array" -}, -"circleMembership": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore. See b/329513077.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCircleMembership" -}, -"type": "array" -}, -"clientData": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiClientData" -}, -"type": "array" -}, -"communicationEmail": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCommunicationEmail" -}, -"type": "array" -}, -"connectionReminder": { -"description": "Reminder to connect with a Contact (part of go/people-prompts). Also contains contact-level prompts settings. Each Contact can have a single `connection_reminder` (but can have multiple Prompts inside of it). Field is repeated per PeopleAPI data model go/people-api-concepts#repeated. Only supported for CONTACT container.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiConnectionReminder" -}, -"type": "array" -}, -"contactCreateContextInfo": { -"description": "ContactCreateContextInfo has a timestamp timestamp and additional metadata (e.g. the source of the creation) for when the contact was created. 
See also `ContactEditContextInfo`.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiContactCreateContextInfo" -}, -"type": "array" -}, -"contactEditContextInfo": { -"description": "ContactEditContextInfo is a timestamp and additional metadata (e.g. the source of the edit) for the last 'human initiated edit'. See also `ContactCreateContextInfo`.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiContactEditContextInfo" -}, -"type": "array" -}, -"contactGroupMembership": { -"description": "Contact groups that this person is a member of.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiContactGroupMembership" -}, -"type": "array" -}, -"contactPromptSettingsInfo": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiContactPromptSettingsInfo" -}, -"type": "array" -}, -"contactStateInfo": { -"description": "Contact state and related metadata. See go/fbs-contacts-trash. If this field was requested but is not set on the Person then the contact is in the DEFAULT contact state. This field is read-only, and should not be set on a mutate (e.g. UpdatePerson) call. Clients must call the explicit APIs (e.g. UntrashPerson) to change contact state.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiContactStateInfo" -}, -"type": "array" -}, -"coverPhoto": { -"deprecated": true, -"description": "DEPRECATED. Now always returns a default cover photo. See go/sunset-cover-photo.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCoverPhoto" -}, -"type": "array" -}, -"customSchemaField": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCustomSchemaField" -}, -"type": "array" -}, -"email": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmail" -}, -"type": "array" -}, -"emergencyInfo": { -"description": "Emergency information. 
See go/emergency-trusted-contacts-papi.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmergencyInfo" -}, -"type": "array" -}, -"event": { -"description": "Event is currently in use by contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEvent" -}, -"type": "array" -}, -"extendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonExtendedData", -"description": "Data added by extensions that are not specific to a particular field." -}, -"externalId": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiExternalId" -}, -"type": "array" -}, -"fileAs": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFileAs" -}, -"type": "array" -}, -"fingerprint": { -"description": "A fingerprint that can be used to reliably determine if a resource has changed. Externally it is used as part of the etag.", -"type": "string" -}, -"gender": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiGender" -}, -"type": "array" -}, -"im": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiIm" -}, -"type": "array" -}, -"inAppNotificationTarget": { -"description": "Ways to send in-app notifications to this person. See go/reachability. This field is read-only and ignored for mutates.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInAppNotificationTarget" -}, -"type": "array" -}, -"inAppReachability": { -"description": "Used only by profile service, deprecated for PeopleAPI and Sharpen. If you aren't sure, contact people-api-users@ and profile-service-eng@.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInAppReachability" -}, -"type": "array" -}, -"interactionSettings": { -"deprecated": true, -"description": "DEPRECATED. 
This field isn't populated in people.list.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInteractionSettings" -}, -"type": "array" -}, -"interest": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiInterest" -}, -"type": "array" -}, -"language": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiLanguage" -}, -"type": "array" -}, -"legacyFields": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiLegacyFields", -"deprecated": true, -"description": "DEPRECATED. This field was only for backwards compatibility with legacy GData callers, and should not be used by new clients. Legacy fields used for mobile clients." -}, -"limitedProfileSettings": { -"description": "Settings for the limited profile. See go/limited-profiles-api.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiLimitedProfileSettingsField" -}, -"type": "array" -}, -"linkedPerson": { -"description": "Other person resources linked indirectly by an edge. The full person or just the IDs may be populated depending on request parameters. We consider linked people distinct people, but they share information. Example: A contact with two outgoing edges. The two edges are considered separate, but linked people.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"location": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiLocation" -}, -"type": "array" -}, -"managementUpchain": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiManagementUpchain" -}, -"type": "array" -}, -"mapsProfile": { -"description": "MapsProfile, see go/product-profiles-backend-api", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMapsProfile" -}, -"type": "array" -}, -"membership": { -"deprecated": true, -"description": "DEPRECATED. Please use `circle_membership` or `contact_group_membership` instead. 
Contact-groups and circles that this person is a member of.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMembership" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonMetadata" -}, -"mission": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMission" -}, -"type": "array" -}, -"name": { -"description": "See go/people-api-howto:names for details about names in PeopleAPI.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiName" -}, -"type": "array" -}, -"namePronunciationAudioMetadataInfo": { -"description": "Metadata info for a user's name pronunciation audio. See go/name-pronunication-backend.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiNamePronunciationAudioMetadataInfo" -}, -"type": "array" -}, -"nickname": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiNickname" -}, -"type": "array" -}, -"occupation": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOccupation" -}, -"type": "array" -}, -"organization": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOrganization" -}, -"type": "array" -}, -"otherKeyword": { -"description": "Legacy arbitrary key value fields", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOtherKeyword" -}, -"type": "array" -}, -"peopleInCommon": { -"deprecated": true, -"description": "DEPRECATED. This feature was stubbed, but never implemented. This field will not be populated with any results.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"personAttribute": { -"description": "In order to request this field, the client must set desired PersonAttributeKey in the dedicated RequestMask field `person_attribute`. Unlike other person fields, this field cannot be requested in the `include_field` field mask.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonAttribute" -}, -"type": "array" -}, -"personId": { -"description": "The ID of the person. 
This is determined by the backend, is unstable, and may not be the same as a user_id. Internally referred as 'personKey' to distinguish from the common PersonId pojo. See go/people-api-concepts#person-id", -"type": "string" -}, -"phone": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPhone" -}, -"type": "array" -}, -"photo": { -"description": "See go/people-api-concepts/photos for usage details", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPhoto" -}, -"type": "array" -}, -"placeDetails": { -"description": "Data specific to places. Data which also applies to contacts and profiles such as name, phone, photo, etc. are returned in the corresponding Person fields.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPlaceDetails" -}, -"type": "array" -}, -"plusPageInfo": { -"deprecated": true, -"description": "DEPRECATED. Info about plus pages in the person.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPlusPageInfo" -}, -"type": "array" -}, -"posixAccount": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPosixAccount" -}, -"type": "array" -}, -"profileUrl": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore. (go/people-api-concepts#repeated): Use person.profile_url_repeated instead. Access to this field is restricted to a set of legacy clients. This is a Google+-only field. See go/fbs-g+-deprecation. NOTE: `Person.profile_url` is only populated for profile-centric person.", -"type": "string" -}, -"profileUrlRepeated": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore. This is a Google+-only field. See go/fbs-g+-deprecation.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProfileUrl" -}, -"type": "array" -}, -"pronoun": { -"description": "Pronouns are not supported for consumer profiles. 
See go/pronouns-in-people-system-prd for more details.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPronoun" -}, -"type": "array" -}, -"readOnlyProfileInfo": { -"description": "Information about the profiles that are a part of this Person. This is only applicable to PROFILE and DOMAIN_PROFILE containers.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfo" -}, -"type": "array" -}, -"relation": { -"description": "See go/relation-vs-relationship for relation vs relationship explanation.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRelation" -}, -"type": "array" -}, -"relationshipInterest": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRelationshipInterest" -}, -"type": "array" -}, -"relationshipStatus": { -"deprecated": true, -"description": "DEPRECATED. No data is returned for this field anymore.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRelationshipStatus" -}, -"type": "array" -}, -"rightOfPublicityState": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRightOfPublicityState" -}, -"type": "array" -}, -"rosterDetails": { -"description": "Data specific to rosters (such as Google Groups and Chat Rooms). Data which also applies to contacts and profiles such as name, email, and photo, etc are returned in the corresponding Person fields.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRosterDetails" -}, -"type": "array" -}, -"searchProfile": { -"description": "Profile for Janata and Search. 
go/janata-profile-in-sgbe", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSearchProfile" -}, -"type": "array" -}, -"sipAddress": { -"description": "SipAddress is currently in use by contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSipAddress" -}, -"type": "array" -}, -"skills": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSkills" -}, -"type": "array" -}, -"socialConnection": { -"description": "NOTE: this is used by go/starlight, but not actually used or returned in PeopleAPI. See b/27281119 for context. Please reach out to people-api-eng@ if you have questions.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSocialConnection" -}, -"type": "array" -}, -"sortKeys": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSortKeys" -}, -"sshPublicKey": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiSshPublicKey" -}, -"type": "array" -}, -"tagline": { -"description": "Only supported for PLACE container results, no data will be returned for profiles.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiTagline" -}, -"type": "array" -}, -"teamsExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiTeamsExtendedData", -"deprecated": true, -"description": "DEPRECATED. *UNSUPPORTED*. This field is never populated." -}, -"userDefined": { -"description": "UserDefined is currently in use by contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiUserDefined" -}, -"type": "array" -}, -"visibleToGuests": { -"description": "Add annotation_id and metadata (product_source) for visible to guests contacts go/visible-to-guests.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiVisibleToGuests" -}, -"type": "array" -}, -"website": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiWebsite" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonAttribute": { -"description": "Client-specific binary blob stored with Person data. 
This differs from ClientData, which stores structured, key-value pairs.", -"id": "AppsPeopleOzExternalMergedpeopleapiPersonAttribute", -"properties": { -"attributeKey": { -"enum": [ -"PERSON_ATTRIBUTE_UNKNOWN", -"REJECTED_CLEANUP_CARD_SUGGESTIONS" -], -"enumDescriptions": [ -"", -"The value field of a PersonAttribute object with this key will contain a byte serialized RejectedCleanupCardSuggestions proto, see http://shortn/_3KyFdhbZfW" -], -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonExtendedData": { -"description": "Extension data for the whole person entity.", -"id": "AppsPeopleOzExternalMergedpeopleapiPersonExtendedData", -"properties": { -"aboutMeExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAboutMeExtendedData", -"description": "For use by AboutMe and SmartProfile clients." -}, -"appsWaldoExtendedData": { -"$ref": "SocialGraphWireProtoPeopleapiExtensionAppsWaldoExtendedData", -"description": "For use with Apps Waldo Availability Data extension" -}, -"callerIdExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCallerIdExtendedData", -"description": "For use with caller ID extension" -}, -"contactsExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiWebContactsExtendedData", -"description": "For use with Contacts extension." -}, -"domainName": { -"description": "Hosted domain this person is a member of. The domain_name is also returned as part of the person's ReadOnlyProfileInfo, so requesting it via this extension is no longer necessary.", -"items": { -"type": "string" -}, -"type": "array" -}, -"dynamiteExtendedData": { -"$ref": "SocialGraphWireProtoPeopleapiExtensionDynamiteExtendedData", -"description": "For use with Dynamite extension." 
-}, -"gpayExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiGPayExtendedData", -"description": "For use with Google Pay extension." -}, -"gplusExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiGplusExtendedData", -"description": "For use with Google+ extension." -}, -"hangoutsExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiHangoutsExtendedData", -"description": "For use with Hangouts extension." -}, -"isPlaceholder": { -"description": "For use with gmail extensions and lookup by email. If true, no person was actually found using the specified email address, but we want to return TLS info about the email address regardless.", -"type": "boolean" -}, -"mapsExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMapsExtendedData", -"description": "For use with Maps extension." -}, -"paisaExtendedData": { -"$ref": "SocialGraphWireProtoPeopleapiExtensionPaisaExtendedData", -"description": "For use with Paisa extension" -}, -"peopleStackExtendedData": { -"$ref": "SocialGraphWireProtoPeopleapiExtensionPeopleStackExtendedData", -"deprecated": true, -"description": "DEPRECATED: Use people_stack_person_extended_data instead. For use with PeopleStack extension." -}, -"peopleStackPersonExtendedData": { -"$ref": "SocialGraphWireProtoPeopleapiExtensionPeopleStackPersonExtendedData", -"description": "For use with PeopleStack extension." -}, -"playGamesExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedData", -"description": "For use with Play Games Product Profile extension. See go/jam-games-profile. The play games profile will be returned only for profile-centric requests." -}, -"tlsIsPlaceholder": { -"deprecated": true, -"description": "For use with the TLS extension and lookup by email. If true, no person was actually found using the specified email address, but we want to return TLS info about the email address regardless. 
DEPRECATED: Use is_placeholder instead.", -"type": "boolean" -}, -"youtubeExtendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiYoutubeExtendedData", -"description": "For use with Youtube extension." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata": { -"description": "Metadata for a single Person field. See go/understanding-merged-person", -"id": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata", -"properties": { -"aclChoices": { -"description": "When the container is PROFILE/DOMAIN_PROFILE and the profile owner is the requester, this read-only, synthesized field indicates which ACLs the user is allowed to set on the profile field. This is distinct from field_acl, which is the field's currently set ACL. field_acl will always be a valid ACL choice, except for the case of default synthesized profile fields like monogram profile photos. For those, field_acl does not represent a user-set field ACL, so it may or may not be a valid choice. In all cases, default_acl_choice will always be a valid choice. This is currently only populated on the photo field when the \"person.photo.metadata.acl_choices\" mask is set.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAcl" -}, -"type": "array" -}, -"additionalContainerInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAdditionalContainerInfo", -"deprecated": true, -"description": "Additional information about the container of this field." -}, -"affinity": { -"description": "For field-level affinity scores. 
The affinity between the requester and this particular field in the Person (e.g., frequency of calling a particular phone number).", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAffinity" -}, -"type": "array" -}, -"contactVisibility": { -"description": "Each field can have different visibility settings Only returned when explicitly requested.", -"items": { -"enum": [ -"CONTACT_VISIBILITY_UNSPECIFIED", -"VISIBLE_TO_GUEST" -], -"enumDescriptions": [ -"", -"This can be set for annotation fields(e.g. nicknames, relationship) that the user wants to expose to other people in their household. Example use case: differentiate between speed dial nicknames, regular nicknames and relationships" -], -"type": "string" -}, -"type": "array" -}, -"container": { -"deprecated": true, -"description": "DEPRECATED. Use container_type instead. Having the Container be an enum at the PFM message level causes circular dependency when other types try to refer to it. It breaks javascript build targets.", -"enum": [ -"UNKNOWN", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. 
They are not editable by the profile owner.", -"Domain shared contact", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Data from the user's frequent interactions.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"containerId": { -"deprecated": true, -"description": "DEPRECATED. Use encoded_container_id instead. The numeric id of the data source. The id is only unique within a single container type. This is only set when the id of the container is numeric, e.g. contact id.", -"format": "int64", -"type": "string" -}, -"containerPrimary": { -"description": "Indicates if this field is the primary field for the container and container_id.", -"type": "boolean" -}, -"containerType": { -"description": "The source for the data in the field.", -"enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. 
E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." 
-], -"type": "string" -}, -"crossDeviceAllowed": { -"description": "True if this field can be used on other devices than the one it originated from. Assigned by the server. Currently only used for device contacts.", -"type": "boolean" -}, -"defaultAclChoice": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAcl", -"description": "When the container is PROFILE/DOMAIN_PROFILE and the profile owner is the requester, this read-only, synthesized field contains the default ACL choice. This can be used to select a preferred choice from acl_choices. Generally, default_acl_choice should only be preferred for default synthesized profile fields like monogram profile photos. Otherwise, the existing field_acl should be preferred. This is currently only populated on the photo field when the \"person.photo.metadata.acl_choices\" mask is set." -}, -"deprecatedContactContainerId": { -"deprecated": true, -"description": "DEPRECATED. Use container_id. Not populated or used at all.", -"format": "int64", -"type": "string" -}, -"edgeKey": { -"description": "Field is an edge key for this person. Modifying it breaks the link between data sources. This is equivalent to edge_key_info having at least one entry with materialized = true.", -"type": "boolean" -}, -"edgeKeyInfo": { -"description": "Edges that this field creates. This includes all edges and not necessarily just the edge relevant to the joined entities.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEdgeKeyInfo" -}, -"type": "array" -}, -"encodedContainerId": { -"description": "The encoded id of the data source. The id is only unique within a single container type. This field correlates to person.metadata.identity_info.source_id.id. This field may not be populated in some special cases, where the id is not visible to the querying user. e.g. ListAutocompletions with full phone number query. 
For value format, see google3/social/graph/api/proto/main_merged_person.proto?q=symbol:SourceIdentity.id", -"type": "string" -}, -"fieldAcl": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldAcl", -"description": "When the container is PROFILE and the profile owner is the requester, this field indicates how the profile field is accessible." -}, -"lastUpdateTime": { -"description": "Indicates the time that the field was added or last edited. Currently this is populated for: (1) person.birthday with ContainerType PROFILE, DOMAIN_PROFILE or ACCOUNT. (2) person.name, person.address, person.relation, person.email and person.phone with ContainerType CONTACT_ANNOTATION;", -"format": "google-datetime", -"type": "string" -}, -"matchingInfo": { -"description": "The matching informations if there was a query against this field.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiMatchInfo" -}, -"type": "array" -}, -"otherDedupedContainers": { -"description": "When deduping fields by value, list of containers of the fields that where deduped.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDedupedContainerInfo" -}, -"type": "array" -}, -"primary": { -"description": "If true, indicates this field is the Person's primary field eg. Contact, and (Profile) Person could have different Name fields, and the Name represented by the Person is primary. For selecting a primary field from RepeatedFields within a Person, use container_primary.", -"type": "boolean" -}, -"productMetadata": { -"description": "The product(s) that generated the data in this field. Empty is equivalent to DEFAULT. ST_USER_METADATA", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProductMetadata" -}, -"type": "array" -}, -"verified": { -"description": "Indicates whether this is a verified field. It is synthesized from verification and is read-only. If there is at least one verification with status PASSED, the field is considered verified. 
Currently this is applicable to address, email, name, and phone for PROFILE and DOMAIN_PROFILE. Use .metadata.verified in the request mask.", -"type": "boolean" -}, -"visibility": { -"description": "Currently, only people.get may set this value", -"enum": [ -"VISIBILITY_UNKNOWN", -"PUBLIC", -"USER" -], -"enumDescriptions": [ -"", -"The field is visible to every user.", -"The field is visible to the requester. The server cannot provide additional indication beyond that. The field may or may not be also visible to users other than the requester." -], -"type": "string" -}, -"writeable": { -"description": "Whether the field is writeable to the requester.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonListWithTotalNumber": { -"description": "A person list with total number specified.", -"id": "AppsPeopleOzExternalMergedpeopleapiPersonListWithTotalNumber", -"properties": { -"people": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"totalNumber": { -"description": "The total number of people, which is aways no less than the size of the above list.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonMetadata": { -"description": "Metadata for the entire Person resource.", -"id": "AppsPeopleOzExternalMergedpeopleapiPersonMetadata", -"properties": { -"affinity": { -"description": "Affinities associated with the person, with respect to the requester.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAffinity" -}, -"type": "array" -}, -"attribution": { -"description": "Populated when the data for the MergedPerson comes from a 3rd party provider or data source. Clients must display these attributions to the user if they are present. 
NOTE: This field is only relevant when requesting the following containers: - PLACE (data read from Maps)", -"items": { -"type": "string" -}, -"type": "array" -}, -"bestDisplayName": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiBestDisplayName", -"description": "The best name to use for this person for user-facing display. See go/people-api-howto:names for details about how this field is computed. In many cases this will simply be Person.name[0]. However, Person.name returns only explicit Name fields, but other fields maybe usable as a name (for example: nicknames, file_as, ...). `best_display_name` will be calculated from all fields in storage which are usable as a name, even fields which are not explicitly requested in the MergedPerson result. See go/javagoog/apps/tacotown/socialgraph/entity/PersonNameFormatter.java" -}, -"blockType": { -"deprecated": true, -"description": "DEPRECATED. Indicates whether the profile owner has blocked this person. Please use `person.read_only_profile_info.block_type` instead.", -"items": { -"enum": [ -"BLOCK_TYPE_UNKNOWN", -"CIRCLE", -"LEGACY" -], -"enumDescriptions": [ -"", -"Profile-blocked users", -"Chat-blocked users" -], -"type": "string" -}, -"type": "array" -}, -"circleId": { -"deprecated": true, -"description": "DEPRECATED. The circles the person belongs to.", -"items": { -"type": "string" -}, -"type": "array" -}, -"contactGroupId": { -"deprecated": true, -"description": "DEPRECATED. Please use `person.contact_group_memberships` instead. The contact groups the person belongs to.", -"items": { -"type": "string" -}, -"type": "array" -}, -"contactId": { -"description": "The IDs of all contacts contributing to this person.", -"items": { -"format": "int64", -"type": "string" -}, -"type": "array" -}, -"customResponseMaskingType": { -"deprecated": true, -"description": "DEPRECATED. Customized masking of the response similar to the legacy People2RequestMask People2Params request message. 
NOTE: This param is inherently client-specific, limited to specific legacy clients, and not open to new usage. NOTE: Effects may be applied to a subset of people in the response.", -"enum": [ -"UNKNOWN", -"NONE", -"MENAGERIE" -], -"enumDescriptions": [ -"", -"No filtering of the response.", -"Used by Menagerie to request menagerie-specific custom filtering or masking on the response." -], -"type": "string" -}, -"deleted": { -"description": "For sync requests (i.e., changed since the provided sync_token), indicates the resource is a tombstone for a Person resource that has been entirely deleted.", -"type": "boolean" -}, -"deprecatedBlocked": { -"deprecated": true, -"description": "DEPRECATED. Please use `person.read_only_profile_info.block_type` instead.", -"type": "boolean" -}, -"deprecatedMembershipCircleId": { -"deprecated": true, -"description": "DEPRECATED. This field is no longer populated or read.", -"items": { -"format": "int64", -"type": "string" -}, -"type": "array" -}, -"deprecatedMembershipContactGroupId": { -"deprecated": true, -"description": "DEPRECATED. This field is no longer populated or read.", -"items": { -"format": "int64", -"type": "string" -}, -"type": "array" -}, -"deviceContactInfo": { -"description": "Info about the aggregated device contacts. When the person contains RAW_DEVICE_CONTACT containers, each DeviceContactInfo represents a single aggregate device contact made up of one or more raw device contacts.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDeviceContactInfo" -}, -"type": "array" -}, -"identityInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiIdentityInfo", -"description": "Detailed metadata about the lookup IDs and data sources included in a MergedPerson result." -}, -"inViewerDomain": { -"deprecated": true, -"description": "DEPRECATED. Indicates whether this person is in the same domain as the viewer. 
For proxying trust between two users based on organization membership, see: - go/flex-orgs-platform - go/flex-orgs-compliance-handbook (especially http://shortn/_ChwfAY36Ys)", -"type": "boolean" -}, -"incomingBlockType": { -"deprecated": true, -"description": "DEPRECATED. Indicates whether this person is blocking the profile owner. Please use `person.read_only_profile_info.incoming_block_type` instead.", -"items": { -"enum": [ -"BLOCK_TYPE_UNKNOWN", -"CIRCLE", -"LEGACY" -], -"enumDescriptions": [ -"", -"Profile-blocked users", -"Chat-blocked users" -], -"type": "string" -}, -"type": "array" -}, -"lastUpdateTimeMicros": { -"description": "DEPRECATED. The last update timestamps for the constituent components of this person are available in `identity_info.source_ids`. The time of the most recent change to this person, in !!!NANOS!!! (due to a bug). May be a change to any of the underlying parts of the person (profile, contact, etc.). Not guaranteed to be the timestamp of the most recent change, due to limitations in the backend. This field is not fully deprecated for backend container-specific storage services like ProfileService which lack identity_info. The use is still discouraged in such systems and they should prefer to use the `last_update_time` field of this message instead.", -"format": "int64", -"type": "string" -}, -"model": { -"description": "The person model that is used to construct this person.", -"enum": [ -"PERSON_MODEL_UNKNOWN", -"PROFILE_CENTRIC", -"CONTACT_CENTRIC" -], -"enumDescriptions": [ -"", -"The default People API model where a person can contain no more than one profile and potentially many contacts.", -"The inversion of the default profile-centric model, i.e. a person can contain no more than one contact but potentially many profiles. See go/contact-centric-person-model for more details." 
-], -"type": "string" -}, -"objectType": { -"deprecated": true, -"description": "DEPRECATED.", -"enum": [ -"OBJECT_TYPE_UNKNOWN", -"PERSON", -"PAGE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"ownerId": { -"deprecated": true, -"description": "DEPRECATED. Please use `person.read_only_profile_info.owner_id` instead.", -"type": "string" -}, -"ownerUserType": { -"deprecated": true, -"description": "DEPRECATED. See `person.read_only_profile_info.owner_user_type` instead.", -"items": { -"enum": [ -"OWNER_USER_TYPE_UNKNOWN", -"GOOGLE_USER", -"GPLUS_USER", -"GPLUS_DISABLED_BY_ADMIN", -"GOOGLE_APPS_USER", -"GOOGLE_APPS_SELF_MANAGED_USER", -"GOOGLE_FAMILY_USER", -"GOOGLE_FAMILY_CHILD_USER", -"GOOGLE_APPS_ADMIN_DISABLED", -"GOOGLE_ONE_USER", -"GOOGLE_FAMILY_CONVERTED_CHILD_USER" -], -"enumDescriptions": [ -"", -"The owner is a Google user.", -"The owner is a Currents user.", -"Google+, i.e. INSServiceID::GOOGLE_ME service, is disabled by dasher admin. Only returned for requester's own profile.", -"The owner is a Google Apps for Work user.", -"The owner is a self-managed Google Apps for Work user. Only returned for requester's own profile.", -"The owner is a Google for Families user. Only returned for requester's own profile.", -"The owner is a Google for Families child user. Only returned for requester's own profile.", -"The owner has been disabled (archived, suspended) by the G Suite admin.", -"The owner is a Google One user (go/googleone). Only returned for requester's own profile.", -"The owner is a Google for Families child user that was converted from an existing user. Only returned for requester's own profile. (go/griffin-account)" -], -"type": "string" -}, -"type": "array" -}, -"plusPageType": { -"deprecated": true, -"description": "DEPRECATED. 
Please use `Person.plus_page_info` instead.", -"enum": [ -"PLUS_PAGE_TYPE_UNKNOWN", -"LOCAL", -"COMPANY", -"BRAND", -"CELEBRITY", -"CAUSE", -"ENTERTAINMENT", -"OTHER", -"OBSOLETE_PRIVATE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"Formerly a private madison account(go/private-madison-account). Don't use it anymore." -], -"type": "string" -}, -"previousPersonId": { -"deprecated": true, -"description": "DEPRECATED. This field is no longer populated or read.", -"items": { -"type": "string" -}, -"type": "array" -}, -"profileOwnerStats": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProfileOwnerStats", -"deprecated": true, -"description": "DEPRECATED. Stats/counters pertaining to followers and incoming edges. Please use `person.read_only_profile_info.profile_owner_stats` instead." -}, -"scoringInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfo", -"description": "Contact people-directory-dev-team@ if you want to use this field." -}, -"userVisibleStats": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiUserVisibleStats", -"deprecated": true, -"description": "DEPRECATED. This field is no longer populated or read." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfo": { -"id": "AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfo", -"properties": { -"rawMatchQualityScore": { -"description": "Only populated on a SearchDirectoryPeople call, when results are scored. Contact people-directory-dev-team@ if you want to use this field.", -"format": "double", -"type": "number" -}, -"stExpressionResults": { -"description": "Only populated on a SearchDirectoryPeople call that sends a request with StFieldSpecExpressions. 
- Used for linking indexed terms with query terms for go/better-name-matching - Name should be alphanumeric or underscores - Value should be an st expression following the syntax at go/stsyntax Contact people-directory-dev-team@ if you want to use this field.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfoStExpressionResult" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfoStExpressionResult": { -"id": "AppsPeopleOzExternalMergedpeopleapiPersonMetadataScoringInfoStExpressionResult", -"properties": { -"name": { -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPhone": { -"id": "AppsPeopleOzExternalMergedpeopleapiPhone", -"properties": { -"canonicalizedForm": { -"description": "Canonicalized form that follows ITU-T E.164 international public telecommunication numbering plan.", -"type": "string" -}, -"emergencyInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiFieldEmergencyInfo", -"description": "Emergency information. See go/emergency-trusted-contacts-papi." -}, -"extendedData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPhoneExtendedData", -"description": "Read-only. Field requested by specifying `HANGOUTS_PHONE_DATA` in `extension_set.extension_names`." -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"description": "The type of the phone number. 
The type can be free form or one of these predefined values: * `home` * `work` * `mobile` * `homeFax` * `workFax` * `otherFax` * `pager` * `workMobile` * `workPager` * `main` * `googleVoice` * `other`", -"type": "string" -}, -"uri": { -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPhoneExtendedData": { -"id": "AppsPeopleOzExternalMergedpeopleapiPhoneExtendedData", -"properties": { -"structuredPhone": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiStructuredPhone", -"description": "For use with Hangouts extension." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPhoto": { -"id": "AppsPeopleOzExternalMergedpeopleapiPhoto", -"properties": { -"decoration": { -"description": "Read-only. Use UpdatePersonPhoto to change photo decorations. If this photo is decorated, this field contains information about its decorations. For now, this will contain at most one entry.", -"items": { -"$ref": "SocialGraphApiProtoDecorationOverlay" -}, -"type": "array" -}, -"emojiAvatarUrl": { -"description": "URL of an emoji avatar as an image. See go/emoji-cdn. PeopleAPI will return the SVG format so that it can be scaled client side and so that the images will not be animated. All clients that use this field must also have fall-back handling for using the `Photo.url` field if this is empty. When we have FIFE-compatible emoji-image URLs we will drop this field and return the Photo.url instead. Clients that have their own go/emoji-rendering integration may prefer to render the emoji-avatar from `Photo.glyph` field using their rendering system so that the emoji version/style match the rest of the application. For further background, see go/chatroom-avatar-as-roster-metadata. 
This field will only be populated if all of: - The PersonFieldMetadata `container_type` for the Photo is NAMED_CHAT_ROOM - The chat room has an emoji type avatar image set", -"type": "string" -}, -"glyph": { -"description": "Unicode emoji representation of the chat room emoji avatar. This can be used by clients that use go/emoji-rendering directly so that they can present this with the same version/style as the rest of their application. This value may also be useful to clients as alt-text for the image. This field will only be populated if all of: - The PersonFieldMetadata `container_type` for the Photo is NAMED_CHAT_ROOM - The chat room has an emoji type avatar image set", -"type": "string" -}, -"htmlAttribution": { -"description": "A set of HTML data provider attributions that must be shown with the result. Supported for PLACES photos only. See: go/understanding-places-api-attribution-requirements", -"items": { -"type": "string" -}, -"type": "array" -}, -"isDefault": { -"description": "True when the photo is synthetic or generated (i.e. a monogram or default photo), false when the person has a custom photo.", -"type": "boolean" -}, -"isMonogram": { -"description": "Indicates if the photo is a monogram avatar. Combined with is_default, the type of photo can be determined by: is_default=true, is_monogram=true: Default monogram avatar. is_default=true, is_monogram=false: Default silhouette avatar. is_default=false: Custom photo. is_monogram is irrelevant in this case.", -"type": "boolean" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"monogramBackground": { -"description": "When is_monogram=true, this is the background color of the monogram photo as a hex RGB formatted string \"RRGGBB\".", -"type": "string" -}, -"originalPhoto": { -"$ref": "SocialGraphApiProtoImageReference", -"description": "Read-only. A reference to the original, undecorated profile photo in storage. This field is not stored. 
It is populated by a live read to /SocialGraphImageService.GetActiveProfilePhoto. This field is only returned when \"person.photo.original_photo\" is specified in the request mask." -}, -"photoId": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPhotoPhotoStorageId", -"description": "For writes only. Indicates photo content for person photo-field update. Currently only used for profile-photo updates (not contact photos yet)." -}, -"photoToken": { -"description": "Most clients don't need to worry about this field and should just use the `url` to fetch the photo. See go/phototoken-migration-plan for some more context about this field. If you think you want to use this please talk with people-api-eng@ first.", -"type": "string" -}, -"url": { -"description": "See go/people-api-concepts/photos for info on the different representations of URLs.", -"type": "string" -}, -"viewerUrl": { -"description": "A URL for a UI to view the photo in its original context. For example, for a place photo, this is the url of a Google Maps page displaying the photo. Supported for place photos only.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPhotoPhotoStorageId": { -"description": "Info necessary for looking up a photo in storage.", -"id": "AppsPeopleOzExternalMergedpeopleapiPhotoPhotoStorageId", -"properties": { -"mediaKey": { -"description": "For writes only, pass the media key that represents the image in photos backend. 
Note, this is not populated on reads.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPlaceDetails": { -"description": "Metadata specific to places.", -"id": "AppsPeopleOzExternalMergedpeopleapiPlaceDetails", -"properties": { -"googleUrl": { -"description": "A URL hosted by Google providing more information about this place This is the URL returned by Places API in the Place.Url.google field", -"type": "string" -}, -"latLng": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiLatLng" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"openingHours": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiOpeningHours" -}, -"primaryTypeName": { -"description": "The name of the primary type. Examples of primary type are: \"art_school\", \"clothing_wholesaler\", etc. All primary types can be found at http://shortn/_veqh6UwWdc", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedData": { -"description": "Extension data for use in Play Games Product Profile. See go/jam-games-profile.", -"id": "AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedData", -"properties": { -"achievements": { -"description": "User's top achievements that are sorted for example by rarity.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedDataAchievement" -}, -"type": "array" -}, -"avatarImageUrl": { -"description": "The avatar image to display for the user.", -"type": "string" -}, -"failure": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProductProfileFailure", -"description": "Failure type if there is an error when fetching product profile data." -}, -"gamerTag": { -"description": "The gamer tag set by the user. 
Not set if the user hasn't set a gamer tag yet.", -"type": "string" -}, -"playerLevel": { -"description": "User's level.", -"format": "int32", -"type": "integer" -}, -"profileVisibility": { -"description": "Specifies the visibility of the player's profile.", -"enum": [ -"UNKNOWN_CLIENT_PLAYER_PROFILE_VISIBILITY", -"PRIVATE_VISIBILITY", -"PUBLIC_VISIBILITY", -"FRIENDS_VISIBILITY" -], -"enumDescriptions": [ -"Safe default.", -"The profile is not visible to anyone but the player themselves.", -"The profile is visible to everyone.", -"The profile is visible to the player and their PGS friends." -], -"type": "string" -}, -"totalFriendsCount": { -"description": "Total number of friends.", -"format": "int64", -"type": "string" -}, -"totalUnlockedAchievements": { -"description": "How many achievements this player has unlocked.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedDataAchievement": { -"description": "Details of an achievement that the user has unlocked.", -"id": "AppsPeopleOzExternalMergedpeopleapiPlayGamesExtendedDataAchievement", -"properties": { -"achievementName": { -"description": "The name of the achievement.", -"type": "string" -}, -"achievementUnlockedIconUrl": { -"description": "The achievement icon url shown to the user if it is unlocked.", -"type": "string" -}, -"rarityPercentage": { -"description": "Rarity of unlocking this achievement (3% of players unlocked would be 3)", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPlusPageInfo": { -"deprecated": true, -"description": "Information about a plus page and the entity it represents.", -"id": "AppsPeopleOzExternalMergedpeopleapiPlusPageInfo", -"properties": { -"applicationId": { -"description": "Int64 ID of packaging-service entry; if set, the plus page is associated with a third-party application.", -"format": "int64", -"type": "string" -}, -"entityType": { -"enum": [ 
-"ENTITY_TYPE_UNSPECIFIED", -"LOCAL", -"COMPANY", -"BRAND", -"CELEBRITY", -"CAUSE", -"ENTERTAINMENT", -"OTHER", -"OBSOLETE_PRIVATE" -], -"enumDescriptions": [ -"", -"These are the top-level entity types for plus pages.", -"", -"", -"", -"", -"", -"", -"Used to identify a private madison account(go/private-madison-account). Don't use it anymore." -], -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPointSpec": { -"description": "Map marker location for an address.", -"id": "AppsPeopleOzExternalMergedpeopleapiPointSpec", -"properties": { -"bounds": { -"$ref": "GeostoreRectProto" -}, -"point": { -"$ref": "GeostorePointProto" -}, -"pointSource": { -"enum": [ -"UNKNOWN_POINT_SOURCE", -"POINT_SOURCE_UNSPECIFIED", -"USER_PROVIDED", -"SYSTEM_PROVIDED", -"USER_CONFIRMED" -], -"enumDescriptions": [ -"", -"", -"User has explicitly moved the point.", -"System has generated the marker.", -"System has generated the marker, but user has viewed and saved it unchanged." -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPosixAccount": { -"description": "Custom field that represents POSIX account information. Description of the field family: go/fbs-posix. If account has non-empty Username or Uid we will enforce global uniqueness of (AccountNamespace, CustomerKey, SystemId, Uid) and (AccountNamespace, CustomerKey, SystemId, Username) tuples to ensure there are no duplicates.", -"id": "AppsPeopleOzExternalMergedpeopleapiPosixAccount", -"properties": { -"accountId": { -"description": "The user visible value is used to distinguish identical posix account fields with different customer key values.", -"type": "string" -}, -"accountNamespace": { -"description": "Value indicates the uniqueness namespace that applies to the POSIX information. The value is included in all POSIX account uniqueness indices. 
The indexing prevents two accounts within the same customer from having the same username. Namespacing allows Windows and Linux users to share the same username.", -"enum": [ -"LINUX_GSUITE", -"LINUX_CONSUMER", -"WINDOWS_GSUITE", -"WINDOWS_CONSUMER" -], -"enumDescriptions": [ -"A Linux user for a G Suite domain.", -"A Linux user for a Google account.", -"A Windows user for a G Suite domain.", -"A Windows user for a Google account." -], -"type": "string" -}, -"accountType": { -"description": "Value indicates whether the POSIX information is associated with a non-human entity and the validation logic to apply during PosixAccount mutation.", -"enum": [ -"LINUX_USER_ACCOUNT", -"LINUX_SERVICE_ACCOUNT", -"LINUX_EXTERNAL_USER", -"WINDOWS_USER_ACCOUNT", -"WINDOWS_SERVICE_ACCOUNT", -"WINDOWS_EXTERNAL_USER" -], -"enumDescriptions": [ -"Linux account associated with a human user.", -"Linux account for a non-human entity. Service accounts are used for unattended automation scripts. See go/riseoftherobots for details.", -"Linux account associated with a human user where the owner of the POSIX data is granted access to resources (VMs) owned by a different (external) organization.", -"Windows account associated with a human user.", -"Windows account for a non-human entity. Service accounts are used for unattended automation scripts.", -"Windows account associated with a human user where the owner of the account data is granted access to resources (VMs) owned by a different (external) organization." -], -"type": "string" -}, -"customerKey": { -"description": "The customer associated with the POSIX identity. If the user is already associated with a G Suite Customer, this field has the same value as http://google3/ccc/hosted/policies/settings/dthree_customer_info.proto", -"format": "int64", -"type": "string" -}, -"fingerprint": { -"description": "The value is automatically set to a SHA-256 fingerprint of the POSIX account. 
A fingerprint should uniquely identify a POSIX account entry.", -"type": "string" -}, -"gecos": { -"description": "The GECOS (user information) entry for this account.", -"type": "string" -}, -"gid": { -"description": "The default group ID.", -"format": "uint64", -"type": "string" -}, -"homeDirectory": { -"description": "The path to the home directory for this account.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"operatingSystemType": { -"description": "Value indicates whether to use Linux or Windows specific validation logic during PosixAccount mutation.", -"enum": [ -"OPERATING_SYSTEM_TYPE_UNSPECIFIED", -"LINUX", -"WINDOWS" -], -"enumDescriptions": [ -"The operating system type associated with the user account information is unspecified.", -"Linux user account information.", -"Windows user account information." -], -"type": "string" -}, -"shell": { -"description": "The path to the login shell for this account.", -"type": "string" -}, -"systemId": { -"description": "System identifier for which account Username or Uid apply to. If not specified on mutate by a caller it will default to empty value if either Username or Uid are being set. 
SystemId does require to have a value (even an empty one) because it is included into null-filtered Spanner index used to enforce uniqueness on Username and Uid fields.", -"type": "string" -}, -"uid": { -"description": "The user ID.", -"format": "uint64", -"type": "string" -}, -"username": { -"description": "The username of the account.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiProductMetadata": { -"id": "AppsPeopleOzExternalMergedpeopleapiProductMetadata", -"properties": { -"productSource": { -"enum": [ -"PRODUCT_SOURCE_UNKNOWN", -"PRODUCT_SOURCE_DEFAULT", -"PRODUCT_SOURCE_ASSISTANT", -"PRODUCT_SOURCE_JANATA", -"PRODUCT_SOURCE_SPEED_DIAL" -], -"enumDescriptions": [ -"", -"", -"", -"The product for non-prominent people creating their profiles and sharing contact info on Google search (go/project-janata).", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiProductProfileFailure": { -"description": "Product profiles failure type: the status of the rpc to fetch the product profile.", -"id": "AppsPeopleOzExternalMergedpeopleapiProductProfileFailure", -"properties": { -"failureType": { -"enum": [ -"PRODUCT_PROFILE_FAILURE_TYPE_UNKNOWN", -"RPC_FAILURE" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiProfileOwnerStats": { -"description": "Stats pertaining to incoming edges and views, visible to the requester (with acls applied). Related to, but not equal to, com.google.focus.proto.Storage.UserVisibleStats.", -"id": "AppsPeopleOzExternalMergedpeopleapiProfileOwnerStats", -"properties": { -"incomingAnyCircleCount": { -"description": "Replacement for deprecated follower_count. Comes from the EdgeSummary.", -"format": "int64", -"type": "string" -}, -"viewCount": { -"deprecated": true, -"description": "Deprecated. 
This field is no longer populated by the server.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiProfileUrl": { -"description": "This is a Google+-only field (and thus does not exist for consumer users). See go/fbs-g+-deprecation.", -"id": "AppsPeopleOzExternalMergedpeopleapiProfileUrl", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"url": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiPronoun": { -"description": "Message to represent a user's set of preferred pronouns, see go/pronouns-backend.", -"id": "AppsPeopleOzExternalMergedpeopleapiPronoun", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"pronounData": { -"$ref": "SocialGraphApiProtoPronounData" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRawDeviceContactAnalyticalInfo": { -"id": "AppsPeopleOzExternalMergedpeopleapiRawDeviceContactAnalyticalInfo", -"properties": { -"dataSet": { -"description": "The data set within the account that this raw contact belongs to.", -"type": "string" -}, -"dirty": { -"description": "The CP2 dirty field which indicates the sync state of the raw contact: https://developer.android.com/reference/android/provider/ContactsContract.SyncColumns#DIRTY True if the row is changed but not synced", -"type": "boolean" -}, -"sourceIdExist": { -"description": "Whether the source ID exists for non-Google contacts. Won't set for Google contacts.", -"type": "boolean" -}, -"syncInfo": { -"$ref": "SocialGraphApiProtoSyncInfo", -"description": "The Sync Info of a raw contact." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRawDeviceContactInfo": { -"description": "Raw device contact information.", -"id": "AppsPeopleOzExternalMergedpeopleapiRawDeviceContactInfo", -"properties": { -"accountName": { -"description": "Account name of raw contact, e.g. 
\"google@gmail.com\".", -"type": "string" -}, -"accountType": { -"description": "Account type of raw contact, e.g. \"com.google\" or \"com.linkedin.android\".", -"type": "string" -}, -"appContactData": { -"description": "The detailed app-specific endpoint data available for the given RawDeviceContactInfo instance. This proto should be used to obtain the list of actions and mimetypes supported by the third-party app. Design: go/3p-contact-upload", -"items": { -"$ref": "SocialGraphApiAppContactData" -}, -"type": "array" -}, -"appInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAppUniqueInfo", -"description": "The app-specific endpoint data needed for app action fulfillment. Usage of this field should be avoided on the server-side, and should use the more detailed |full_app_info| field. " -}, -"crossDeviceAllowed": { -"description": "If true, this raw contact can be used on other devices than the one it originated from. Assigned by the server.", -"type": "boolean" -}, -"deviceContactMetadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiDeviceContactExtraMetadata", -"description": "Extra metadata for this raw contact." -}, -"googleContactId": { -"description": "The focus contact id for Google contacts.", -"format": "int64", -"type": "string" -}, -"id": { -"description": "The base64 serialized social.graph.peopleapi.proto.internal.RawDeviceContactId. This id should be used to correlate to field.metadata.encoded_container_id when the field.metadata.container_type is RAW_DEVICE_CONTACT The id also correlates to person.metadata.identity_info.source_id.id. 
For value format, see google3/social/graph/api/proto/main_merged_person.proto?q=symbol:SourceIdentity.id", -"type": "string" -}, -"photoType": { -"description": "The type of photo from the device (if any).", -"enum": [ -"PHOTO_TYPE_UNKNOWN", -"NO_PHOTO", -"THUMBNAIL", -"FULL_SIZE_PHOTO" -], -"enumDescriptions": [ -"", -"This raw contact doesn't have a photo.", -"This raw contact only has a thumbnail.", -"This raw contact has a full size photo." -], -"type": "string" -}, -"rawContactId": { -"description": "The id of the raw contact on the device.", -"format": "int64", -"type": "string" -}, -"rawDeviceContactAnalyticalInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRawDeviceContactAnalyticalInfo", -"description": "Only to be used by Romanesco team specifically for analytics." -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfo": { -"description": "Metadata information about a profile. This message replaces legacy profile-specific singleton fields from the PersonMetadata proto (singleton top level Person fields are not compatible with non-profile-centric person results, which may include multiple profile containers).", -"id": "AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfo", -"properties": { -"accountEmail": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAccountEmail", -"description": "The account email linked to the profile, if any exists and is visible to the requester." -}, -"blockType": { -"description": "Indicates whether the profile owner has blocked this person.", -"items": { -"enum": [ -"BLOCK_TYPE_UNKNOWN", -"CIRCLE", -"LEGACY" -], -"enumDescriptions": [ -"", -"Profile-blocked users", -"Chat-blocked users" -], -"type": "string" -}, -"type": "array" -}, -"customerInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiCustomerInfo", -"description": "CustomerInfo for dasher user. 
The reader has to explicitly request this in the field_mask as 'read_only_profile_info.customer_info'" -}, -"domainInfo": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfoDomainInfo", -"deprecated": true, -"description": "DEPRECATED. Use the `ReadOnlyProfileInfo.customer_info` field instead (b/138120418). Only populated if in_viewer_domain is true." -}, -"inViewerDomain": { -"deprecated": true, -"description": "DEPRECATED. Proxying trust between users in a domain should use go/flex-orgs-platform. For more info see: http://doc/18i0-C7vWcz2UuXYBsmulnriVCK3_EuMPpRlPa2OmMHw#heading=h.dobotdwx25kg Indicates whether the profile owner is in the same domain as the viewer.", -"type": "boolean" -}, -"incomingBlockType": { -"description": "Indicates whether this person is blocking the profile owner.", -"items": { -"enum": [ -"BLOCK_TYPE_UNKNOWN", -"CIRCLE", -"LEGACY" -], -"enumDescriptions": [ -"", -"Profile-blocked users", -"Chat-blocked users" -], -"type": "string" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"objectType": { -"deprecated": true, -"description": "DEPRECATED.", -"enum": [ -"OBJECT_TYPE_UNKNOWN", -"PERSON", -"PAGE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"ownerId": { -"description": "The Focus-obfuscated Gaia ID of the profile owner (go/obfuscated-ids).", -"type": "string" -}, -"ownerUserType": { -"items": { -"enum": [ -"OWNER_USER_TYPE_UNKNOWN", -"GOOGLE_USER", -"GPLUS_USER", -"GPLUS_DISABLED_BY_ADMIN", -"GOOGLE_APPS_USER", -"GOOGLE_APPS_SELF_MANAGED_USER", -"GOOGLE_FAMILY_USER", -"GOOGLE_FAMILY_CHILD_USER", -"GOOGLE_APPS_ADMIN_DISABLED", -"GOOGLE_ONE_USER", -"GOOGLE_FAMILY_CONVERTED_CHILD_USER" -], -"enumDescriptions": [ -"", -"The owner is a Google user.", -"The owner is a Currents user.", -"Google+, i.e. INSServiceID::GOOGLE_ME service, is disabled by dasher admin. 
Only returned for requester's own profile.", -"The owner is a Google Apps for Work user.", -"The owner is a self-managed Google Apps for Work user. Only returned for requester's own profile.", -"The owner is a Google for Families user. Only returned for requester's own profile.", -"The owner is a Google for Families child user. Only returned for requester's own profile.", -"The owner has been disabled (archived, suspended) by the G Suite admin.", -"The owner is a Google One user (go/googleone). Only returned for requester's own profile.", -"The owner is a Google for Families child user that was converted from an existing user. Only returned for requester's own profile. (go/griffin-account)" -], -"type": "string" -}, -"type": "array" -}, -"plusPageType": { -"deprecated": true, -"description": "DEPRECATED. Please use `person.plus_page_info` instead.", -"enum": [ -"PLUS_PAGE_TYPE_UNKNOWN", -"LOCAL", -"COMPANY", -"BRAND", -"CELEBRITY", -"CAUSE", -"ENTERTAINMENT", -"OTHER", -"OBSOLETE_PRIVATE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"Formerly a private madison account(go/private-madison-account). Don't use it anymore." -], -"type": "string" -}, -"profileOwnerStats": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProfileOwnerStats", -"description": "Stats/counters pertaining to followers and incoming edges." -}, -"unjoinedEmailCertificates": { -"description": "Returned only when explicitly requested in the request mask as read_only_profile_info.unjoined_email_certificates. 
Equivalent to fetching the Emails & the Email Certificates with the acls ignored.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiEmail" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfoDomainInfo": { -"deprecated": true, -"description": "DEPRECATED.", -"id": "AppsPeopleOzExternalMergedpeopleapiReadOnlyProfileInfoDomainInfo", -"properties": { -"domainBadge": { -"deprecated": true, -"description": "DEPRECATED. Organization badge for the domain this person is a member of. The badge is the primary hosted domain.", -"items": { -"type": "string" -}, -"type": "array" -}, -"domainName": { -"deprecated": true, -"description": "DEPRECATED. Hosted domain this person is a member of. Formerly only available via PersonExtendedData.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRelation": { -"description": "Relation stores the related person between the contact or profile and a third person. See go/relation-vs-relationship for relation vs relationship explanation. This field currently includes RelationshipToMe data in fields value and canonical_value for ContainerType CONTACT_ANNOTATION. This will be moved to RelationshipToMe in b/221081499.", -"id": "AppsPeopleOzExternalMergedpeopleapiRelation", -"properties": { -"canonicalValue": { -"description": "Canonicalized `value` of the relation from this person to the user. This is currently used for data from contact annotations. Possible canonical values are based from http://google3/googledata/quality/aliases/relationship_en.config.", -"type": "string" -}, -"formattedType": { -"description": "The `type` translated and formatted in the request locale. 
See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"relationDetails": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRelationRelationDetails", -"description": "The person whose email matches the Relation.value field, if it is a valid email address. This field is read-only and ignored on update." -}, -"type": { -"description": "The relation type. The type can be free form or one of these predefined values: * `spouse` * `child` * `mother` * `father` * `parent` * `brother` * `sister` * `friend` * `relative` * `domesticPartner` * `manager` * `assistant` * `referredBy` * `partner`", -"type": "string" -}, -"value": { -"description": "The person this relation applies to. Custom value provided by the user.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRelationRelationDetails": { -"id": "AppsPeopleOzExternalMergedpeopleapiRelationRelationDetails", -"properties": { -"displayName": { -"description": "Equivalent to Name.display_name for the person_id profile.", -"type": "string" -}, -"jobTitle": { -"description": "Equivalent to Organization.title for the primary organization of the person_id profile.", -"type": "string" -}, -"personId": { -"type": "string" -}, -"photoUrl": { -"description": "Equivalent to Photo.url for the person_id profile.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRelationshipInterest": { -"deprecated": true, -"description": "Deprecated in b/122464133. No data returned for this field.", -"id": "AppsPeopleOzExternalMergedpeopleapiRelationshipInterest", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. 
See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"description": "These fields may give away the sexual orientation of the user.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRelationshipStatus": { -"deprecated": true, -"description": "Deprecated in b/122464133. No data returned for this field.", -"id": "AppsPeopleOzExternalMergedpeopleapiRelationshipStatus", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRightOfPublicityState": { -"description": "User preference for shared endorsements. See go/se-devguide and go/sharedendorsements for details, including guidance on which contexts are which.", -"id": "AppsPeopleOzExternalMergedpeopleapiRightOfPublicityState", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"state": { -"enum": [ -"STATE_UNSPECIFIED", -"NOT_OK_TO_DISPLAY", -"OK_TO_DISPLAY", -"OK_TO_DISPLAY_IN_NON_ADS_COMMERCIAL_CONTEXT" -], -"enumDescriptions": [ -"", -"OK to display in non-commercial contexts, but not in commercial ones.", -"Okay to display in commercial or advertising contexts.", -"Okay to display in commercial contexts other than advertising." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRosterDetails": { -"description": "Information specific to rosters like Google Groups and Chatrooms.", -"id": "AppsPeopleOzExternalMergedpeopleapiRosterDetails", -"properties": { -"abridgedRosterMemberships": { -"description": "Abridged / sample subset of member details of the roster. NOTE: This field is only returned if the request's field mask includes \"person.roster_details.abridged_roster_memberships\". http://cs/symbol:google.apps.cloudidentity.groups.internal.GroupSummary.abridged_memberships", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRosterMember" -}, -"type": "array" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"rosterMemberCount": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiRosterMemberCount", -"description": "Indicates the number of members and sub-rosters of the roster. Corresponds to http://cs/symbol:google.apps.cloudidentity.groups.internal.Group.direct_member_count_per_type" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRosterMember": { -"description": "Represents details of a member of a roster. Used in RosterDetails. Corresponds to http://cs/symbol:google.apps.cloudidentity.groups.internal.Membership", -"id": "AppsPeopleOzExternalMergedpeopleapiRosterMember", -"properties": { -"memberType": { -"description": "Type of the member.", -"enum": [ -"ROSTER_MEMBER_TYPE_UNSPECIFIED", -"PERSON", -"ROSTER" -], -"enumDescriptions": [ -"", -"CIG's Membership.Type.USER", -"CIG's Membership.Type.GROUP" -], -"type": "string" -}, -"personId": { -"description": "Focus-Obfuscated Gaia Id of the member.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiRosterMemberCount": { -"description": "Represents the summary of member counts of a roster. Used in RosterDetails. 
Corresponds to http://cs/symbol:google.apps.cloudidentity.groups.internal.Group.DirectMemberCountPerType", -"id": "AppsPeopleOzExternalMergedpeopleapiRosterMemberCount", -"properties": { -"directGroupCount": { -"description": "Indicates the number of direct sub-rosters of the roster. This comes from http://cs/symbol:google.apps.cloudidentity.groups.internal.Group.DirectMemberCountPerType.group_count", -"format": "int64", -"type": "string" -}, -"directUserCount": { -"description": "Indicates the number of direct, non-roster members of the roster. This comes from http://cs/symbol:google.apps.cloudidentity.groups.internal.Group.DirectMemberCountPerType.user_count", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSearchProfile": { -"description": "Profile for Janata and Search. go/janata-profile-in-sgbe", -"id": "AppsPeopleOzExternalMergedpeopleapiSearchProfile", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"searchProfileData": { -"$ref": "SocialGraphApiProtoSearchProfileData" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSipAddress": { -"description": "As of 03/2018 is not supported for user Profile.", -"id": "AppsPeopleOzExternalMergedpeopleapiSipAddress", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"description": "The type of the SIP address. 
The type can be free form or or one of these predefined values: * `home` * `work` * `mobile` * `other`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSkills": { -"id": "AppsPeopleOzExternalMergedpeopleapiSkills", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSocialConnection": { -"description": "The social connection of this person to the viewer. NOTE: this is used by go/starlight, but not actually used or returned in PeopleAPI. See b/27281119 for context.", -"id": "AppsPeopleOzExternalMergedpeopleapiSocialConnection", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"type": { -"items": { -"enum": [ -"SOCIAL_CONNECTION_UNKNOWN", -"NO_CONNECTION", -"GPLUS_SECOND_HOP", -"DIRECT_CONNECTION", -"SELF" -], -"enumDescriptions": [ -"", -"This person is not connected to the viewer.", -"Google+ user is a second hop relative to viewer.", -"User is a contact or in circles of the viewer.", -"This person is the viewer." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSortKeys": { -"id": "AppsPeopleOzExternalMergedpeopleapiSortKeys", -"properties": { -"affinity": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiAffinity" -}, -"type": "array" -}, -"interactionRank": { -"deprecated": true, -"description": "Deprecated. This field is only populated with 0.000 for legacy reasons. Clients should not use this field.", -"type": "string" -}, -"lastName": { -"type": "string" -}, -"lastNameRaw": { -"type": "string" -}, -"name": { -"type": "string" -}, -"nameRaw": { -"description": "Raw name strings that were used to generate the name and last_name sort keys fields above. 
Contacts+ need them to generate section headers for list view (b/30642866).", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSourceIdentity": { -"description": "Id of a single source from the merged person.", -"id": "AppsPeopleOzExternalMergedpeopleapiSourceIdentity", -"properties": { -"container": { -"deprecated": true, -"description": "The type of source. To be deprecated infavor of container_type", -"enum": [ -"UNKNOWN", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Data from the user's frequent interactions.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. 
The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"containerType": { -"description": "The type of the source.", -"enum": [ -"UNKNOWN_CONTAINER", -"PROFILE", -"CONTACT", -"CIRCLE", -"PLACE", -"ACCOUNT", -"EXTERNAL_ACCOUNT", -"DOMAIN_PROFILE", -"DOMAIN_CONTACT", -"DEVICE_CONTACT", -"GOOGLE_GROUP", -"NAMED_CHAT_ROOM", -"UNNAMED_CHAT_ROOM", -"AFFINITY", -"RAW_DEVICE_CONTACT", -"CONTACT_ANNOTATION", -"DELEGATED_CONTACT" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"Google Profile. PROFILE fields are editable by the profile owner, unless the field metadata includes writeable=false.", -"Google Contact", -"Deprecated. E-mail only circle membership should be dead. E-mail only circle membership of non-g+ user.", -"A maps place", -"The requester's own Gaia account. ACCOUNT fields are not editable by anyone. They only occur when the requester is \"me\". In the event an ACCOUNT field is ACLed non-private, it will be returned as a PROFILE field when viewed by a requester who is not \"me\".", -"LinkedExternalSites (go/PeopleApiConnectedSites)", -"Google-For-Work Profile. DOMAIN_PROFILE fields are editable by the Dasher administrator of the domain. They are not editable by the profile owner.", -"Domain shared contact. An entity that is owned by a domain and represents a person, but is not a user in the domain. For more details see https://support.google.com/a/answer/9281635.", -"To be deprecated in favor of RAW_DEVICE_CONTACT See go/mergedperson-for-device-contacts Aggregation is represented using person.metadata.device_contact_info", -"Google group. Examples: sales@zara.es.", -"Dynamite \"Named Flat Room\" (NFR). This is a Baggins Roster with label DYNAMITE_SPACE *and* label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Dynamite \"Unnamed Flat Room\" (UFR). 
This is a Baggins Roster with label DYNAMITE_SPACE but does *not* have label DYNAMITE_NAMED_SPACE. See go/people-api-concepts#chat-rooms", -"Device contacts that are used in interaction ranking, but have not been uploaded to Google. These are synthesized people from interaction data. This container type is only used in ListRankedPeople and ListRankedTargets actions. See go/TopNWithClusteringPart1.", -"Data from a raw (non-aggregated) device contact. See go/mergedperson-for-device-contacts", -"Data from contact annotation. Contact annotations are currently generated and used by Google Assistant.", -"Data from delegated contacts. Delegated contacts are the contacts delegated to the current requester. The requester can then access those contacts. See go/ph-delegation." -], -"type": "string" -}, -"deleted": { -"description": "In sync responses, indicates whether the identity source has been deleted. Not applicable to GOOGLE_GROUP.", -"type": "boolean" -}, -"id": { -"description": "The encoded id of the data source. This field correlates to PersonFieldMetadata.encoded_container_id. The possible values of this `id` field are as follows based on the value of the `container_type` field: CONTACT: Hex-encoded contact id. PROFILE: DOMAIN_PROFILE: GOOGLE_GROUP: NAMED_CHAT_ROOM: Focus-obfuscated Gaia ID. DOMAIN_CONTACT: Synthetic-contact id representing the domain shared contact. PLACE: Encoded PlaceId (go/javagoog/maps/api/places/util/PlaceIdEncoder.java) RAW_DEVICE_CONTACT: Pair of device_id and raw_contact_id, encoded as base64 serialized social.graph.peopleapi.proto.internal.RawDeviceContactId proto. CONTACT_ANNOTATION: Pair of annotation_id and event_timestamp, encoded as base64 serialized social.graph.peopleapi.proto.internal.ContactAnnotationId proto. 
-- DEPRECATED container types -- If the container is CIRCLE, then the id is going to be the synthetic- contact id representing the email-only circle member or gaia circle member for which the requester does not have a contact for.", -"type": "string" -}, -"lastUpdated": { -"description": "Last update timestamp of this source. NOTE: Only populated for CONTACT container type in Java PeopleAPI. Populated for CONTACT, PROFILE, DOMAIN_PROFILE in Sharpen implementation. NOTE: Not populated for GOOGLE_GROUP.", -"format": "google-datetime", -"type": "string" -}, -"lastUpdatedMicros": { -"deprecated": true, -"description": "**DEPRECATED** Please use `last_updated` field instead. Last update timestamp of this source in microseconds. NOTE: Only populated for CONTACT container type.", -"format": "int64", -"type": "string" -}, -"sourceEtag": { -"description": "NOTE: Not populated for GOOGLE_GROUP.", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiSshPublicKey": { -"description": "Custom field that represents SSH public keys associated with the user. We can treat the field as a map from a string fingerprint to the SSH public key information.", -"id": "AppsPeopleOzExternalMergedpeopleapiSshPublicKey", -"properties": { -"expirationTime": { -"format": "google-datetime", -"type": "string" -}, -"fingerprint": { -"description": "The value is automatically set to a SHA-256 fingerprint of an SSH public key. A fingerprint should uniquely identify an SSH public key.", -"type": "string" -}, -"key": { -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiStructuredPhone": { -"description": "This message mirrors the ContactPhoneNumber message in ccc/grand_central/common/types.proto. For various reasons, we cannot take on a direct dependency. 
See other proto file for most recent documentation.", -"id": "AppsPeopleOzExternalMergedpeopleapiStructuredPhone", -"properties": { -"formattedType": { -"description": "The phone formatted type. See docs from mirrored proto: http://google3/ccc/grand_central/common/types.proto?l=128&rcl=241000760", -"type": "string" -}, -"phoneNumber": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumber" -}, -"shortCode": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiStructuredPhoneShortCode" -}, -"type": { -"description": "The type of phone. See docs from mirrored proto: http://google3/ccc/grand_central/common/types.proto?l=125&rcl=241000760", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumber": { -"id": "AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumber", -"properties": { -"e164": { -"type": "string" -}, -"i18nData": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumberI18nData" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumberI18nData": { -"id": "AppsPeopleOzExternalMergedpeopleapiStructuredPhonePhoneNumberI18nData", -"properties": { -"countryCode": { -"format": "int32", -"type": "integer" -}, -"internationalNumber": { -"type": "string" -}, -"isValid": { -"type": "boolean" -}, -"nationalNumber": { -"type": "string" -}, -"regionCode": { -"type": "string" -}, -"validationResult": { -"enum": [ -"UNKNOWN", -"IS_POSSIBLE", -"INVALID_COUNTRY_CODE", -"TOO_SHORT", -"TOO_LONG", -"IS_POSSIBLE_LOCAL_ONLY", -"INVALID_LENGTH" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiStructuredPhoneShortCode": { -"id": "AppsPeopleOzExternalMergedpeopleapiStructuredPhoneShortCode", -"properties": { -"code": { -"description": "The phone code. 
See docs from mirrored proto: http://google3/ccc/grand_central/common/types.proto?l=70&rcl=241000760", -"type": "string" -}, -"countryCode": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiTagline": { -"deprecated": true, -"id": "AppsPeopleOzExternalMergedpeopleapiTagline", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiTeamsExtendedData": { -"deprecated": true, -"description": "*UNSUPPORTED*. This message is never populated and is no longer used.", -"id": "AppsPeopleOzExternalMergedpeopleapiTeamsExtendedData", -"properties": { -"adminTo": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"admins": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"dottedLineManagers": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"dottedLineReports": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonListWithTotalNumber" -}, -"failures": { -"items": { -"enum": [ -"UNKNOWN_FAILURE", -"MANAGEMENT_CHAIN", -"REPORTS", -"DOTTED_LINE_REPORTS", -"DOTTED_LINE_MANAGERS", -"ADMINS", -"ADMIN_TO" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"managementChain": { -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPerson" -}, -"type": "array" -}, -"reports": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonListWithTotalNumber" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiUserDefined": { -"id": "AppsPeopleOzExternalMergedpeopleapiUserDefined", -"properties": { -"key": { -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, 
-"AppsPeopleOzExternalMergedpeopleapiUserVisibleStats": { -"deprecated": true, -"description": "DEPRECATED in favor of ProfileStats. Stats pertaining to incoming edges and views, visible to the requester (with acls applied). Related to, but not equal to, com.google.focus.proto.Storage.UserVisibleStats.", -"id": "AppsPeopleOzExternalMergedpeopleapiUserVisibleStats", -"properties": { -"incomingAnyCircleCount": { -"description": "Replacement for deprecated follower_count. Comes from the EdgeSummary.", -"format": "int64", -"type": "string" -}, -"viewCount": { -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiVisibleToGuests": { -"description": "Store metadata information like annotation-id and product source for visible to guests contacts go/visible-to-guests.", -"id": "AppsPeopleOzExternalMergedpeopleapiVisibleToGuests", -"properties": { -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiWebContactsExtendedData": { -"id": "AppsPeopleOzExternalMergedpeopleapiWebContactsExtendedData", -"properties": { -"isIncomplete": { -"description": "Used by Contacts client-side to indicate whether a person is not completed.", -"type": "boolean" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiWebsite": { -"id": "AppsPeopleOzExternalMergedpeopleapiWebsite", -"properties": { -"formattedType": { -"description": "The `type` translated and formatted in the request locale. See go/people-api-howto/localization for details on how to usage.", -"type": "string" -}, -"metadata": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiPersonFieldMetadata" -}, -"rel": { -"description": "Currently in Oz: \"Links\": Links with no rel. \"Other profiles\": Links with rel=ME. 
\"Contributor to\": Links with rel=CONTRIBUTOR_TO or PAST_CONTRIBUTOR_TO.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiWebsiteRelationshipInfo" -}, -"type": "array" -}, -"type": { -"description": "The type of the website. The type can be free form or one of these predefined values: * `home` * `work` * `blog` * `profile` * `homePage` * `ftp` * `reservations` * `appInstallPage`: website for a Currents application. * `other`", -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiWebsiteRelationshipInfo": { -"id": "AppsPeopleOzExternalMergedpeopleapiWebsiteRelationshipInfo", -"properties": { -"type": { -"enum": [ -"UNKNOWN", -"ME", -"NOT_ME", -"CONTRIBUTOR_TO", -"PAST_CONTRIBUTOR_TO" -], -"enumDescriptions": [ -"", -"rel=\"me\"", -"rel=\"\"", -"rel=\"contributor-to\"", -"rel=\"past-contributor-to\"" -], -"type": "string" -} -}, -"type": "object" -}, -"AppsPeopleOzExternalMergedpeopleapiYoutubeExtendedData": { -"description": "Extension data for use in Youtube Product Profile.", -"id": "AppsPeopleOzExternalMergedpeopleapiYoutubeExtendedData", -"properties": { -"channelData": { -"description": "Information about a channel created by the user. A user can create multiple Youtube channels.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiChannelData" -}, -"type": "array" -}, -"failure": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiProductProfileFailure", -"description": "Failure type if there is an error when fetching product profile data." -} -}, -"type": "object" -}, -"AssistantApiActionV2SupportedFeatures": { -"description": "The features supported by the ActionV2 protocol. Note that after we move on to ConversationProto for all surfaces we can remove this message.", -"id": "AssistantApiActionV2SupportedFeatures", -"properties": { -"expressUrlInSettingsResponseSupported": { -"description": "This flag is used to work around a bug in AGSA 6.8 that got away. 
The bug prevents users from accessing their shopping list if the URL of the shopping list is not a keep.google.com URL. This will happen when switch the backend that stores the shopping list from Keep to a backend maintained by the Google Shopping Express team.", -"type": "boolean" -}, -"reconnectClientInputSupported": { -"description": "Whether client supports reconnect client input in action v2 payload. This capability is needed to determine if client supports parsing client input payload from actionv2 proto for workflow purposes. See go/personal-workflow. OWNER:nyzstar,vvvemuri.", -"type": "boolean" -}, -"simpleActionV2PuntSupported": { -"description": "Whether or not the surface supports a simple UnsupportedAction instead of a ModalState punt card for rendering. For ActionV2 punt cards, the ModalState extension on the ResourceSet is the canonical way of building punt cards. However, while most all devices support the ActionV2 protocol, not all devices handle the ModalState rendering as part of the ActionV2. For these devices, we want to build a modified ActionV2 for punt cards which omits this ModalState. At present, this is only Android Wear and should not be used for other devices if they support ModalState or Conversation protocol.", -"type": "boolean" -}, -"supportedActionType": { -"description": "A list of all the action types supported by the client. These should be the string representation of majel.ActionTypes within \"quality/majel/api/proto/action_v2.proto\".", -"items": { -"type": "string" -}, -"type": "array" -}, -"takeScreenshotSupported": { -"description": "Checks if screenshots can be taken on the client. This field is set on the client from AGSA 7.2 onwards.", -"type": "boolean" -}, -"voiceDelightImmersiveUiSupported": { -"description": "If IMMERSIVE_ACTIONS UiType is supported by the client.", -"type": "boolean" -}, -"voiceDelightStickersSupported": { -"description": "If Voice Delight Stickers are supported by the client. 
In order to support Voice Delight stickers, the client should know how to extract sticker_url from VoiceDelightSystemInteractionSegment.", -"type": "boolean" -}, -"voiceDelightSuggestionsSupported": { -"description": "If Voice Delight Suggestion Chips are supported by the client. In order to support Voice Delight Suggestion Chips, the client should know how to extract suggestions form VoiceDelightSystemInteraction.ResourceSet.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiAndroidIntentCapabilities": { -"description": "Capabilities related to Android intent support. These capabilities can apply to any device on the Android platform. Provides the list of Android package names that support a given Android intent.", -"id": "AssistantApiAndroidIntentCapabilities", -"properties": { -"androidIntentCapability": { -"items": { -"$ref": "AssistantApiAndroidIntentCapabilitiesAndroidIntentCapability" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiAndroidIntentCapabilitiesAndroidIntentCapability": { -"id": "AssistantApiAndroidIntentCapabilitiesAndroidIntentCapability", -"properties": { -"intentActionName": { -"description": "The Action name of the Android Intent in standard notation (https://developer.android.com/reference/android/content/Intent#getAction()).", -"type": "string" -}, -"packageNames": { -"description": "The Android provider packages that support the intent, e.g. \"com.google.android.deskclock\".", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiAppCapabilities": { -"description": "Used to describe app capabilities of the device installed apps reported to the server.", -"id": "AssistantApiAppCapabilities", -"properties": { -"allowlistedForMediaFulfillment": { -"description": "Indicates whether the provider is compatible for media fulfillment on this surface. 
For example, Amazon Music isn't compatible with the driving mode.", -"type": "boolean" -}, -"appIntegrationsSettings": { -"$ref": "AssistantApiAppIntegrationsSettings", -"description": "Currently unused. Will be used in the future when integrating with incremental app capabilities." -}, -"disabledSystemApp": { -"description": "This system app is disabled in settings.", -"type": "boolean" -}, -"provider": { -"$ref": "AssistantApiCoreTypesProvider", -"description": "The installed app of the provider." -}, -"routableToProviderCloud": { -"description": "This provider has integrated its cloud backend with Google, and Google can route the user queries to the provider's cloud.", -"type": "boolean" -}, -"searchableOnDevice": { -"description": "This provider has an app that supports on-device search through the provider's own inventory.", -"type": "boolean" -}, -"searchableOnServer": { -"description": "This provider has integrated its content with Google, and Google has enabled to serve its content as a server-side solution.", -"type": "boolean" -}, -"supports3pPodcastPlayback": { -"description": "Indicates whether the provider supports playback of 3P(externally hosted) podcasts.", -"type": "boolean" -}, -"supportsScreenlessInitiation": { -"description": "This provider has an app that supports starting new media playback when there is no screen (e.g. by integrating with the Bisto SDK).", -"type": "boolean" -}, -"whitelistedForAnnotation": { -"description": "This provider is an app which should be used for query annotations. 
This is useful for apps which may not be already indexed by Google or are client specific.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiAppCapabilitiesDelta": { -"description": "Used to describe the incremental change of app capabilities of the device installed apps reported to the server.", -"id": "AssistantApiAppCapabilitiesDelta", -"properties": { -"appIntegrationsSettings": { -"$ref": "AssistantApiAppIntegrationsSettings", -"description": "Currently unused. Will be used in the future when integrating with incremental app capabilities." -}, -"providerDelta": { -"$ref": "AssistantApiCoreTypesProviderDelta", -"description": "The installed app of the provider." -} -}, -"type": "object" -}, -"AssistantApiAppControlSupport": { -"id": "AssistantApiAppControlSupport", -"properties": { -"enabled": { -"enum": [ -"DEFAULT_DISABLED", -"ENABLED_WITH_SMART_DICTATION" -], -"enumDescriptions": [ -"Disable all app control capabilities.", -"Enable app control SD-dependent capabilities." -], -"type": "string" -}, -"sendMessageSuppressed": { -"description": "If true, disable send message AppControl/SD flow. This is needed to suppress the feature for specific device. If we decide to turn down this feature for all devices, then this field will become obsolete and should be removed. See context in b/275727627.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiAppIntegrationsSettings": { -"description": "Contains the app privacy bits used for App Integrations implicit request. 
(go/app-privacy-settings-for-implicit-requests)", -"id": "AssistantApiAppIntegrationsSettings", -"properties": { -"handleRequestsWithPredictedApps": { -"description": "Whether to enable Assistant to handle request with predicted apps.", -"enum": [ -"UNSET", -"FALSE", -"TRUE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiAssistantContinuedPresenceSupport": { -"id": "AssistantApiAssistantContinuedPresenceSupport", -"properties": { -"plateSupport": { -"description": "Indicates in what cases assistant continued presence can be shown as a plate. This field is white-listed as being PII-free. Please do not add PII here.", -"enum": [ -"DEFAULT_NO_PLATE", -"SEARCH_ONLY" -], -"enumDescriptions": [ -"Assistant continued presence cannot be shown.", -"Assistant continued presence can be shown only in SearchNowActivity. See go/lockhart-acp." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiAudioInput": { -"description": "These capabilities represent the audio input features associated with the device. This includes what kind of audio input the device can handle, and what the privacy circumstances of that input are.", -"id": "AssistantApiAudioInput", -"properties": { -"environment": { -"enum": [ -"SURROUNDING_USERS", -"AUTHENTICATED_USER_ONLY" -], -"enumDescriptions": [ -"Audio may be produced by or visible to any users in the vicinity of the authenticated user -- e.g., a speaker. NOTE that this is the default value since it's better to assume that the user is in public when they're not than vice versa.", -"Audio can be assumed to come from or go to the authenticated user -- e.g., the mic or headphone on a phone or headset." 
-], -"type": "string" -}, -"quality": { -"enum": [ -"VOICE_QUALITY", -"MUSIC_QUALITY" -], -"enumDescriptions": [ -"An audio device capable of handling audio at fidelity high enough for voice use (TTS, speech recognition, telephone) but insufficient for media playback.", -"An audio device capable of handling high-fidelity audio, suitable for use as a media device." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiAudioOutput": { -"description": "These capabilities represent the audio output features associated with the device. This includes what kind of audio output the device can handle, and what the privacy circumstances of that output are.", -"id": "AssistantApiAudioOutput", -"properties": { -"alwaysOnSpeaker": { -"enum": [ -"UNKNOWN", -"NOT_SUPPORTED", -"SUPPORTED" -], -"enumDescriptions": [ -"The setting for always on speaker is unknown.", -"Always on speaker is not supported.", -"Always on speaker is supported." -], -"type": "string" -}, -"environment": { -"enum": [ -"SURROUNDING_USERS", -"AUTHENTICATED_USER_ONLY" -], -"enumDescriptions": [ -"Audio may be produced by or visible to any users in the vicinity of the authenticated user -- e.g., a speaker. NOTE that this is the default value since it's better to assume that the user is in public when they're not than vice versa.", -"Audio can be assumed to come from or go to the authenticated user -- e.g., the mic or headphone on a phone or headset." -], -"type": "string" -}, -"mediaTtsMixable": { -"enum": [ -"MEDIA_TTS_MIXABLE_UNKNOWN", -"MEDIA_TTS_MIXABLE_NOT_SUPPORTED", -"MEDIA_TTS_MIXABLE_SUPPORTED" -], -"enumDescriptions": [ -"Unknown whether device supports mixing TTS and device media.", -"Device cannot mix TTS and device media.", -"Device can mix TTS and device media." 
-], -"type": "string" -}, -"quality": { -"enum": [ -"VOICE_QUALITY", -"MUSIC_QUALITY" -], -"enumDescriptions": [ -"An audio device capable of handling audio at fidelity high enough for voice use (TTS, speech recognition, telephone) but insufficient for media playback.", -"An audio device capable of handling high-fidelity audio, suitable for use as a media device." -], -"type": "string" -}, -"volumeProperties": { -"$ref": "AssistantApiVolumeProperties" -} -}, -"type": "object" -}, -"AssistantApiBluetoothCapabilities": { -"description": "Bluetooth capabilities related to usage of a feature.", -"id": "AssistantApiBluetoothCapabilities", -"properties": { -"isBluetoothConnectedProfileRequired": { -"description": "If this surface needs to bluetooth pair a phone before using a feature.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCallCapabilities": { -"description": "CallCapabilities supported by a surface. See go/call-capabilities. Next ID: 7", -"id": "AssistantApiCallCapabilities", -"properties": { -"callFormats": { -"description": "The supported call formats on the surface.", -"items": { -"enum": [ -"UNSPECIFIED_FORMAT", -"AUDIO", -"VIDEO", -"TEXT" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"callMediums": { -"description": "The supported call mediums on the surface.", -"items": { -"enum": [ -"UNSPECIFIED_MEDIUM", -"PSTN", -"VOIP", -"EMAIL", -"ONLINE_CHAT", -"TEXT_MESSAGING", -"MESSAGE" -], -"enumDescriptions": [ -"", -"Public Switched Telephone Network.", -"Voice Over IP.", -"", -"", -"Super set of SMS and MMS", -"Super set of EMAIL, ONLINE_CHAT, TEXT_MESSAGING" -], -"type": "string" -}, -"type": "array" -}, -"callOptions": { -"description": "The call options this surface can provide. 
For example, SPEAKERPHONE is available on Android OPA while iOPA doesn't support it yet.", -"items": { -"enum": [ -"UNSPECIFIED_CALL_OPTION", -"SPEAKERPHONE", -"BLUETOOTH", -"HEADSET" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"fallbackToTetheredDeviceAppCapabilities": { -"description": "If true, APP_ID queries initiated by this device should fall back to execution on the tethered device if it's available and if the primary device cannot perform the action (e.g. due to the app not being installed).", -"type": "boolean" -}, -"supportedRecipientTypes": { -"description": "Should only be checked if nonempty.", -"items": { -"enum": [ -"UNSPECIFIED_ENDPOINT", -"PHONE_NUMBER", -"EMAIL_ADDRESS", -"APP_UNIQUE_ID", -"EMERGENCY_PHONE_NUMBER", -"VOICEMAIL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"supportsDuoEmailEndpoint": { -"deprecated": true, -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCameraCapabilities": { -"description": "These capabilities represent the camera features associated with the device.", -"id": "AssistantApiCameraCapabilities", -"properties": { -"faceMatchCapable": { -"description": "Whether the device supports Face Match.", -"type": "boolean" -}, -"hasCamera": { -"description": "Whether the device has a camera.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCameraReceiverCapabilities": { -"description": "These capabilities present the capability of the device running camera receiver apps.", -"id": "AssistantApiCameraReceiverCapabilities", -"properties": { -"hasLimitedCameraStreamCapability": { -"description": "Whether the device has limited camera stream capability. If true, check supported_camera_receivers for detailed supported cameras.", -"type": "boolean" -}, -"supportedCameraReceivers": { -"description": "The camera receiver cast apps the device supports. 
Only used if has_limited_camera_stream_capability is true.", -"items": { -"$ref": "AssistantApiCoreTypesCastAppInfo" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiCarAssistantCapabilities": { -"description": "Capabilities that are associated with Assistants on auto surfaces. This is different from other capabilities such as CarSettingsCapabilities, CloudCarCapabilities since they are specific to settings and 3P cloud information. All the auto/car Assistant specific capabilities should live here.", -"id": "AssistantApiCarAssistantCapabilities", -"properties": { -"shouldPuntMultiAssistantMode": { -"description": "Indicates whether the current Assistant should provide a multi Assistant specific punt when there are multiple Auto specific Google Assistants (Android Auto Projected (AAP) and Android Auto Embedded (AAE)) in the same GAS enabled car. This will be used by both AAP and AAE. Design doc: go/doubledash++", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCarSettingsCapabilities": { -"description": "Capabilities that are associated with Assistant Settings on auto surfaces.", -"id": "AssistantApiCarSettingsCapabilities", -"properties": { -"playWarmerWelcome": { -"description": "If true, it indicates that the auto surface client should receive a warmer welcome TTS for signed-out users. For signed-in user, we will rely on server side metadata. go/aaae:preview-lang", -"type": "boolean" -}, -"supportsAddingCars": { -"description": "If true, it indicates that the client can be used to add cars after account linking with the OEM.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCastAssistantSettingLinkingResult": { -"id": "AssistantApiCastAssistantSettingLinkingResult", -"properties": { -"castLinkingStatus": { -"description": "Cast linking status for ATV surfaces. 
This is derived from error messages returned from Cast Orchestration Server and will be used for data profiling only(go/katniss-settings-dashboard).", -"enum": [ -"NOT_SET", -"SUCCEED", -"DEVICE_CONFLICT", -"DEVICE_NAME_EMPTY", -"CLIENT_ID_MISSING_TAG", -"INVALID_DEVICE_ID", -"DATA_SYNC_THROTTLED", -"CREATE_ROBOT_ACCOUNT_FAILED", -"UNAUTHORIZED_CLIENT", -"OTHER_ERROR" -], -"enumDescriptions": [ -"LINT.IfChange", -"", -"", -"", -"", -"", -"", -"", -"", -"LINT.ThenChange(//depot/google3/logs/proto/assistant/capabilities_log.proto)" -], -"type": "string" -}, -"truncatedErrorMsg": { -"description": "The error msg returned from COS, truncated in case it's too large.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCastCapabilities": { -"description": "These capabilities represent capabilities that have to do with casting that pertain to this device. Next ID: 9", -"id": "AssistantApiCastCapabilities", -"properties": { -"cameraReceiverCapabilities": { -"$ref": "AssistantApiCameraReceiverCapabilities", -"description": "Whether the device has limited camera stream capability and if yes, which receivers are supported." -}, -"cameraStreamSupportedProtocols": { -"description": "The supported protocols for camera streaming. The value is used as string in go/smarthome-internal-api#camera-stream, so using a string for this field instead of an enum. 
Supported protocols: (align the definition in go/smarthome-camerastream-trait) - \"hls\": HTTP Live Streaming - \"dash\": Dynamic Adaptive Streaming over HTTP - \"smooth_stream\": Smooth Streaming - \"progressive_mp4\": Progressive MP4 (will likely only be used for Clips) - \"webrtc\": WebRTC (currently, only H.264 is supported) - \"nexustalk\": Internal-only protocol used for Nest", -"items": { -"type": "string" -}, -"type": "array" -}, -"canReceiveCast": { -"description": "True if we can cast things to this device.", -"type": "boolean" -}, -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "Optional for primarily cast devices (e.g., Chirp, Chromecast). For devices that are NOT primarily cast devices, but having a cast receiver as secondary functionality, this field SHOULD store the cast-device-id to be used to send remote casting commands to the device. Example: Android TV, which supports both Android-native actions as well as remote casting using its built-in cast receiver. Android TV device id contains a DUSI id, which is not a cast-device-id. When executing a cast command on the Android TV, this field is used to route the cast command (through CloudCastService) to the cast receiver on the device." -}, -"dynamicGroupsSupported": { -"description": "Whether this device supports dynamic groups or not. It implies if a Stream Control operation (transfer, expansion, and contraction) could be applied on this device since Stream Control is implemented as part of dynamic groups (ie, adding/removing devices from playback)", -"type": "boolean" -}, -"groupType": { -"enum": [ -"NONE", -"STATIC_GROUP", -"DYNAMIC_GROUP", -"STEREO_PAIR" -], -"enumDescriptions": [ -"", -"Static group is created by the user through UI. It contains two or more devices.", -"Dynamic group is a temporary group created when devices are added to / removed from a playback. It should be torn down after the playback is done.", -"Stereo pair is created by the user through UI. 
It contains exact two devices, a left device and a right device." -], -"type": "string" -}, -"overlayApplicationsSupported": { -"description": "Whether UI overlay applications are supported on this device. It's used by Chromecast only.", -"type": "boolean" -}, -"yetiGamingSupported": { -"description": "Whether the device supports playing games through Yeti. This is set by the cast device when the device is updated: Chromecast updates -> Chromecast registers its capabilities with CCS -> CCS passes the capabilities to the AssistantSettingsService -> AssistantSettingsService stores the device's capabilities. go/yeti-gaming-supported-cast-capability", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiClientOpPropertiesDeviceModifySettingClientOpProperty": { -"description": "The properties of the client op device.MODIFY_SETTING. This proto is stored in the SupportedClientOp proto.", -"id": "AssistantApiClientOpPropertiesDeviceModifySettingClientOpProperty", -"properties": { -"skipAndroidAndGsaVersionCheck": { -"description": "Additional specific setting capabilities. This boolean is used to indicate whether we want to skip the Android and GSA version check in CheckSettingSchemaAndMaybeGetUris() from assistant/vertical/device/fulfillment/utils/setting_utils.h. Consider setting this field to true if your device is neither Android or GSA (especially when the UserAgent string of your device's TaskRequest will not contain a valid/up-to-date Android/GSA version).", -"type": "boolean" -}, -"supportedSettings": { -"description": "Uses DeviceSetting enum which corresponds to setting_id. This indicates which specific settings are supported by client. 
An empty list implies all settings are supported.", -"items": { -"enum": [ -"UNSPECIFIED", -"ABOUT_ME", -"ACCESSIBILITY", -"ACTIVE_EDGE", -"ACTIVE_EDGE_SENSITIVITY", -"ADAPTIVE_BATTERY", -"ADAPTIVE_BRIGHTNESS", -"ADAPTIVE_CHARGING", -"ADAPTIVE_CONNECTIVITY", -"ADAPTIVE_SOUND", -"ADD_ACCOUNT", -"ADD_BLUETOOTH_DEVICE", -"ADD_DEVICE", -"ADD_FINGERPRINT", -"ADS_TRACKING", -"AIRPLANE_MODE", -"ALARM_VOLUME", -"ALARM_SOUND", -"ALLOW_MULTIPLE_USERS", -"AMBIENT_DISPLAY_ALWAYS_ON", -"AMBIENT_DISPLAY_NEW_NOTIFICATION", -"ANDROID_AUTO", -"ANDROID_VERSION", -"APP_BATTERY_USAGE", -"APP_DATA_USAGE", -"APP_DETAILS", -"APP_SHORTCUT", -"APPS_NOTIFICATIONS", -"APPS_STORAGE", -"ASSISTANT_ACCOUNT", -"ASSISTANT_FACE_MATCH", -"ASSISTANT_LANGUAGE", -"ASSISTANT_VOICE_MATCH", -"AUTO_ROTATE", -"AUTO_ROTATE_FACE_DETECTION", -"BACKUP", -"BATTERY_HEALTH", -"BATTERY_LEVEL", -"BATTERY_LOW", -"BATTERY_PERCENTAGE", -"BATTERY_PRESENT", -"BATTERY_SAVER", -"BATTERY_SAVER_SCHEDULE", -"BATTERY_SHARE", -"BATTERY_USAGE", -"BIOMETRIC", -"BLUETOOTH", -"BLUETOOTH_NAME", -"BLUETOOTH_ADDRESS", -"BLUETOOTH_SETTINGS", -"BRIGHTNESS_LEVEL", -"BUBBLES", -"CALL_VOLUME", -"CAMERA_DOUBLE_TWIST", -"CAST", -"CAR_CRASH_DETECTION", -"COLOR_INVERSION", -"COLOR_CORRECTION", -"CONVERSATIONS", -"CHARGING_SOUNDS_AND_VIBRATION", -"CHARGING_STATE", -"CONNECTED_DEVICES", -"CONTACTLESS_PAYMENTS", -"DATA_ROAMING", -"DATA_SAVER", -"DATA_USAGE", -"DATA_LIMIT", -"DATA_LIMIT_LEVEL", -"DATA_WARNING", -"DATA_WARNING_LEVEL", -"DEFAULT_ALARM_SOUND", -"DEFAULT_NOTIFICATION_SOUND", -"DEFAULT_APPS", -"DEVELOPER_OPTIONS", -"DEVICE_ASSISTANT_APP", -"DEVICE_NAME", -"DISPLAY_OVER_OTHER_APPS", -"DISPLAY_SIZE", -"DO_NOT_DISTURB", -"DO_NOT_DISTURB_MESSAGES", -"DO_NOT_DISTURB_CALLS", -"DO_NOT_DISTURB_ALARMS", -"DO_NOT_DISTURB_SCHEDULES", -"DOUBLE_TAP_CHECK_PHONE", -"DRIVING_MODE", -"EARTHQUAKE_ALERTS", -"EMERGENCY", -"EMERGENCY_ALERTS", -"EMERGENCY_CONTACTS", -"EMERGENCY_INFORMATION", -"ETHERNET_TETHERING", -"EXTRA_DIM", -"EXTREME_BATTERY_SAVER", 
-"FACTORY_RESET", -"FIND_MY_DEVICE", -"FLASHLIGHT", -"FOCUS_MODE", -"FONT_SIZE", -"FREE_UP_SPACE", -"FINGERPRINT_MANAGER", -"GESTURES", -"HAPTIC_FEEDBACK_VIBRATION", -"HARD_KEYBOARD", -"HEADS_UP", -"HIGH_REFRESH_RATE", -"HOT_SPOT", -"HOTSPOT_TETHERING", -"HOT_WORD", -"HUB_MODE", -"IP_ADDRESS", -"IMPROVE_LOCATION_ACCURACY", -"JUMP_TO_CAMERA", -"KEYBOARD_SHORTCUTS", -"LIFT_CHECK_PHONE", -"LIVE_TRANSLATE", -"LOCATION", -"LOCATION_HISTORY", -"LOCATION_BLUETOOTH_SCANNING", -"LOCATION_WIFI_SCANNING", -"LOCK_SCREEN", -"LOCK_SCREEN_DEVICE_CONTROLS", -"LOCK_SCREEN_WALLET", -"MAC_ADDRESS", -"MAGNIFICATION", -"MAGNIFY_BUTTON", -"MAGNIFY_TRIPLE_TAP", -"MANIFY_BUTTON", -"MANIFY_TRIPLE_TAP", -"MEDIA", -"MEDIA_VOLUME", -"MICROPHONE_ACCESS", -"MOBILE", -"MOBILE_DATA", -"MUSIC", -"MUTE_MODE", -"NETWORK", -"NETWORK_RESET", -"NFC", -"NIGHT_LIGHT_INTENSITY", -"NIGHT_LIGHT_SWITCH", -"NIGHT_MODE", -"NOTIFICATION_BADGE", -"NOTIFICATION_SOUND", -"NOTIFICATION_ON_SCREEN", -"NOTIFICATION_HISTORY", -"NOTIFY_FOR_PUBLIC_NETWORKS", -"ONEHANDED_MODE", -"OS_VERSION", -"PASSWORD", -"PERMISSION_MANAGER", -"PERMISSION_USAGE", -"PERSONALIZATION", -"PRINTING", -"PHONE_NUMBER", -"PICTURE_IN_PICTURE", -"POINTER_SPEED", -"POWER_MENU", -"REMINDERS", -"REQUIRE_DEVICE_UNLOCK_FOR_NFC", -"RINGTONE", -"RING_VOLUME", -"NEARBY_DEVICES_SCANNING", -"NEARBY_SHARE", -"SCREEN_LOCKING_SOUND", -"SCREEN_MAGNIFICATION", -"SCREEN_TIMEOUT", -"SCREEN_LOCK", -"SCREEN_SAVER", -"SELECT_TO_SPEAK", -"SET_TIME_AUTOMATICALLY", -"SET_TIME_ZONE_AUTOMATICALLY", -"SETTINGS", -"SIM", -"SIM_MANAGER", -"SPEECH_RATE", -"STORAGE_USAGE", -"SWIPE_FOR_NOTIFICATION", -"SWITCH_ACCESS", -"SYSTEM_UPDATE", -"SYSTEM_UPDATES", -"SYSTEM_NAVIGATION", -"SYSTEM_NAVIGATION_GESTURES", -"SYSTEM_NAVIGATION_BUTTONS", -"TALKBACK_PASSWORDS", -"TEXT_TO_SPEECH", -"TIME_ZONE", -"UNUSED_APPS", -"USB", -"USB_TETHERING", -"VERBOSE_TTS", -"VIBRATE", -"VIBRATION", -"VIBRATION_MODE", -"VOICE", -"VOLUME_LEVEL", -"WAKE_SCREEN_FOR_NOTIFICATIONS", -"WALLPAPERS", 
-"WEBVIEW", -"WIFI", -"WIFI_ADD_NETWORK", -"WIFI_ADD_NETWORK_QR_CODE", -"WIFI_CALLING", -"WIFI_HOTSPOT", -"ACCESSIBILITY_SELECT_TO_SPEAK", -"CRISIS_ALERTS", -"REGULATORY_LABELS", -"SEND_FEEDBACK", -"OS_BUILD_NUMBER", -"ACCESSIBILITY_SHORTCUTS", -"ACCESSIBILITY_MENU", -"ACCESSIBILITY_SOUND_AMPLIFIER", -"ACCESSIBILITY_LIVE_TRANSCRIBE", -"ACCESSIBILITY_SOUND_NOTIFICATIONS", -"ACCESSIBILITY_TALKBACK", -"ACCESSIBILITY_TIMEOUT", -"CAMERA_SETTINGS", -"BATTERY_SAVER_TOGGLE", -"COVID_NOTIFICATIONS", -"APP_LOCATION", -"LOCATION_SERVICES", -"PRIVATE_DNS", -"UNRESTRICTED_DATA", -"PREFERRED_NETWORK", -"NETWORK_PREFERENCES", -"PASSWORDS_AND_ACCOUNTS", -"PRIVACY_DASHBOARD", -"MORE_SECURITY", -"APP_PINNING", -"CONFIRM_SIM_DELETION", -"ENCRYPT_PHONE", -"FACE_UNLOCK", -"INSTALL_UNKNOWN_APPS", -"NOW_PLAYING", -"STAY_AWAKE", -"ASSISTANT_VOICE", -"RESET_BLUETOOTH_WIFI", -"DEFAULT_PHONE_APP", -"GOOGLE_ACCOUNT", -"ACCESSIBILITY_CAPTION_SIZE_AND_STYLE", -"ACCESSIBILITY_CAPTION_STYLE", -"ACCESSIBILITY_CAPTION_TEXT", -"ACCESSIBILITY_CAPTION_PREFERENCES", -"ACCESSIBILITY_COLOR_AND_MOTION", -"ACCESSIBILITY_BOLD_TEXT", -"LIVE_CAPTION", -"POWER_BUTTON_ENDS_CALL", -"TOUCH_AND_HOLD_DELAY", -"ACCESSIBILITY_VOICE_ACCESS", -"SMS_PERMISSIONS", -"SPECIAL_APP_ACCESS", -"DARK_THEME_SCHEDULE", -"LOCK_SCREEN_TEXT", -"NIGHT_LIGHT_SCHEDULE", -"AUTOFILL", -"USAGE_AND_DIAGNOSTICS", -"SENSITIVE_NOTIFICATIONS", -"ENCRYPTION_AND_CREDENTIALS", -"SPACIAL_AUDIO", -"RESET_OPTIONS", -"QUICK_TAP", -"TIPS_AND_SUPPORT", -"SCREEN_ATTENTION", -"BLUETOOTH_TETHERING", -"ALL_APPS", -"EXTEND_UNLOCK", -"CLEAR_CALLING", -"GOOGLE_SETTINGS", -"APP_LANGUAGES", -"SIM_STATUS", -"MICROPHONE_PERMISSIONS", -"GOOGLE_PLAY_UPDATE", -"ADD_GOOGLE_ACCOUNT", -"ASSISTANT_SPOKEN_NOTIFICATIONS", -"ABOUT_PHONE", -"ACCOUNTS", -"APPLICATION", -"ASSISTANT", -"AUDIO", -"BATTERY", -"BELL_SCHEDULE", -"CONTINUED_CONVERSATION", -"DATE_TIME", -"DARK_THEME", -"DEVICE_INFO", -"DICTIONARY", -"DIGITAL_WELLBEING", -"DISPLAY", -"LANGUAGE", -"NIGHT_LIGHT", 
-"NOTIFICATION", -"NOTIFICATION_VOLUME", -"PHONE_RINGTONE", -"PRIVACY", -"ROAMING", -"ROUTINES", -"SEARCH", -"SECURITY", -"SOUND", -"SPELL_CHECKER", -"SYSTEM", -"STORAGE", -"VPN", -"AUTOCLICK", -"CARET_HIGHLIGHT", -"CHROMEVOX", -"CURSOR_HIGHLIGHT", -"DOCKED_MAGNIFIER", -"FOCUS_HIGHLIGHT", -"FULLSCREEN_MAGNIFIER", -"HIGH_CONTRAST_MODE", -"LARGE_CURSOR", -"MONO_AUDIO", -"STICKY_KEYS", -"TAP_DRAGGING", -"VIRTUAL_KEYBOARD", -"WEARABLE_AMBIENT", -"WEARABLE_NOISE_CANCELLATION", -"WEARABLE_TOUCH_CONTROLS", -"RAISE_TO_TALK", -"BEDTIME_MODE", -"THEATER_MODE", -"TOUCH_LOCK", -"PRESS_AND_HOLD", -"WATCH_FACE", -"NOTIFICATION_ANNOUNCEMENT" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Pair a bluetooth device.", -"Link a device to Assistant/Google Home.", -"", -"", -"", -"", -"Deprecated, use DEFAULT_ALARM_SOUND instead.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Denotes if the battery health is normal / reduced / unknown.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Bluetooth on/off toggle", -"", -"", -"Bluetooth preferences page", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Cellular carrier (non-wifi) data usage statistics & settings for the device.", -"Toggle to enable data use limit", -"The mobile data use after which mobile data is automatically 
disabled.", -"Toggle to enable data use warning", -"The mobile data use after which a warning is shown to the user.", -"", -"", -"", -"", -"Device's voice assistant app selection.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Special mode for using device hands-free while driving.", -"", -"Main emergency settings", -"", -"", -"", -"", -"Makes screen extra dark.", -"", -"", -"", -"", -"", -"", -"", -"", -"A page for device gestures", -"Slider that controls touch feedback vibration", -"", -"", -"", -"Toggle for internet sharing via WiFi.", -"Common settings page for internet sharing via various means.", -"", -"See go/hubmodesettings-PRD.", -"Show my IP address", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings page for what is visible on the lock screen.", -"", -"", -"Show my MAC address", -"", -"", -"", -"", -"", -"", -"", -"Microphone Access is a toggleable setting that controls if Microphone usage is allowed at the system level or not. It is not the per-App Microphone permission page. https://screenshot.googleplex.com/4hrskftPSur7hHh", -"Wireless internet settings, including WiFi, Cellular, etc.", -"Setting to enable/disable cellular data connection being used on the device. https://screenshot.googleplex.com/jMSRtW3Aq4o", -"", -"", -"", -"Restarts the device network stack, no settings are deleted.", -"", -"", -"", -"", -"", -"Deprecated, use DEFAULT_NOTIFICATION_SOUND instead.", -"", -"", -"", -"", -"", -"Password manager", -"Shows permissions and allows add/remove allowed apps.", -"Shows usage of each permission by app.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings page for screen locking security methods.", -"", -"", -"", -"", -"", -"Currently active/primary SIM.", -"", -"", -"", -"", -"", -"Deprecated, use SYSTEM_UPDATES instead.", -"", -"Settings page for configuring how navigation between apps is done. 
Specifically android has choice between Buttons of Gestures mode.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated, use VIBRATION instead.", -"Vibration master toggle - controls all vibration functions. Corresponds to \"Vibration & haptics\" toggle in the Android settings app.", -"Controls whether the ringer mode will be silent or will vibrate. Details in https://developer.android.com/reference/android/media/AudioManager#RINGER_MODE_VIBRATE Different from VIBRATION from above.", -"deprecated, use ASSISTANT_VOICE", -"", -"", -"", -"", -"", -"", -"", -"", -"Duplicate, use HOT_SPOT instead", -"Accessibility > Select to Speak", -"Safety > Crisis alerts", -"Settings > About phone > Regulatory labels", -"Settings > About phone > Send feedback about this device", -"", -"", -"", -"Settings > Accessibility > Hearing > Sound Amplifier", -"", -"", -"Settings > Accessibility > TalkBack", -"Settings > Accessibility > Time to take action (Accessibility timeout)", -"", -"", -"Settings > Google > COVID-19 Exposure Notifications", -"", -"", -"", -"Settings > Network & Internet > Data Saver > Unrestricted data", -"", -"", -"", -"", -"", -"", -"", -"Settings > Security > Encrypt phone", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings > Accessibility > Caption preferences > Caption size and style", -"Settings > Accessibility > Caption preferences > Caption size and style > Caption Style", -"Settings > Accessibility > Caption preferences > Caption size and style > Text size", -"Settings > Accessibility > Caption preferences", -"Settings > Accessibility > Color and motion", -"Settings > Accessibility > Display size and text > Bold text", -"", -"Settings > Accessibility > Power button ends call", -"", -"", -"", -"Settings > Apps & notifications > Advanced > Special app access", -"", -"Settings > Display > Lock screen > Add text on lock screen", -"", -"", -"", -"", -"Settings > Security > Advanced settings > Encryption & credentials", -"Settings > Sound & vibration > Spatial 
Audio", -"Settings > System > Advanced > Reset options", -"Settings > System > Gestures > Quick tap", -"Settings > Tips & support", -"Display > Screen timeout > Screen attention", -"", -"", -"", -"", -"", -"", -"", -"Permission Manager > Microphone", -"", -"", -"Settings for assistant to announce messages/notifications", -"Begin of NGA proto consistence", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Duplicate, use NIGHT_MODE instead", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"End of NGA proto consistence", -"Chrome OS specific accessibility settings", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"End of Chrome OS specific accessibility settings", -"Wearable device settings", -"", -"", -"", -"", -"", -"", -"", -"Settings > Display > Change watch face End of Wearable device settings", -"Assistant Spoken Notification Setting" -], -"type": "string" -}, -"type": "array" -}, -"supportsDoNotDisturbWithDuration": { -"description": "Additional specific setting capabilities. This boolean is used to indicate if do not disturb with duration is supported through device.MODIFY_SETTING clientop on a client or not.", -"type": "boolean" -}, -"supportsMuteUnmute": { -"description": "Additional specific setting capabilities. This boolean is used to indicate if new unmute logic is enabled on a client or not.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiClientOpPropertiesProviderOpenClientOpProperty": { -"description": "The properties of the provider.OPEN ClientOp. This proto is stored in the SupportedClientOp proto with the key provider.OPEN.", -"id": "AssistantApiClientOpPropertiesProviderOpenClientOpProperty", -"properties": { -"keepsConversationAliveAfterOpeningApp": { -"description": "Whether conversation is kept alive after opening the app. 
See go/keep-opa-conversation-alive for details.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiClockCapabilities": { -"description": "Used to describe clock capabilities of the device (for example, capabilities related to maximum number of supported alarms and timers that can be created on the device). Fields may be populated by clients or be backfilled by SAL (in case of Timon, for example).", -"id": "AssistantApiClockCapabilities", -"properties": { -"maxSupportedAlarms": { -"description": "Maximum number of alarms that can be created on the client.", -"format": "int32", -"type": "integer" -}, -"maxSupportedExtendedTimerDuration": { -"$ref": "AssistantApiDuration", -"description": "Maximum extended timer duration supported by the client. The extended timer duration is the total start-to-finish duration after an AddTimeToTimer operation. E.g. if a user sets a timer for 30 minutes, and later adds 10 minutes, the extended duration is 40 minutes." -}, -"maxSupportedTimerDuration": { -"$ref": "AssistantApiDuration", -"description": "Maximum duration of timers that can be created on the client." -}, -"maxSupportedTimers": { -"description": "Maximum number of timers that can be created on the client.", -"format": "int32", -"type": "integer" -}, -"preferredStopwatchProvider": { -"$ref": "AssistantApiCoreTypesProvider", -"description": "The preferred provider to use for stopwatch related functionality." -}, -"restrictAlarmsToNext24h": { -"description": "Whether the client restricts alarms to ring within the next 24 hours.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCommunicationUiCapabilities": { -"description": "UI capabilities for the surfaces rendering Comms features. 
See go/rohan-comms-fluid-actions-customization.", -"id": "AssistantApiCommunicationUiCapabilities", -"properties": { -"fluidActionsUiType": { -"enum": [ -"DEFAULT", -"SIMPLIFIED" -], -"enumDescriptions": [ -"", -"Fluid actions output optimized for small devices, with no on-device management of conversation state available." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiContactLookupCapabilities": { -"id": "AssistantApiContactLookupCapabilities", -"properties": { -"fallbackToTetheredDevice": { -"description": "If true, contact.LOOKUP should be routed to the tethered device (if present) if the tethered device supports contact.LOOKUP and the primary device does not.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesAndroidAppInfo": { -"description": "The android app information of the provider. Like, Spotify. Next ID: 17", -"id": "AssistantApiCoreTypesAndroidAppInfo", -"properties": { -"accountType": { -"type": "string" -}, -"activityInfo": { -"$ref": "AssistantApiCoreTypesAndroidAppInfoActivityInfo" -}, -"androidIntent": { -"description": "Intent associated with the app. We include intents here as different versions of the same app may support different intents. In those cases, the package_name is not enough to identify the app and we should use the combination of package_name and android_intent. This field might contain sensitive data, if represents ClientOp with encapsulated PII such as user query.", -"type": "string" -}, -"appUniqueId": { -"description": "Store the app unique id endpoint. This will be passed over to app to fulfill the action.", -"type": "string" -}, -"appVersion": { -"deprecated": true, -"description": "The android app version. 
Deprecated because https://developer.android.com/reference/android/content/pm/PackageInfo.html#getLongVersionCode", -"format": "int32", -"type": "integer" -}, -"dataMimetype": { -"deprecated": true, -"description": "data_mimetype and account_type are the what AGSA uses to filter which contacts support this Android app in ContactProvider.", -"type": "string" -}, -"isBroadcastIntent": { -"description": "If true, client should broadcast the intent instead of open the intent.", -"type": "boolean" -}, -"isDefault": { -"description": "App is the default app for it's core functionality. For example, it will be true for Android messages if it is the default app to send and receive SMS on the phone.", -"type": "boolean" -}, -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"longVersionCode": { -"description": "The long android app version.", -"format": "int64", -"type": "string" -}, -"mimetype": { -"description": "Store mimetype of this endpoint. We will use this as the differentiator for Assistant to know whether to use the RawContact for messaging, call or video call. For example, send message mimetype for whatsapp: \"vnd.android.cursor.item/vnd.com.whatsapp.profile\" voice call mimetype for whatsapp: \"vnd.android.cursor.item/vnd.com.whatsapp.voip.call\"", -"type": "string" -}, -"packageName": { -"description": "The android app package of the provider, like \"com.spotify.music\".", -"type": "string" -}, -"providerType": { -"description": "The OemProviderType is specific for OEM system Android apps. For example, in Auto Embedded, the OEM will have a system Radio/Media app. The system app\u2019s capabilities/core functionalities are captured here. For physical media sources, the OEM may decide to implement one media app (thus, one package name) that handles multiple physical media sources. 
For these cases, each physical media source will be sent as different providers even though the package name is the same.", -"enum": [ -"UNKNOWN_OEM_PROVIDER_TYPE", -"RADIO_PROVIDER_TYPE", -"SXM_RADIO_PROVIDER_TYPE" -], -"enumDescriptions": [ -"", -"Refers to an app that handles AM/FM Radio via a physical radio tuner in the device. See go/radio-assistant-for-auto.", -"Refers to an app that handles Satellite (SiriusXM) Radio via a physical Satellite tuner in the device. See go/sxm-on-gas." -], -"type": "string" -}, -"shortcutId": { -"description": "Id of the app's Android shortcut to be launched by Assistant. The client is expected to use the Android LauncherApps API to execute this shortcut which in turn will open the app. For example, Whatsapp may create an Android shortcut for a frequently messaged contact with an id \"contact_123\". This field will contain that id and the client can execute it to open up the chat with that particular contact. If this field is set, the package_name field must also be set since both will be used by the LauncherApps API for execution. If this field is set, the intent related fields will be ignored and not used as a fallback. Design: go/shortcut-id-in-provider-open-clientop This field should only be set for devices with Android API level >= 25 (since that is the version from which the LauncherApps startShortcut API is available)", -"type": "string" -}, -"targetClass": { -"description": "The fully qualified target class name of the provider, like \"com.example.myapp.GetOrderService\".", -"type": "string" -}, -"versionName": { -"description": "The android app version name, like \"4.1.091.05.40d\", \"11.2.7.21.alpha\". 
Android Docs: https://developer.android.com/reference/android/content/pm/PackageInfo#versionName", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesAndroidAppInfoActivityInfo": { -"description": "General information about activities in the app.", -"id": "AssistantApiCoreTypesAndroidAppInfoActivityInfo", -"properties": { -"activeLaunchableActivities": { -"description": "Activities that are currently active and tagged as ACTION_MAIN and CATEGORY_LAUNCHER. Includes the activity corresponding to android_intent if it falls under CATEGORY_LAUNCHER.", -"items": { -"$ref": "AssistantApiCoreTypesAndroidAppInfoActivityInfoActivity" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesAndroidAppInfoActivityInfoActivity": { -"description": "Information about an individual activity.", -"id": "AssistantApiCoreTypesAndroidAppInfoActivityInfoActivity", -"properties": { -"localizedActivityName": { -"description": "The localized user visible activity name.", -"type": "string" -}, -"shortClassName": { -"description": "Short class name for activity, following https://developer.android.com/reference/android/content/ComponentName#getShortClassName()", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesAndroidAppInfoDelta": { -"description": "The change of AndroidAppInfo, e.g. app installation or deletion for incremental delta app info upload.", -"id": "AssistantApiCoreTypesAndroidAppInfoDelta", -"properties": { -"androidAppInfo": { -"$ref": "AssistantApiCoreTypesAndroidAppInfo", -"description": "The android app information of the provider. Like, Spotify." 
-}, -"lastUpdateTimestamp": { -"description": "The client-side timestamp in millis when the app is last updated, installed or deleted.", -"format": "google-datetime", -"type": "string" -}, -"updateType": { -"description": "App is installed or deleted.", -"enum": [ -"UNKNOWN_TYPE", -"IS_INSTALLED", -"IS_DELETED" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEvent": { -"description": "This proto contains the information of a calendar event, including title, start time, end time, etc. LINT.IfChange(CalendarEvent) NEXT_ID: 26", -"id": "AssistantApiCoreTypesCalendarEvent", -"properties": { -"attendees": { -"description": "Attendees invited to the event, usually includes also the organizer.", -"items": { -"$ref": "AssistantApiCoreTypesCalendarEventAttendee" -}, -"type": "array" -}, -"backgroundColor": { -"description": "The background color of the event, in RGB format.", -"format": "int32", -"type": "integer" -}, -"calendarId": { -"description": "Optional calendar containing the event.", -"type": "string" -}, -"creator": { -"$ref": "AssistantApiCoreTypesCalendarEventAttendee", -"description": "The person who created this event." -}, -"description": { -"description": "Optional description of the event (plain text).", -"type": "string" -}, -"end": { -"$ref": "AssistantApiDateTime", -"description": "The end time of the event. Start and end time must either both be date or both be datetime. End is exclusive, ie. the first day / first second when the event is over." -}, -"eventId": { -"description": "Optional event id provided by assistant server. Needs to be unique, at least on a per-user and calendar level, ideally globally unique. If none is given, the server will assign an id.", -"type": "string" -}, -"flairName": { -"description": "The flair name, calculated according to the event title (go/as-cal-flair). 
With the flair name, background images can be got from gstatic (go/scs): https://ssl.gstatic.com/tmly/f8944938hffheth4ew890ht4i8/flairs/", -"type": "string" -}, -"foregroundColor": { -"description": "The foreground color of the event, in RGB format.", -"format": "int32", -"type": "integer" -}, -"guestsCanInviteOthers": { -"description": "Whether the guests can invite other guests.", -"type": "boolean" -}, -"guestsCanModify": { -"description": "Whether the guests can modify the event.", -"type": "boolean" -}, -"guestsCanSeeGuests": { -"description": "Whether the guests of the event can be seen. If false, the user is reported as the only attendee to the event, even though there may be more attendees.", -"type": "boolean" -}, -"habitId": { -"description": "Optional id of the Habit (Calendar Goal) this event is linked to", -"type": "string" -}, -"habitStatus": { -"description": "Optional status for this habit event instance.", -"enum": [ -"UNKNOWN_STATUS", -"ACTIVE", -"DEFERRAL_REQUESTED", -"COMPLETE", -"UNDEFERRABLE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"htmlLink": { -"description": "Absolute link to this event in the Calendar web UI.", -"type": "string" -}, -"location": { -"description": "Optional location of the event (plain text).", -"type": "string" -}, -"meetingContacts": { -"items": { -"$ref": "AssistantApiCoreTypesCalendarEventMeetingContact" -}, -"type": "array" -}, -"organizer": { -"$ref": "AssistantApiCoreTypesCalendarEventAttendee", -"description": "The organizer of this event." -}, -"otherAttendeesExcluded": { -"description": "Whether not all attendees are included in the attendee list. 
This is set when the attendees list has been truncated (e.g., when the number of attendees is beyond the maxAttendees limitation).", -"type": "boolean" -}, -"participationResponse": { -"description": "The user's response (the owner of this copy of the event) to this event.", -"enum": [ -"RESPONSE_STATUS_UNSPECIFIED", -"NEEDS_ACTION", -"DECLINED", -"TENTATIVE", -"ACCEPTED" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"recurringEventId": { -"description": "If this is an instance of a recurring event, recurring_event_id identifies the recurring series as a whole.", -"type": "string" -}, -"rooms": { -"description": "Meeting rooms associated to this event.", -"items": { -"$ref": "AssistantApiCoreTypesCalendarEventRoom" -}, -"type": "array" -}, -"start": { -"$ref": "AssistantApiDateTime", -"description": "The start time of the event. This event is an all-day event if start has no time_of_day." -}, -"summary": { -"description": "The title of the event.", -"type": "string" -}, -"visibility": { -"description": "Optional visibility of the event.", -"enum": [ -"DEFAULT", -"PUBLIC", -"PRIVATE", -"CONFIDENTIAL", -"SECRET", -"SHADOW", -"UNKNOWN" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"When we receive a value outside of this enum it will be replaced with this UNKNOWN field. When trying to send UNKNOWN it will be silently converted to DEFAULT." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEventAttendee": { -"description": "Next id: 8", -"id": "AssistantApiCoreTypesCalendarEventAttendee", -"properties": { -"displayName": { -"description": "Display name, present only if available.", -"type": "string" -}, -"email": { -"description": "Email address of the attendee (calendar), for regular events. For +Events, this field is not populated, instead \"id\" is used.", -"type": "string" -}, -"givenName": { -"description": "Given (first) name, present only if available. 
This is used for generating meeting titles as given name is preferred over display (full) name (ie: \"Jeff : Sundar\" is better than \"Jeff Dean : Sundar Pichai\").", -"type": "string" -}, -"id": { -"description": "Profile ID of the principal, for +Events. For regular events, this field is not populated, instead \"email\" is used.", -"type": "string" -}, -"organizer": { -"description": "Is this the organizer?", -"type": "boolean" -}, -"responseStatus": { -"description": "Attendees response status.", -"enum": [ -"RESPONSE_STATUS_UNSPECIFIED", -"NEEDS_ACTION", -"DECLINED", -"TENTATIVE", -"ACCEPTED" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"self": { -"description": "Is this the owner of this copy of the event?", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEventMeetingContact": { -"description": "Next id: 10", -"id": "AssistantApiCoreTypesCalendarEventMeetingContact", -"properties": { -"conferenceId": { -"description": "ID that corresponds to in ConferenceData.conference_id in calendar.common.ConferenceData proto. For Meet, this is the identifier used to join a meeting via URL.", -"type": "string" -}, -"dialInNumberClasses": { -"items": { -"enum": [ -"NUMBER_CLASS_UNSPECIFIED", -"LOW_COST", -"HIGH_COST", -"LEGACY" -], -"enumDescriptions": [ -"No number class has been specified.", -"The number has a low cost to receive calls on.", -"The number has a high cost to receive calls on.", -"Class for legacy numbers." -], -"type": "string" -}, -"type": "array" -}, -"phoneNumberUri": { -"description": "Default meeting phone number, for example: \"tel:+1-475-777-1840\"", -"type": "string" -}, -"pinNumber": { -"description": "A PIN that the participant will need to input after dialing in the conference.", -"type": "string" -}, -"provider": { -"$ref": "AssistantApiCoreTypesProvider", -"description": "Provider info for the meeting." 
-}, -"regionCode": { -"description": "The region code for the default meeting phone number", -"type": "string" -}, -"source": { -"enum": [ -"SOURCE_UNSPECIFIED", -"STRUCTURED_DATA", -"UNSTRUCTURED_DATA" -], -"enumDescriptions": [ -"The source is unknown.", -"The conference information was retrieved from structured fields.", -"The conference information was parsed and extracted from unstructured fields (e.g. event description)." -], -"type": "string" -}, -"universalPinNumber": { -"description": "The universal meeting PIN number for phone numbers in all available countries", -"type": "string" -}, -"url": { -"description": "URL that can be used to join the meeting.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEventRoom": { -"description": "A room that is available for a potential meeting or has been booked for a scheduled meeting. Next id: 4", -"id": "AssistantApiCoreTypesCalendarEventRoom", -"properties": { -"email": { -"description": "Room email that identifies the room and is used to book it.", -"type": "string" -}, -"locationDetails": { -"$ref": "AssistantApiCoreTypesCalendarEventRoomRoomLocationDetails", -"description": "Additional room details. Read-only, populated on request." -}, -"name": { -"description": "Room name (ex: \"MTV-PR55-5-A-Shadow 5K0 (13) GVC (No external guests)\").", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEventRoomRoomLocationDetails": { -"description": "Room location details. Read-only, populated on request. 
Next id: 8", -"id": "AssistantApiCoreTypesCalendarEventRoomRoomLocationDetails", -"properties": { -"building": { -"description": "Building where the room is (ex: \"PR55\").", -"type": "string" -}, -"city": { -"description": "City where the room is (ex: \"MTV\").", -"type": "string" -}, -"floor": { -"description": "Floor where the room is (ex: \"5\").", -"type": "string" -}, -"latitude": { -"description": "The latitude in degrees.", -"format": "double", -"type": "number" -}, -"longitude": { -"description": "The longitude in degrees.", -"format": "double", -"type": "number" -}, -"section": { -"description": "Section in the floor (ex: \"A\").", -"type": "string" -}, -"simpleName": { -"description": "Room name (ex: \"Shadow 5K0\").", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCalendarEventWrapper": { -"description": "This empty type allows us to publish sensitive calendar events to go/attentional-entities, while maintaining BUILD visibility protection for their contents. The BUILD-visibility-protected extension to this message is defined at http://google3/assistant/verticals/calendar/proto/multi_account_calendar_event.proto", -"id": "AssistantApiCoreTypesCalendarEventWrapper", -"properties": {}, -"type": "object" -}, -"AssistantApiCoreTypesCastAppInfo": { -"description": "The cast app information of the provider.", -"id": "AssistantApiCoreTypesCastAppInfo", -"properties": { -"castAppId": { -"description": "The cast app id. |cast_app_id| is the ID of the cast app used on the current device and |content_app_id| is the ID of the app that provides the actual content. For example, in a group playback, on a follower device, the |cast_app_id| is the follower cast app ID and the |content_app_id| is the leader cast app ID.", -"type": "string" -}, -"contentAppId": { -"description": "The id of the cast app that provides the content in a group. The field will always be filled. 
In the case of a group playback and the current device is a follower, the |cast_app_id| has the ID of the follower app, and |content_app_id| has ID of the actual content app. In all other cases, |content_app_id| and |cast_app_id| will be the same.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesChromeOsAppInfo": { -"description": "The ChromeOS app information of the provider. Next ID: 3", -"id": "AssistantApiCoreTypesChromeOsAppInfo", -"properties": { -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"packageName": { -"description": "Unique package name that identifies a ChromeOS app of the provider.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCloudProviderInfo": { -"description": "The third party provider information.", -"id": "AssistantApiCoreTypesCloudProviderInfo", -"properties": { -"agentStyle": { -"$ref": "AssistantApiCoreTypesCloudProviderInfoAgentStyle" -}, -"directoryUrl": { -"description": "URL to a directory page about the third party agent in Assistant HQ. This is a universal (https) URL that may be handled natively by clients to show HQ or launch to the HQ directory web page.", -"type": "string" -}, -"logoUrl": { -"description": "The logo url for the third party provider.", -"type": "string" -}, -"name": { -"description": "The user visible name of the cloud provider, which may be used for example in the chat header during a conversation with the third party.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesCloudProviderInfoAgentStyle": { -"description": "The style customizations for the 3p agent.", -"id": "AssistantApiCoreTypesCloudProviderInfoAgentStyle", -"properties": { -"backgroundColor": { -"$ref": "AssistantApiCoreTypesColor", -"description": "The background color of the agent. Used if no background image is specified for the given display orientation, or if the provided background image does not fit." 
-}, -"headerTheme": { -"enum": [ -"DEFAULT", -"DARK", -"LIGHT" -], -"enumDescriptions": [ -"The client's default theme.", -"The theme used when the card has a background image. See go/aog-cards-header-theme-dark for details on applying this theme.", -"The theme used when the card has a background color (and no background image). See go/aog-cards-header-theme-light for details on applying this theme." -], -"type": "string" -}, -"landscapeBackgroundImageUrl": { -"description": "URL for the background image of the agent on landscape display.", -"type": "string" -}, -"logoUrl": { -"description": "URL for the image containing the 3p logo. This can include logomark and logotype, or logotype only. If present, this can be used in place of the square logo contained in the top level logo_url field in CloudProviderInfo. See go/cards-logo-customization for details on applying this logo.", -"type": "string" -}, -"maskColor": { -"$ref": "AssistantApiCoreTypesColor", -"description": "The color of the mask to apply to the background. See go/aog-cards-background-mask for details on applying this mask." -}, -"portraitBackgroundImageUrl": { -"description": "URL for the background image of the agent on portrait display.", -"type": "string" -}, -"primaryColor": { -"$ref": "AssistantApiCoreTypesColor", -"description": "The primary color of the agent. Used by the client to style the header and suggestion chips." -} -}, -"type": "object" -}, -"AssistantApiCoreTypesColor": { -"description": "Represents a color in the RGBA color space. This message mirrors google.type.Color.", -"id": "AssistantApiCoreTypesColor", -"properties": { -"alpha": { -"description": "The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: pixel color = alpha * (this color) + (1.0 - alpha) * (background color) This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. 
If omitted, this color object is to be rendered as a solid color (as if the alpha value had been explicitly given with a value of 1.0).", -"format": "float", -"type": "number" -}, -"blue": { -"description": "The amount of blue in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -}, -"green": { -"description": "The amount of green in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -}, -"red": { -"description": "The amount of red in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesDeviceConfig": { -"description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. Next Id: 5", -"id": "AssistantApiCoreTypesDeviceConfig", -"properties": { -"agentId": { -"description": "Pantheon Project ID that uniquely identifies the consumer project ID. Required", -"type": "string" -}, -"deviceId": { -"description": "Unique identifier for the device. Example: DBCDW098234. Required", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesDeviceId": { -"description": "LINT.IfChange(DeviceId) Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. 
* C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. Next ID: 14", -"id": "AssistantApiCoreTypesDeviceId", -"properties": { -"agsaClientInstanceId": { -"description": "The client_instance_id on devices with GSA. See 'client_instance_field' in go/androidids.", -"type": "string" -}, -"alloDeviceId": { -"description": "Allo Id. Corresponds to the GBotRequest.Sender.sender. NOTE(dychen): This may change to standard android/ios physical device ids in order to enable shared data (e.g. installed app on physical device shared between Allo and Opa apps on Nexus).", -"type": "string" -}, -"canonicalDeviceId": { -"description": "A unique device ID for Assistant devices as proposed by go/ocelot-team to solve the device id fragmentation problem. The value of this id is the HomeGraph id of the device. See go/ocelot-track-0-registry-design. New surfaces should use the canonical_device_id instead of using other ids, and the registration should utilize the DeviceDataLayer (go/ddl-v0). Please contact the assistant-state-management@ team for guidance. Note: We didn't reuse |home_graph_device_id| because in Assistant code base |home_graph_device_id| is common to associate it with 3P devices. See go/project-yellowstone for more context.", -"type": "string" -}, -"castDeviceId": { -"description": "If set, indicates that the device is a cast device, and contains the UUID of the cast device. Corresponds to the device_id field of the CastDevice proto.", -"type": "string" -}, -"clientInstanceId": { -"description": "DUSI (go/dusi) is used as the identifier here. This identifier is unique to the user and device. This will help identify which device or application the user's request originated from. 
This is not to be confused with the client_instance_id that android devices provide. This is currently used by surfaces that use the assistant-legacy-nexus and assistant-legacy-clockwork pipelines. DUSI is created and set in S3. This field is only filled for GAIA requests.", -"type": "string" -}, -"connectedDockId": { -"description": "A device ID produced by a connected dock, which is registered in HomeGraph.", -"type": "string" -}, -"deviceConfig": { -"$ref": "AssistantApiCoreTypesDeviceConfig", -"description": "The unique DeviceConfig to the specific third party device. It is also used by Android Auto Embedded first party device. See go/opa-ids." -}, -"deviceType": { -"deprecated": true, -"description": "DEPRECATED. assistant.api.core_types.SurfaceIdentity.surface_type field should be used instead. The device's surface type. This is the string version of surface_type. The server should use the SurfaceType value derived from this string. If the device_type isn't supported within the SurfaceType enum, it will be set as UNKNOWN. Developers should use the enum in ServerParams instead of this string. WARNING: Clients are not actually setting this field. This field will be removed once references to it within the code base have been removed.", -"type": "string" -}, -"homeGraphDeviceId": { -"description": "The unique device ID for HomeGraph devices. This is the HomeGraph ID, created when the device is registered into HomeGraph. It is immutable for the same device unless it is completely deleted and recreated. See go/home-graph for details.", -"type": "string" -}, -"libassistantDeviceId": { -"description": "The unique ID for libassistant based devices. See go/libassistant-id for details.", -"type": "string" -}, -"multiHotwordArbitrationDeviceId": { -"description": "If set, indicates that the device is participating the multi-hotword arbitration and the id is an UUID to distinguish it from other devices. 
It should also be consistent between requests from a single device within a session (or short duration).", -"type": "string" -}, -"opaIosDeviceId": { -"description": "The unique device ID for the Assistant App on iOS. See go/opa-ios-design for details.", -"type": "string" -}, -"quartzDeviceId": { -"description": "The unique ID of a Quartz device. See go/quartz-design-doc for more details. Quartz ID is a hash of (android_id + gaia).", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesDeviceUserIdentity": { -"description": "The set of information that helps Assistant identify a device-user pair. An example use of this proto is in broadcast reply, when after receiving a broadcast, we store a device-user pair in an attentional entity in order to later infer who initated the broadcast on which device.", -"id": "AssistantApiCoreTypesDeviceUserIdentity", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The identifier of the device." -}, -"gaiaId": { -"description": "The identifier of the user.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesHomeAppInfo": { -"description": "The Home app information of the provider. Next ID: 3", -"id": "AssistantApiCoreTypesHomeAppInfo", -"properties": { -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"packageName": { -"description": "Unique package name that identifies a Home app of the provider.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesImage": { -"description": "An image represents the data about an image or a photo. NextId: 13", -"id": "AssistantApiCoreTypesImage", -"properties": { -"accessibilityText": { -"description": "A text description of the image to be used for accessibility, e.g. screen readers.", -"type": "string" -}, -"appIconIdentifier": { -"description": "App identifier. 
This field is specific to mobile surfaces and stands for app package name for Android surface, and app bundle identifier for iOS. In case identifier is specified but invalid, some default icon will be used, e.g. PackageManager.getDefaultActivityIcon() for Android. If you want to show image for AGSA versions which don't support this field, you can specify source_url as backup.", -"type": "string" -}, -"badgeImage": { -"$ref": "AssistantApiCoreTypesImage", -"description": "This is the image that is displayed as the badge on the main image." -}, -"content": { -"description": "Content of the image in bytes.", -"format": "byte", -"type": "string" -}, -"height": { -"format": "int32", -"type": "integer" -}, -"imageSource": { -"description": "Indicate the data source where the image is fetched.", -"enum": [ -"UNKNOWN", -"PLACEHOLDER", -"VISUAL_DICT", -"LAVD", -"VISUAL_DICT_DEFAULT_LOCALE" -], -"enumDescriptions": [ -"Unknown image source.", -"Indicates this image is a default placeholder because no valid image is found.", -"Indicates the image is fetched from Visual Dictionary.", -"Indicates the image is fetched from LAVD (visual-dictionary-license-aware-requirements).", -"Indicates the image is fetched from Visual Dictionary of default locale \"en\"." -], -"type": "string" -}, -"jsonContent": { -"description": "Content of image in form of JSON representation.", -"type": "string" -}, -"letterDrawableText": { -"description": "Text used to generate a letter drawable (a letter icon with color). It will be the default icon if the source_url is empty or cannot be rendered.", -"type": "string" -}, -"providerUrl": { -"description": "Url of the image provider, which is the website containing the image. For example, https://www.agentx.com.", -"type": "string" -}, -"sourceUrl": { -"description": "The source url of the image. 
For example, https://www.agentx.com/logo.png", -"type": "string" -}, -"sourceUrlType": { -"description": "Type of the source url.", -"enum": [ -"DEFAULT_URL_TYPE", -"LOTTIE", -"DUO_CLIENT", -"CONTACT_ID", -"GLIDE_CACHE_ID" -], -"enumDescriptions": [ -"", -"Json url used to drive animation on surfaces using Airbnb lottie library.", -"Duo url specifying the Duo resource id to be used ('duo://'). WARNING: This may not be supported on all clients. At the moment, only Dragonglass surfaces are supporting this.", -"Contact ID specifying contact resource to be used.", -"ID used for caching image with Glide." -], -"type": "string" -}, -"width": { -"description": "The width and height of the image in pixels.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesInternalProviderInfo": { -"description": "Info for targeting a feature provided directly by the Assistant surface itself. i.e Could be pointing to AGSA audio player for AUDIO_PLAYER on AGSA.", -"id": "AssistantApiCoreTypesInternalProviderInfo", -"properties": { -"type": { -"description": "Specifying which type of internal provider.", -"enum": [ -"UNKNOWN_INTERNAL_PROVIDER_TYPE", -"AUDIO_PLAYER", -"AUDIO_PLAYER_V2", -"MEDIA_PLAYER", -"MEDIA_PLAYER_IOS", -"AUDIO_ONLY_PLAYER", -"NARRATED_WEB_MEDIA_PLAYER", -"LIBASSISTANT_MEDIA_PLAYER", -"LENS_PLAYER", -"NEWS_PLAYER" -], -"enumDescriptions": [ -"", -"Deprecated, because supported AGSA implementation had a bug (b/72229328). Use AUDIO_PLAYER_V2 instead.", -"Internal provider for playing audio.", -"Internal provider for playing audio and video for AGSA.", -"iOS MEDIA_PLAYER.", -"Internal provider for playing audio only, different from MEDIA_PLAYER. It is used by KaiOS, for news vertical. It is different from AUDIO_PLAYER_V2 which is for AGSA, and not supported by news vertical.", -"Internal provider for playing audio narration of web pages.", -"Internal provider for playing audio for libassistant. 
Historically each enum represents a different media player implementation. This one is meant for the playing media on the libassistant enabled surfaces.", -"Internal provider for playing audio using lens audio player, for screenshot based readout.", -"Internal provider for news vertical on AssistantMediaPlayer in AGSA." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesIosAppInfo": { -"description": "The iOS app information of the provider. Next ID: 4", -"id": "AssistantApiCoreTypesIosAppInfo", -"properties": { -"bundleIdentifier": { -"description": "Bundle identifier that identifies an iOS app of the provider.", -"type": "string" -}, -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"openAppUrl": { -"description": "A URL to open the provider's app.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesKaiOsAppInfo": { -"description": "The KaiOS app information of the provider. Next ID: 4", -"id": "AssistantApiCoreTypesKaiOsAppInfo", -"properties": { -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"openAppUrl": { -"description": "A URL to open the provider's app.", -"type": "string" -}, -"packageName": { -"description": "Unique package name that identifies a KaiOS app of the provider.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesLocationCoordinates": { -"description": "Geographic coordinate information for location.", -"id": "AssistantApiCoreTypesLocationCoordinates", -"properties": { -"accuracyMeters": { -"description": "The accuracy of the coordinates in meters.", -"format": "double", -"type": "number" -}, -"latDegrees": { -"description": "Latitude degrees.", -"format": "double", -"type": "number" -}, -"lngDegrees": { -"description": "Longitude degrees.", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesMessageNotification": { -"description": "This proto captures the contents 
of a messaging app notification that is typically part of a conversation thread. Next Id: 21", -"id": "AssistantApiCoreTypesMessageNotification", -"properties": { -"appName": { -"description": "App name of the message notification, e.g. Hangouts.", -"type": "string" -}, -"bundleId": { -"description": "The key used to group this notification into a cluster.", -"type": "string" -}, -"dataUri": { -"description": "Uri for the attachment (image, audio, video etc.).", -"type": "string" -}, -"groupName": { -"description": "Name of the group associated with the message notification. This field is set iff this is a group message.", -"type": "string" -}, -"groupingKey": { -"description": "The group key of a proactive notification. Details in assistant.api.client_op.NotificationArgs.grouping_key.", -"type": "string" -}, -"index": { -"description": "Index of the message notification.", -"format": "int32", -"type": "integer" -}, -"markAsReadActionAvailable": { -"deprecated": true, -"description": "Boolean indicating if the mark_as_read action is available for this message.", -"type": "boolean" -}, -"messageLength": { -"description": "Length of the message/notification content in characters. Note: We can't send the full content because of privacy restriction, preventing sending client content to our backends. Concatenated message_length of all notification_entries.", -"format": "int32", -"type": "integer" -}, -"messageRecipientType": { -"enum": [ -"UNKNOWN", -"INDIVIDUAL", -"GROUP" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"messageWordCount": { -"description": "Word count of the message", -"format": "int32", -"type": "integer" -}, -"mimeType": { -"description": "Mime type of the data_uri. e.g. 
'audio/wav', 'video/mp4', 'image/png'.", -"type": "string" -}, -"notificationEntries": { -"items": { -"$ref": "AssistantApiCoreTypesMessageNotificationNotificationEntry" -}, -"type": "array" -}, -"notificationIconKey": { -"description": "On-device cache key for notification icon.", -"type": "string" -}, -"notificationKey": { -"description": "String key of the notification. It is the key from original StatusBarNotification received from Android OS. It is used to identify the original notification to send a reply.", -"type": "string" -}, -"opaqueToken": { -"description": "The opaque_token of a proactive notification. Details in assistant.api.client_op.NotificationArgs.opaque_token.", -"format": "byte", -"type": "string" -}, -"packageName": { -"description": "App pkg of the message notification, e.g. \"com.google.android.talk\".", -"type": "string" -}, -"postTime": { -"description": "Timestamp of the last notification's post time.", -"format": "int64", -"type": "string" -}, -"replyActionAvailable": { -"description": "Boolean indicating if the reply action is available for this message.", -"type": "boolean" -}, -"sender": { -"$ref": "AssistantApiCoreTypesMessageNotificationPerson" -}, -"senderName": { -"deprecated": true, -"description": "Sender's name of the message notification, e.g. Elsa. Last sender name in case of a group conversation.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesMessageNotificationNotificationEntry": { -"description": "Structure of each notification in the MessageNotification Bundle. Attribute sender_name could be different in case of group messages. 
Next Id: 9", -"id": "AssistantApiCoreTypesMessageNotificationNotificationEntry", -"properties": { -"audioDuration": { -"description": "Duration of audio message.", -"format": "google-duration", -"type": "string" -}, -"charCount": { -"description": "Count of characters in the message body in the notification.", -"format": "int32", -"type": "integer" -}, -"dataUri": { -"description": "Uri for the attachment (image, audio, video etc.).", -"type": "string" -}, -"messageBody": { -"description": "Note that this is not present in production traffic. Content of the message body in the notification.", -"type": "string" -}, -"mimeType": { -"description": "Mime type of the data_uri. e.g. 'audio/wav', 'video/mp4', 'image/png'.", -"type": "string" -}, -"postTime": { -"description": "Timestamp of the notification's post time.", -"format": "google-datetime", -"type": "string" -}, -"sender": { -"$ref": "AssistantApiCoreTypesMessageNotificationPerson", -"description": "Sender of the message notification." -}, -"wordCount": { -"description": "Count of words in the message body in the notification.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesMessageNotificationPerson": { -"description": "Mirrors part of https://developer.android.com/reference/android/app/Person Next Id: 4", -"id": "AssistantApiCoreTypesMessageNotificationPerson", -"properties": { -"isImportant": { -"type": "boolean" -}, -"key": { -"type": "string" -}, -"name": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesProvider": { -"description": "Provider. Like, Spotify or iHeartRadio. Next ID: 13", -"id": "AssistantApiCoreTypesProvider", -"properties": { -"androidAppInfo": { -"$ref": "AssistantApiCoreTypesAndroidAppInfo", -"description": "The android app information of the provider." -}, -"castAppInfo": { -"$ref": "AssistantApiCoreTypesCastAppInfo", -"description": "The cast app information of the provider." 
-}, -"chromeosAppInfo": { -"$ref": "AssistantApiCoreTypesChromeOsAppInfo", -"description": "The ChromeOS app information of the provider." -}, -"cloudProviderInfo": { -"$ref": "AssistantApiCoreTypesCloudProviderInfo", -"description": "The third party provider information." -}, -"fallbackUrl": { -"description": "A URL to fallback to if app can not be opened.", -"type": "string" -}, -"homeAppInfo": { -"$ref": "AssistantApiCoreTypesHomeAppInfo" -}, -"iconImageUrl": { -"description": "Public URL pointing to an icon image for the provider. e.g. https://lh3.googleusercontent.com/UrY7BAZ-XfXGpfkeWg0zCCeo-7ras4DCoRalC_WXXWTK9q5b0Iw7B0YQMsVxZaNB7DM", -"type": "string" -}, -"internalProviderInfo": { -"$ref": "AssistantApiCoreTypesInternalProviderInfo", -"description": "The internal assistant provider information." -}, -"iosAppInfo": { -"$ref": "AssistantApiCoreTypesIosAppInfo", -"description": "The iOS app information of the provider." -}, -"kaiosAppInfo": { -"$ref": "AssistantApiCoreTypesKaiOsAppInfo", -"description": "The KaiOS app information of the provider." -}, -"sipProviderInfo": { -"$ref": "AssistantApiCoreTypesSipProviderInfo", -"description": "The sip information of the provider." -}, -"webProviderInfo": { -"$ref": "AssistantApiCoreTypesWebProviderInfo", -"description": "The web provider information." -} -}, -"type": "object" -}, -"AssistantApiCoreTypesProviderDelta": { -"description": "ProviderDelta. The incremental change, e.g. installation or deletion for Spotify or iHeartRadio. Currently it is for Android only. A few considerations for edge cases: - If the app being deleted is not found from Footprints, it is ignored. - For Footprint upload through Geller, the gPRC response is available for client to retry in the next upload if the upload fails. - For Assistant Request, there is no upload status similar to the current AppCapabilities. 
Next ID: 4", -"id": "AssistantApiCoreTypesProviderDelta", -"properties": { -"androidAppInfoDelta": { -"$ref": "AssistantApiCoreTypesAndroidAppInfoDelta", -"description": "The android app information of the provider." -}, -"fallbackUrl": { -"description": "A URL to fallback to if app can not be opened.", -"type": "string" -}, -"iconImageUrl": { -"description": "Public URL pointing to an icon image for the provider. e.g. https://lh3.googleusercontent.com/UrY7BAZ-XfXGpfkeWg0zCCeo-7ras4DCoRalC_WXXWTK9q5b0Iw7B0YQMsVxZaNB7DM", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadata": { -"description": "Task metadata information describing the ringtone. Next id: 11", -"id": "AssistantApiCoreTypesRingtoneTaskMetadata", -"properties": { -"category": { -"description": "The category related with the ringtone. It's used to generate ringtone related with the category if the entity_mid is not be populated. E.g. for instrument, the ringtone may be piano sound.", -"enum": [ -"UNKNOWN_CATEGORY", -"ANIMAL", -"CHARACTER", -"EMOTION", -"INSTRUMENT", -"MEDIA", -"SPORTS_EQUIPMENT", -"VEHICLE", -"ON_DEVICE", -"FUNTIME" -], -"enumDeprecated": [ -false, -false, -true, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"Character alarm is a kind of media alarm. CHARACTER category should be removed. Use MEDIA for character alarms", -"", -"", -"", -"", -"", -"An alarm sound that the user can select to play when an alarm is firing, the alarm sound resource is located on device in libassistant.", -"An alarm sound that is based upon the label that the timer or alarm is invoked with (ie pizza timer invokes an Italian accordian theme). The assets are streamed from web storage." 
-], -"type": "string" -}, -"characterAlarmMetadata": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataCharacterAlarmMetadata" -}, -"characterTag": { -"deprecated": true, -"type": "string" -}, -"entityMid": { -"description": "The freebase mid of the entity related to the ringtone. It will be used to generate the ringtone for the alarm or timer (with support for i18n). For instance, for the \"cat\" mid, the related ringtone will be a cat sound in some language, and for the \"Beyonce\" mid, the ringtone will be, e.g., a playlist of Beyonce's best hits.", -"type": "string" -}, -"funtimeMetadata": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataFuntimeMetadata" -}, -"genMlAlarmMetadata": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataGenMlAlarmMetadata" -}, -"gentleWakeInfo": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataGentleWakeInfo", -"description": "Gentle wake information for this alarm." -}, -"onDeviceAlarmMetadata": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataOnDeviceAlarmMetadata" -}, -"onDeviceAlarmSound": { -"description": "Will be deprecated. 
Use OnDeviceAlarmMetadata.", -"enum": [ -"DEFAULT", -"MELLOW", -"MODERN_TIMES", -"BEAUTIFUL_MIND", -"LITTLE_SUNSHINE", -"TOUCH_OF_ZEN", -"ABOUT_TIME", -"RANDOM", -"BOROBUDUR", -"PEBBLES", -"BRIGHT_MORNING", -"ACROSS_THE_VALLEY", -"MORNING_SONG", -"KYOTO", -"AWAKEN", -"CUCKOO", -"DIGITAL_BLAST", -"ACOUSTIC_SUNLIGHT", -"SUNRISE_BOSSA", -"CALM_GLOW", -"ANTIQUE_CLOCK", -"JUST_BIRDS", -"JUNGLE_AMBIENCE", -"QUAINT_VILLAGE", -"BUBBLY_BOSSA", -"ACOUSTIC_JAM", -"EUPHORIC" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"routineAlarmMetadata": { -"$ref": "AssistantApiCoreTypesRingtoneTaskMetadataRoutineAlarmMetadata" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataCharacterAlarmMetadata": { -"id": "AssistantApiCoreTypesRingtoneTaskMetadataCharacterAlarmMetadata", -"properties": { -"agentIds": { -"description": "For character alarm, the media resources are provided through AOG apps. During alarm trigger phase, aog apps with the specified agent_ids are used to get the media resources. Multiple \"AoG agents\" can satisfy a character_tag. So the user will select the agents they want at alarm creation time. The chosen agents will be recorded so that the resources only from those agents will be used at trigger time. The number of selected agent_ids will not exceed 3. See go/character-alarm-aog.", -"items": { -"type": "string" -}, -"type": "array" -}, -"characterTags": { -"description": "The Character Alarm tag. Tags are needed to identify the theme of the alarm. For example, if the tag is 'astronaut', astronaut based audio is played during alarm ring. Note : We have made it repeated so that the user can choose multiple character alarm themes at one go. 
At present, the user is allowed to choose only one theme during alarm creation.", -"items": { -"type": "string" -}, -"type": "array" -}, -"iconUrls": { -"description": "Icons urls corresponding to a character. Note : We have made it repeated so that we can show different images when the alarm rings. At present, we only support only one image.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataFuntimeMetadata": { -"description": "Used to make timers and alarms more delightful. See go/funtime-engdesign for more details.", -"id": "AssistantApiCoreTypesRingtoneTaskMetadataFuntimeMetadata", -"properties": { -"agentIds": { -"description": "For FunTime alarms and timers, the media resources are provided through AOG apps during their ringtone. Multiple AoG agents can satisfy a label. So a random agent will be chosen from those that are supplied. See go/funtime-engdesign.", -"items": { -"type": "string" -}, -"type": "array" -}, -"animationBlob": { -"description": "These bytes may represent the blob of the Rive animation that we pass to the Opal App. We will deprecate this field if we figure out a solution to load the animation from the web.", -"format": "byte", -"type": "string" -}, -"animationUrl": { -"description": "Url for Rive animation that is brought up on ring. Rive is a lightweight animation library that is compatible with Flutter on Opal. See https://rive.app/.", -"type": "string" -}, -"timerHeroUrl": { -"description": "The url used to load the image that is at the center of the timer during timer countdown visuals.", -"type": "string" -}, -"ttsServiceRequestBytes": { -"description": "This is used to call S3 to realize the TTS. Is in the form of bytes because of a circular dependency issue in libassistant protos. 
It is a serialized proto of type speech.s3.TtsServiceRequest.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataGenMlAlarmMetadata": { -"description": "Metadata for machine learning generated audio samples. This will be similar to character alarms, Category will be set MEDIA but this metadata field will be used to identify the ringtone type on surface. (go/magenta-alarm-ringtones).", -"id": "AssistantApiCoreTypesRingtoneTaskMetadataGenMlAlarmMetadata", -"properties": { -"isEnabled": { -"type": "boolean" -}, -"ringtoneLabel": { -"description": "Label for the generated ringtone.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataGentleWakeInfo": { -"description": "Gentle wake actions like slowly brightening the room/device screen leading up to the alarm firing (go/cube-gentle-wake-up).", -"id": "AssistantApiCoreTypesRingtoneTaskMetadataGentleWakeInfo", -"properties": { -"effectDurationMs": { -"description": "Specifies how long the effect lasts. Allowed for effect to last after the alarm has started ringing. If unset or negative or 0, effect is assumed to last until alarm trigger time.", -"format": "int64", -"type": "string" -}, -"isEnabled": { -"description": "Indicates if gentle wake action is to be performed before this alarm fires. This is enabled only if the device supports sunrise alarm capability. http://cs/symbol:assistant.api.SunriseFeaturesSupport", -"type": "boolean" -}, -"startTimedeltaMs": { -"description": "Specifies how long before the alarm fire time, the wakeup effect will start. ALWAYS POSITIVE.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataOnDeviceAlarmMetadata": { -"description": "This describes the alarm sound resource enum and the alarm sound label for the on device alarm sound. 
On-device ringtones are product specific, hence Opal/UI layer will be responsible for populating this metadata at creation/edit. The enum map will be used to convert to an internal resource id used by libassistant for accessing the asset which are not exposed to UI.", -"id": "AssistantApiCoreTypesRingtoneTaskMetadataOnDeviceAlarmMetadata", -"properties": { -"onDeviceAlarmSound": { -"description": "Opal/UI layer will set this bit based on the user selection.", -"enum": [ -"DEFAULT", -"MELLOW", -"MODERN_TIMES", -"BEAUTIFUL_MIND", -"LITTLE_SUNSHINE", -"TOUCH_OF_ZEN", -"ABOUT_TIME", -"RANDOM", -"BOROBUDUR", -"PEBBLES", -"BRIGHT_MORNING", -"ACROSS_THE_VALLEY", -"MORNING_SONG", -"KYOTO", -"AWAKEN", -"CUCKOO", -"DIGITAL_BLAST", -"ACOUSTIC_SUNLIGHT", -"SUNRISE_BOSSA", -"CALM_GLOW", -"ANTIQUE_CLOCK", -"JUST_BIRDS", -"JUNGLE_AMBIENCE", -"QUAINT_VILLAGE", -"BUBBLY_BOSSA", -"ACOUSTIC_JAM", -"EUPHORIC" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"onDeviceAlarmSoundLabel": { -"description": "A string label to identify the alarm sound name. Opal/UI layer will set this as per product definition. This will be used to display the name of the selected ringtone.", -"type": "string" -}, -"ttsServiceRequestBytes": { -"description": "This is used to call S3 to realize the TTS. Is in the form of bytes because of a circular dependency issue in libassistant protos. It is a serialized proto of type speech.s3.TtsServiceRequest. This request will contain an ssml with the url to the ringtone files hosted on gstatic.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesRingtoneTaskMetadataRoutineAlarmMetadata": { -"id": "AssistantApiCoreTypesRingtoneTaskMetadataRoutineAlarmMetadata", -"properties": { -"routineId": { -"description": "The unique id for each routine. 
When the alrm is dismissed, it will trigger the routine of the routine alarm's creator if feasible.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesSipProviderInfo": { -"description": "Session Initiation Protocol (SIP) information for providers that use SIP to initiate multimedia communication sessions, like Google Voice and Fi. https://en.wikipedia.org/wiki/Session_Initiation_Protocol", -"id": "AssistantApiCoreTypesSipProviderInfo", -"properties": { -"providerId": { -"description": "The providers id (MID) which is the primary identifier for a call provider within the Assistant. A MID, or machine identifier, is a unique identifier issued by Knowledge Graph for all entities contained in it's graph.", -"type": "string" -}, -"realm": { -"description": "Calling realm to be use for each call. i.e. For anonymous, this would be set to anonymous.chirp.google.com", -"type": "string" -}, -"useBirdsongTacl": { -"deprecated": true, -"description": "If true, client should use the Birdsong TaCL API for this call. Uses the VoiceCallManager API by default. For more details: go/birdsong-migration-google-home", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesSurfaceIdentity": { -"description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6", -"id": "AssistantApiCoreTypesSurfaceIdentity", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The identifier of the device." -}, -"legacySurfaceType": { -"deprecated": true, -"description": "DEPRECATED: The legacy device's surface type enum. NOTE: Prefer using the ontological `surface_type` field above. 
If you need to access the legacy surface type, please file a bug and add it in your code to migrate to ontological surface type.", -"enum": [ -"UNKNOWN", -"ANDROID_ALLO", -"ANDROID_AUTO", -"ANDROID_THINGS_CUBE", -"ANDROID_THINGS_JASPER", -"ANDROID_TV", -"ANDROID_TV_KIDS", -"ANDROID_WEAR", -"AR_GLASSES", -"ASSISTANT_SDK", -"AUDIOWEAR", -"BUBBLE_CHARACTERS_IOS", -"CAPABILITY_BASED_SURFACE", -"CHROMECAST_ASSISTANT", -"CHROMECAST_MANHATTAN", -"CHROMECAST_SEARCH", -"CLOUD_DEVICE", -"COMPANION_SCREEN", -"DYNAMITE_WEB", -"ENSEMBLE", -"EYESFREE_AGSA", -"EYESFREE_GMM", -"GBOARD", -"GLASS", -"GOOGLE_HOME", -"HANGOUTS_CHATBOT", -"IOS_ALLO", -"IOS_GSA", -"IOS_WEAR", -"LIBASSISTANT", -"LINE_CHATBOT", -"MATTER", -"MULTIMODAL_AGSA", -"NON_ASSISTANT_SURFACE", -"OPA_AGSA", -"OPA_AGSA_CHROME_OS", -"OPA_ANDROID_AUTO", -"OPA_ANDROID_LITE", -"OPA_ANDROID_SCREENLESS", -"OPA_ANDROID_SMART_DISPLAY", -"OPA_ANDROID_TABLET", -"OPA_CROS", -"OPA_GACS", -"OPA_IOS", -"OPA_IOS_SCREENLESS", -"OPA_KAIOS", -"OPA_MOBILE_WEB", -"RTOS_PHONE", -"SMS_CHATBOT", -"TELEGRAM_CHATBOT", -"TELEPHONE_ASSISTANT", -"VERILY_ONDUO", -"YOUTUBE_APP", -"AGSA_BISTO_FOR_EVAL", -"COGSWORTH_FOR_EVAL", -"LOCKHART_MIC_FOR_EVAL", -"OPA_ANDROID_AUTO_EMBEDDED_FAKE", -"SPARK", -"WALLE", -"UNIT_TESTING" -], -"enumDeprecated": [ -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"Deprecated (never used). Use OPA_ANDROID_AUTO instead.", -"Assistant on small screen stationary device.", -"", -"", -"", -"", -"Assistant on AR Glasses with both visual and audio experiences. 
Ask ar-assistant@ for details.", -"For Assistant SDK gRPC client.", -"", -"go/assistant-lamda-overview-characters", -"Surface for capability based testing.", -"For chrome cast assistant web app.", -"For chrome cast with assistant + screen (e.g., Google Nest Hub).", -"", -"Virtual device for event-based triggering, e.g. executing time", -"scheduled routines: go/routine-cloud-ex For Quartz.", -"", -"", -"Deprecated. Please use OPA_ANDROID_SCREENLESS.", -"", -"For Gboard app", -"", -"", -"", -"", -"", -"", -"For standalone libassistant devices.", -"LINE 3P messaging app", -"For Matter devices. go/matter-prod", -"Deprecated.", -"Surfaces that do not speak to the Assistant, i.e. web HQ", -"Assistant on Android phones accessed through the Google App (velvet). This represents neither all of the assistant on android phones (ANDROID_ALLO, VERILY_ONDUO, etc.) nor all of the assistant built on top of AGSA (ANDROID_WEAR, OPA_ANDROID_SCREENLESS, OPA_AGSA_CHROME_OS, etc.).", -"Deprecated. Please use OPA_CROS.", -"", -"This is OPA on Android Go (not part of AGSA)", -"Assistant on Nexus with screen off/locked. Use go/bisto device to trigger.", -"Assistant on Titan hub mode (go/titan-hubmode-surface). This is OPA Android first party Smart Display devices. The UI experience is built on Opal (Gallium and Flutter) and native Android.", -"Assistant on Android tablet", -"Assistant on native Chrome OS (go/croissant).", -"For assistant on GACS devices (go/gacs-dd). Google Assistant Conversation Service (GACS) defines intents the device accepts. This surface serves the intents in the assistant response.", -"", -"Assistant on iOS with screen off/locked. Use go/bisto device to trigger.", -"Assistant on KaiOS. go/kaiosama", -"Assistant on Mobile Web. go/opa-spidey", -"Assistant on low-cost RTOS phones (go/conceRTOS).", -"", -"Telegram 3P messaging app", -"Assistant on a phone call (go/telephone).", -"", -"A Youtube app.", -"Eval-only surfaces. These surfaces are not real surfaces. 
They are only used in intermediate steps of eval query and request generation: 1. Eval samplers (OPA query sampler and Cannery sampler) checks logs and temporarily assigns these surface types to the logs, to put Bisto, Lockhart Mic, ... queries in the correct query pools. 2. Request builders uses these surface types to determine how TaskRequest for Bisto, Lockhart Mic, ... should be built, like whether some user user agent should be used, whether some entry source should be set. The surface types in the generated TaskRequest is still the production surface types, like OPA_ANDROID_SCREENLESS or OPA_AGSA. Temp surface for Bisto Android eval. Will be merged with OPA_ANDROID_SCREENLESS in the future.", -"", -"", -"OPA_ANDROID_AUTO has another sub-surface (embedded) that differs in the reported capabilities, client-ops, flags but uses the same surface_type string of OPA_ANDROID_AUTO. This fake surface would allow running turing2 tests and evals for this embedded surface. Deprecated as of Feb 2019. Please use \"OPA_ANDROID_AUTO_EMBEDDED\" client type for tests and eval. See go/client-onboarding for more details. https://cs.corp.google.com/piper///depot/google3/assistant/assistant_server/tools/util/consts.h?l=32&rcl=247481455", -"Unlaunched new surface prototype, ask spark-eng@.", -"Wall-E is an Area120 Project building assistant powered robots. The surface is an extended joplin which have same capabilities as a google home adding custom robot features. Currently in active development. Please reach out walle-software@ or bingxin@ for questions. For details see go/walle-as-pipeline-1-pager.", -"Surface used for unit-testing purposes. Unit-tests might still require the existence of a \"valid\" SurfaceType to succeed (eg. initialization of the CapabilityBuilder, proto matching, etc.). With the move away from SurfaceType checks, a generic \"TESTING\" SurfaceType can be used for such tests without affecting the test behavior. 
Unlike the CAPABILITY_BASED_SURFACE, this proto does not have any capabilities or ResponseType tied to it. It should only be used for unit-tests and should not be exposed in the ASDebugger." -], -"type": "string" -}, -"surfaceType": { -"description": "The device's surface type. The types are defined at google3/assistant/api/core_types/surfaces.gcl. For more details, refer to go/ontologicalize-surface-type.", -"enum": [ -"UNKNOWN_TYPE", -"ACCL", -"AGSA", -"ANDROID", -"ANDROID_AUTO", -"ANDROID_LITE", -"ANDROID_PHONE", -"ANDROID_SCREENLESS", -"ANDROID_SMART_DISPLAY", -"ANDROID_TABLET", -"ANDROID_THINGS", -"ANDROID_THINGS_CUBE", -"ANDROID_THINGS_JASPER", -"ANDROID_TV", -"ANDROID_WEAR", -"ASSISTANT_KIT", -"ASSISTANT_SDK", -"AUTO", -"CAST_OS", -"CHROME_OS", -"CHROMECAST_MANHATTAN", -"CLOUD_DEVICE", -"CROS", -"FITBIT_OS_WATCH", -"FITBIT_OS_WATCH_ANDROID", -"FITBIT_OS_WATCH_IOS", -"GOOGLE_HOME", -"HEADPHONE", -"HEADPHONE_ANDROID", -"HEADPHONE_ANDROID_SCREENLESS", -"HEADPHONE_IOS", -"IOPA", -"IOS", -"IOS_SCREENLESS", -"IPAD", -"IPHONE", -"KAI_OS", -"KAI_OS_AMA", -"LIBASSISTANT", -"MATTER", -"PHONE", -"PIXEL", -"PIXEL5", -"PIXEL6", -"PIXEL7", -"PIXEL8", -"PIXEL_BUDS", -"PIXEL_BUDS_SCREENLESS", -"PIXEL_TABLET", -"PIXEL_TABLET_HUB_MODE", -"PIXEL_TABLET_PERSONAL_MODE", -"PIXEL_WATCH", -"SCREENLESS", -"SMART_DISPLAY", -"SPEAKER", -"TABLET", -"TELEPHONE", -"THING", -"WATCH", -"WEAR_OS", -"WEAR_OS_WATCH" -], -"enumDescriptions": [ -"Unknown surface type.", -"Android conversation client library (go/accl) is an Android library which implements the Conversation Protocol that allows a surface to have conversations with the Assistant server. OWNERS: surfaces-infra-core@", -"Android Google Search App (go/agsa). OWNERS: surfaces-infra-core@", -"Android. OWNERS: surfaces-infra-core@", -"Assistant on Android Auto (go/ast-auto). OWNERS: opa-android-leads@", -"Assistant on Android Go (not part of AGSA). 
OWNERS: assistantgo-eng@", -"Assistant on Android phones accessed through the Google App (velvet). This surface is formerly known as OPA_AGSA. OWNERS: opa-android-leads@", -"Assistant on Nexus with screen off/locked. Use go/bisto device to trigger. OWNERS: opa-android-leads@", -"Assistant on Titan hub mode (go/titan-hubmode-surface). This is OPA Android first party Smart Display devices. The UI experience is built on Opal (Gallium and Flutter) and native Android. OWNERS: opa-android-leads@", -"Assistant on Android tablet. OWNERS: opa-android-leads@", -"Android Things (go/things). OWNERS: surfaces-infra-core@", -"Lenovo Smart Clock v1, a stationary smart display device with a 4-inch screen, targeted for bedroom/bedside use cases like alarm, sleep tracking. Based on Android Things. See go/cube-home. OWNERS: cube-eng@", -"Former codename for 3P 8-inch and 10-inch Smart Displays from Lenovo, JBL and LG, targeted for use in the kitchen. Based on Android Things. See go/jasper-home. OWNERS: jasper-eng@", -"The Android platform that powers smart televisions, set-top-boxes, and over-the-top devices. See go/atv-eng and go/opa-tv. OWNERS: opa-tv-eng@", -"Legacy Wear OS Assistant. OWNERS: wear-assistant-eng@", -"AssistantKit (go/assistantkit) is a collection of iOS libraries allowing first party apps to integrate Google Assistant features. OWNERS: assistantkit-eng@", -"Standalone gRPC based client. See go/easi. OWNERS: assistant-sdk-service-eng@", -"Automobile. OWNERS: surfaces-infra-core@", -"Cast OS (go/opal-castos-docs#what-is-castos). OWNERS: surfaces-infra-core@", -"Chrome OS (go/chromsos) OWNERS: surfaces-infra-core@", -"For chromecast with assistant + screen, e.g. Newman. OWNERS: cast-eng-platform@", -"Virtual device for event-based triggering, e.g. executing time scheduled routines: go/scheduled-routines. OWNERS: surfaces-infra-core@", -"Assistant on native Chrome OS (go/croissant). OWNERS: croissant@", -"Assistant on Fitbit OS Watch. 
OWNERS: assistant-wearable-team@", -"Assistant on Fitbit Watch paired with AGSA. OWNERS: assistant-wearable-team@", -"Assistant on Fitbit Watch paired with iOPA. OWNERS: assistant-wearable-team@", -"Google home speaker, i.e., Chirp devices. See go/assistant-speakers. OWNERS: assistant-speaker-leads@", -"Headphone. OWNERS: surfaces-infra-core@", -"Assistant on headphones with APA. OWNERS: assistant-wearable-team@", -"Assistant on headphones with AGSA-C. OWNERS: assistant-wearable-team@", -"Assistant on headphones paired with iOPA. OWNERS: assistant-wearable-team@", -"Assistant on iOS devices (go/opa-ios). OWNERS: opa-ios-eng@", -"iOS. OWNERS: surfaces-infra-core@", -"Assistant on iOS with screen off/locked. Use go/bisto device to trigger. OWNERS: bisto-team@", -"iPad devices. OWNERS: surfaces-infra-core@", -"iPhone devices. OWNERS: surfaces-infra-core@", -"Kai OS (go/kaios). OWNERS: surfaces-infra-core@", -"Assistant on KaiOS (go/kaiosama). OWNERS: assistant-kaios-eng@", -"LibAssistant (go/libassistant) C++ SDK that powers Assistant experience on both 1P devices like Google Home and 3P devices like Sonos. OWNERS: libassistant-core@", -"Matter based clients. go/matter-prod. OWNERS: assistant-media@", -"A phone. OWNERS: surfaces-infra-core@", -"Pixel devices. OWNERS: surfaces-infra-core@", -"Pixel 5. OWNERS: surfaces-infra-core@", -"Pixel 6. OWNERS: surfaces-infra-core@", -"Pixel 7. OWNERS: surfaces-infra-core@", -"Pixel 8. OWNERS: surfaces-infra-core@", -"Assistant on Pixel Buds with APA. OWNERS: assistant-wearable-team@", -"Assistant on Pixel Buds with AGSA-C. OWNERS: assistant-wearable-team@", -"Assistant on Pixel Tablet/Tangor (a.k.a, TK). OWNERS: assistant-display-eng@", -"Assistant on Tangor running in Hub mode. OWNERS: assistant-display-eng@", -"Assistant on Tangor running in Personal mode. OWNERS: assistant-display-eng@", -"Assistant on Pixel Watch (a.k.a., Rohan). OWNERS: assistant-wearable-team@", -"Devices with screen off or locked (go/bisto). 
OWNERS: surfaces-infra-core@", -"Smart surface, such as Titan. OWNERS: surfaces-infra-core@", -"A speaker. OWNERS: surfaces-infra-core@", -"A tablet. OWNERS: surfaces-infra-core@", -"Assistant on a phone call (go/telephone). OWNERS: telephone-assistant-eng@", -"IoT device. OWNERS: surfaces-infra-core@", -"Watch. OWNERS: surfaces-infra-core@", -"Wear OS. OWNERS: surfaces-infra-core@", -"Assistant on Wear OS Watch (a.k.a., Edoras). OWNERS: assistant-wearable-team@" -], -"type": "string" -}, -"surfaceTypeString": { -"deprecated": true, -"description": "DEPRECATED. The legacy device's surface type string. NOTE: Prefer using the ontological `surface_type` field. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. The server should not use this field, rather it should use the SurfaceType value derived from this string.", -"type": "string" -}, -"surfaceVersion": { -"$ref": "AssistantApiCoreTypesSurfaceVersion", -"description": "The version of the surface/client. This is different from the Conversation protocol version." -} -}, -"type": "object" -}, -"AssistantApiCoreTypesSurfaceType": { -"description": "Specifies the types of device surfaces. 
LINT.IfChange When adding new surface types make sure that My Activity (https://myactivity.google.com/product/assistant) will correctly render by adding your enum to http://cs/symbol:GetAssistSurfaceName%20f:%5C.cc$ If your type doesn't fit in to any of the existing surfaces messages, add a new message in http://google3/personalization/footprints/boq/uservisible/events/intl/smh_frontend_messages.h.", -"id": "AssistantApiCoreTypesSurfaceType", -"properties": { -"type": { -"enum": [ -"UNKNOWN", -"ANDROID_ALLO", -"ANDROID_AUTO", -"ANDROID_THINGS_CUBE", -"ANDROID_THINGS_JASPER", -"ANDROID_TV", -"ANDROID_TV_KIDS", -"ANDROID_WEAR", -"AR_GLASSES", -"ASSISTANT_SDK", -"AUDIOWEAR", -"BUBBLE_CHARACTERS_IOS", -"CAPABILITY_BASED_SURFACE", -"CHROMECAST_ASSISTANT", -"CHROMECAST_MANHATTAN", -"CHROMECAST_SEARCH", -"CLOUD_DEVICE", -"COMPANION_SCREEN", -"DYNAMITE_WEB", -"ENSEMBLE", -"EYESFREE_AGSA", -"EYESFREE_GMM", -"GBOARD", -"GLASS", -"GOOGLE_HOME", -"HANGOUTS_CHATBOT", -"IOS_ALLO", -"IOS_GSA", -"IOS_WEAR", -"LIBASSISTANT", -"LINE_CHATBOT", -"MATTER", -"MULTIMODAL_AGSA", -"NON_ASSISTANT_SURFACE", -"OPA_AGSA", -"OPA_AGSA_CHROME_OS", -"OPA_ANDROID_AUTO", -"OPA_ANDROID_LITE", -"OPA_ANDROID_SCREENLESS", -"OPA_ANDROID_SMART_DISPLAY", -"OPA_ANDROID_TABLET", -"OPA_CROS", -"OPA_GACS", -"OPA_IOS", -"OPA_IOS_SCREENLESS", -"OPA_KAIOS", -"OPA_MOBILE_WEB", -"RTOS_PHONE", -"SMS_CHATBOT", -"TELEGRAM_CHATBOT", -"TELEPHONE_ASSISTANT", -"VERILY_ONDUO", -"YOUTUBE_APP", -"AGSA_BISTO_FOR_EVAL", -"COGSWORTH_FOR_EVAL", -"LOCKHART_MIC_FOR_EVAL", -"OPA_ANDROID_AUTO_EMBEDDED_FAKE", -"SPARK", -"WALLE", -"UNIT_TESTING" -], -"enumDeprecated": [ -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"Deprecated (never used). Use OPA_ANDROID_AUTO instead.", -"Assistant on small screen stationary device.", -"", -"", -"", -"", -"Assistant on AR Glasses with both visual and audio experiences. Ask ar-assistant@ for details.", -"For Assistant SDK gRPC client.", -"", -"go/assistant-lamda-overview-characters", -"Surface for capability based testing.", -"For chrome cast assistant web app.", -"For chrome cast with assistant + screen (e.g., Google Nest Hub).", -"", -"Virtual device for event-based triggering, e.g. executing time", -"scheduled routines: go/routine-cloud-ex For Quartz.", -"", -"", -"Deprecated. Please use OPA_ANDROID_SCREENLESS.", -"", -"For Gboard app", -"", -"", -"", -"", -"", -"", -"For standalone libassistant devices.", -"LINE 3P messaging app", -"For Matter devices. go/matter-prod", -"Deprecated.", -"Surfaces that do not speak to the Assistant, i.e. web HQ", -"Assistant on Android phones accessed through the Google App (velvet). This represents neither all of the assistant on android phones (ANDROID_ALLO, VERILY_ONDUO, etc.) nor all of the assistant built on top of AGSA (ANDROID_WEAR, OPA_ANDROID_SCREENLESS, OPA_AGSA_CHROME_OS, etc.).", -"Deprecated. Please use OPA_CROS.", -"", -"This is OPA on Android Go (not part of AGSA)", -"Assistant on Nexus with screen off/locked. Use go/bisto device to trigger.", -"Assistant on Titan hub mode (go/titan-hubmode-surface). This is OPA Android first party Smart Display devices. The UI experience is built on Opal (Gallium and Flutter) and native Android.", -"Assistant on Android tablet", -"Assistant on native Chrome OS (go/croissant).", -"For assistant on GACS devices (go/gacs-dd). Google Assistant Conversation Service (GACS) defines intents the device accepts. This surface serves the intents in the assistant response.", -"", -"Assistant on iOS with screen off/locked. 
Use go/bisto device to trigger.", -"Assistant on KaiOS. go/kaiosama", -"Assistant on Mobile Web. go/opa-spidey", -"Assistant on low-cost RTOS phones (go/conceRTOS).", -"", -"Telegram 3P messaging app", -"Assistant on a phone call (go/telephone).", -"", -"A Youtube app.", -"Eval-only surfaces. These surfaces are not real surfaces. They are only used in intermediate steps of eval query and request generation: 1. Eval samplers (OPA query sampler and Cannery sampler) checks logs and temporarily assigns these surface types to the logs, to put Bisto, Lockhart Mic, ... queries in the correct query pools. 2. Request builders uses these surface types to determine how TaskRequest for Bisto, Lockhart Mic, ... should be built, like whether some user user agent should be used, whether some entry source should be set. The surface types in the generated TaskRequest is still the production surface types, like OPA_ANDROID_SCREENLESS or OPA_AGSA. Temp surface for Bisto Android eval. Will be merged with OPA_ANDROID_SCREENLESS in the future.", -"", -"", -"OPA_ANDROID_AUTO has another sub-surface (embedded) that differs in the reported capabilities, client-ops, flags but uses the same surface_type string of OPA_ANDROID_AUTO. This fake surface would allow running turing2 tests and evals for this embedded surface. Deprecated as of Feb 2019. Please use \"OPA_ANDROID_AUTO_EMBEDDED\" client type for tests and eval. See go/client-onboarding for more details. https://cs.corp.google.com/piper///depot/google3/assistant/assistant_server/tools/util/consts.h?l=32&rcl=247481455", -"Unlaunched new surface prototype, ask spark-eng@.", -"Wall-E is an Area120 Project building assistant powered robots. The surface is an extended joplin which have same capabilities as a google home adding custom robot features. Currently in active development. Please reach out walle-software@ or bingxin@ for questions. For details see go/walle-as-pipeline-1-pager.", -"Surface used for unit-testing purposes. 
Unit-tests might still require the existence of a \"valid\" SurfaceType to succeed (eg. initialization of the CapabilityBuilder, proto matching, etc.). With the move away from SurfaceType checks, a generic \"TESTING\" SurfaceType can be used for such tests without affecting the test behavior. Unlike the CAPABILITY_BASED_SURFACE, this proto does not have any capabilities or ResponseType tied to it. It should only be used for unit-tests and should not be exposed in the ASDebugger." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesSurfaceVersion": { -"description": "The version of the surface/client. New surfaces are encouraged to only use the \u201cmajor\u201d field to keep track of version number. The \u201cminor\u201d field may be used for surfaces that rely on both the \u201cmajor\u201d and \u201cminor\u201d fields to define their version.", -"id": "AssistantApiCoreTypesSurfaceVersion", -"properties": { -"major": { -"format": "int32", -"type": "integer" -}, -"minor": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiCoreTypesWebProviderInfo": { -"description": "The web information of the provider. Next ID: 5", -"id": "AssistantApiCoreTypesWebProviderInfo", -"properties": { -"homeStorage": { -"description": "Serialized storage (context) persisted and retrieved for the app and home.", -"type": "string" -}, -"localizedAppName": { -"description": "The localized app name.", -"type": "string" -}, -"openAppUrl": { -"description": "A URL to open the provider's app.", -"type": "string" -}, -"thirdPartyCustomNluInfo": { -"$ref": "AssistantApiCoreTypesWebProviderInfoThirdPartyCustomNluInfo", -"description": "Info about 3P Custom NLU used in this web provider. TODO(b/321644453) remove when QRewrite is able to call SERoot." 
-} -}, -"type": "object" -}, -"AssistantApiCoreTypesWebProviderInfoThirdPartyCustomNluInfo": { -"id": "AssistantApiCoreTypesWebProviderInfoThirdPartyCustomNluInfo", -"properties": { -"locale": { -"description": "The locale of this agent version, represented by BCP-47 language strings, such as \"en\", \"en-US\", \"fr\", \"fr-CA\", \"sr-Latn\", \"zh-Hans-CN\", etc.", -"type": "string" -}, -"nluAgentId": { -"description": "Unique internal identifier of 3P Custom NLU agent. UUID.", -"type": "string" -}, -"nluAgentVersion": { -"description": "Identifies the 3P Custom NLU agent version.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiCrossDeviceExecutionCapability": { -"id": "AssistantApiCrossDeviceExecutionCapability", -"properties": { -"localConnectivityEnabled": { -"description": "Whether the device has torus/usonia capabililities enabled or not.", -"type": "boolean" -}, -"remoteCastMediaEnabled": { -"description": "Whether the device supports cast media originated from a remote device to be executed through local execution and can upload results asynchronously. Needs to be checked before sending remote media initiation through local channel since it needs an async result upload path.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiDataValidateCapabilities": { -"id": "AssistantApiDataValidateCapabilities", -"properties": { -"fallbackToTetheredDevice": { -"description": "If true, data.VALIDATE should be routed to the tethered device (if present) if the tethered device supports data.VALIDATE and the primary device does not.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiDate": { -"description": "A Gregorian calendar date.", -"id": "AssistantApiDate", -"properties": { -"day": { -"description": "The day, in 1...31.", -"format": "int32", -"type": "integer" -}, -"month": { -"description": "The month, in 1...12.", -"format": "int32", -"type": "integer" -}, -"year": { -"description": "The year, e.g. 
2016.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiDateTime": { -"description": "A date-time specification, combining a date and civil time (relative to a given timezone).", -"id": "AssistantApiDateTime", -"properties": { -"date": { -"$ref": "AssistantApiDate", -"description": "A Gregorian calendar date." -}, -"timeOfDay": { -"$ref": "AssistantApiTimeOfDay", -"description": "A civil time relative to a timezone." -}, -"timeZone": { -"$ref": "AssistantApiTimeZone", -"description": "A time zone in IANA format." -} -}, -"type": "object" -}, -"AssistantApiDateTimeRange": { -"description": "A representation of a range of time with start and end datetime specified.", -"id": "AssistantApiDateTimeRange", -"properties": { -"endDate": { -"$ref": "AssistantApiDateTime", -"description": "End date of the range." -}, -"startDate": { -"$ref": "AssistantApiDateTime", -"description": "Start date of the range." -} -}, -"type": "object" -}, -"AssistantApiDeviceCapabilities": { -"description": "This message describes roughly what a surface is capable of doing and metadata around those capabilities. These capabilities are determined based on: - device hardware - software - status (e.g. volume level, battery percentage) These capabilities refer to the surface and not the physical device. The list of supported surfaces can be found in the assistant.api.core_types.SurfaceType enum. A surface's capabilities can differ from the device's. An example would be ANDROID_ALLO running on Pixel. Allo does not support AudioInput while the Pixel does. In this case, audio_input will be set to false for Assistant Allo requests while it might be set to true for OPA_NEXUS requests. Next ID: 37", -"id": "AssistantApiDeviceCapabilities", -"properties": { -"androidIntentCapabilities": { -"$ref": "AssistantApiAndroidIntentCapabilities", -"description": "Capabilites related to Android intent support." 
-}, -"assistantCapability": { -"description": "Capabilities if surface supports Google Assistant.", -"enum": [ -"UNKNOWN", -"SUPPORTED", -"UNSUPPORTED" -], -"enumDescriptions": [ -"", -"", -"Assistant is no longer supported(e.g., on legacy surfaces)." -], -"type": "string" -}, -"audioInput": { -"$ref": "AssistantApiAudioInput", -"description": "These capabilities are scoped to the ability to gather audio. It includes information like the type of audio that can be gathered (e.g. public, private)." -}, -"audioOutput": { -"$ref": "AssistantApiAudioOutput", -"description": "These capabilities are scoped to the ability to play audio. It includes information like the type of audio that can be played (e.g. public, private)." -}, -"bluetoothCapabilities": { -"$ref": "AssistantApiBluetoothCapabilities", -"description": "Bluetooth capabilities related to usage of a feature." -}, -"callCapabilities": { -"$ref": "AssistantApiCallCapabilities", -"description": "The call capabilities of this device. go/call-capabilities" -}, -"camera": { -"$ref": "AssistantApiCameraCapabilities", -"description": "These capabilities are scoped to the camera abilities of this device." -}, -"carUxRestrictions": { -"description": "UX restrictions for Auto.", -"items": { -"enum": [ -"UX_RESTRICTIONS_UNSPECIFIED", -"UX_RESTRICTIONS_BASELINE", -"UX_RESTRICTIONS_FULLY_RESTRICTED", -"UX_RESTRICTIONS_NO_KEYBOARD", -"UX_RESTRICTIONS_NO_VIDEO" -], -"enumDescriptions": [ -"This value should not be used.", -"No specific restrictions in place, but baseline distraction optimization guidelines need to be adhered to when CarUxRestrictions.isRequiresDistractionOptimization() returns true.", -"All restrictions are in effect.", -"No text entry for the purpose of searching or other manual text string entry activities.", -"No video - no animated frames > 1fps." 
-], -"type": "string" -}, -"type": "array" -}, -"cast": { -"$ref": "AssistantApiCastCapabilities", -"description": "These capabilities are scoped to the cast abilities of this device." -}, -"communicationUiCapabilities": { -"$ref": "AssistantApiCommunicationUiCapabilities" -}, -"contactLookupCapabilities": { -"$ref": "AssistantApiContactLookupCapabilities" -}, -"dataValidateCapabilities": { -"$ref": "AssistantApiDataValidateCapabilities" -}, -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "This is the same device id that is specified in the conversation protocol and should be unique to each device/user/model combination. For example, if a request is coming from a watch through AGSA the watch and AGSA should have different device_ids. Note: this field should only be used to determine which device the capabilities belong to and not to access the id of the device. Instead DeviceProperties should be used and accessed through ParamsAccessor." -}, -"deviceUxMode": { -"description": "Capabilities related to Android tablet UX experience.", -"enum": [ -"DEVICE_UX_MODE_DEFAULT", -"DEVICE_UX_MODE_SUPPORT_LIMITED_SHARED_LOCKSCREEN" -], -"enumDescriptions": [ -"", -"Device supports the Shared Lock Screen: a glanceable space that lets anyone in the house use shared apps and the Assistant, without unlocking the device. This is for devices which only support limited SLS, e.g. TKY." -], -"type": "string" -}, -"hasVoiceTelephony": { -"description": "Indicates that the device has connection to cellular network that allows it to make voice calls. This is distinct from device just being capable of voice telephony, because the device can be capable yet miss the suitable SIM card (for example, it could miss SIM card altogether, or have data-only SIM card).", -"type": "boolean" -}, -"jwnCapabilities": { -"$ref": "AssistantApiJwnCapabilities", -"description": "Indicates if the client supports Javascript Whatsnext (go/jwn). 
Also contains the Jwn libraries present on the client along with their versions." -}, -"lensPerceptionCapabilities": { -"$ref": "AssistantApiLensPerceptionCapabilities", -"deprecated": true, -"description": "Capabilities related to Lens Perception, i.e. image understanding. See go/lens-perception-sdk." -}, -"location": { -"$ref": "AssistantApiLocationCapabilities", -"description": "These capabilities are scoped to the location abilities of this device." -}, -"loggingOnlyData": { -"$ref": "AssistantApiLoggingOnlyData", -"description": "Data which is produced for logging and debugging. Servers MUST NOT use this for any other purposes, such as branching on it." -}, -"messageCapabilities": { -"$ref": "AssistantApiMessageCapabilities" -}, -"movement": { -"$ref": "AssistantApiMovementCapabilities", -"description": "These capabilities are scoped to abilities of the device to move around." -}, -"notificationCapabilities": { -"deprecated": true, -"description": "DEPRECATED: Use SystemNotificationRestrictions instead. Specifies whether the surface is able to display notifications. This field is superficially similar to ProactiveNotificationOutput, but unlike that field which tracks a per-user preference on the OPA side, this field captures whether the surface is capable of displaying notifications.", -"enum": [ -"NO_NOTIFICATION_CAPABILITY", -"NOTIFICATIONS_DISABLED", -"NOTIFICATIONS_ENABLED" -], -"enumDescriptions": [ -"The surface is not able to display notifications.", -"The notifications are disabled on the surface.", -"The notifications are enabled." -], -"type": "string" -}, -"notificationOutputRestrictions": { -"$ref": "AssistantApiNotificationOutputRestrictions", -"description": "Settings, that reflect whether a specific notification type is allowed for current device, e.g. if the user opted out from notification category or category group. This settings are server-side stored and evaluated unlike SystemNotificationRestrictions field." 
-}, -"outputRestrictions": { -"$ref": "AssistantApiOutputRestrictions", -"description": "These are user configured restrictions indicating what the device is allowed to output from the privacy point of view." -}, -"popOnLockscreenCapability": { -"deprecated": true, -"description": "Capability to support Pop on lockscreen.", -"enum": [ -"POP_ON_LOCKSCREEN_DEFAULT", -"POP_ON_LOCKSCREEN_ENABLED", -"POP_ON_LOCKSCREEN_DISABLED" -], -"enumDescriptions": [ -"Pop will default to enabled on lockscreen.", -"Pop is enabled on lockscreen.", -"Pop is disabled on lockscreen." -], -"type": "string" -}, -"safetyRestrictions": { -"description": "Indicates if the client has safety related restriction.", -"enum": [ -"DEFAULT_NO_SAFETY_RESTRICTION", -"DISTRACTION_SAFETY_RESTRICTION" -], -"enumDescriptions": [ -"The surface has no safety restriction.", -"The surface has safety restriction due to any distraction." -], -"type": "string" -}, -"screen": { -"$ref": "AssistantApiScreenCapabilities", -"description": "These capabilities are scoped to the ability to see and interact with the Assistant through a screen. If the device has no screen it should send an empty ScreenCapabilities. Sending no ScreenCapabilities will cause this to be overridden with the surface default." -}, -"sodaCapabilities": { -"$ref": "AssistantApiSodaCapabilities", -"description": "Capabilities related to SODA (Speech On-Device API)." -}, -"software": { -"$ref": "AssistantApiSoftwareCapabilities", -"description": "These capabilities are scoped to the software available on the device as well as the set of supported Assistant features." -}, -"speechCapabilities": { -"$ref": "AssistantApiSpeechCapabilities", -"deprecated": true, -"description": "DEPRECATED Capabilities related to speech detection on devices." -}, -"supportedLocale": { -"description": "Locales supported by assistant settings for speaking and display. This is independent from device language that is defined in device setting. 
New locales are added based on rollout, whitelist and app version releases because older versions does not have model support. Currently supported locale list differs by surface type.", -"items": { -"type": "string" -}, -"type": "array" -}, -"surfaceIdentity": { -"$ref": "AssistantApiCoreTypesSurfaceIdentity", -"description": "The set of information that helps the server identify the surface." -}, -"surfaceTypeString": { -"deprecated": true, -"description": "DEPRECATED. surface_identity field below already contains this information. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. The server should not use this field, rather it should use the SurfaceType value derived from this string.", -"type": "string" -}, -"systemNotificationRestrictions": { -"$ref": "AssistantApiSystemNotificationRestrictions", -"description": "Restrictions related to system-level notifications. This field is superficially similar to ProactiveNotificationOutput, but unlike that field which tracks a per-user preference on the OPA side, this field captures system level notifications restrictions. This field is not stored and is merged to capabilities from conversation params. It exists mostly for logging purposes of android channel state and global app-level notification opt out." -}, -"thirdPartyCapabilities": { -"$ref": "AssistantApiThirdPartyCapabilities", -"description": "Capabilities related to third party integration." -} -}, -"type": "object" -}, -"AssistantApiDuration": { -"description": "A Duration represents a signed, fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". It is related to Timestamp in that the difference between two Timestamp values is a Duration and it can be added or subtracted from a Timestamp. 
Range is approximately +-10,000 years.", -"id": "AssistantApiDuration", -"properties": { -"nanos": { -"description": "Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive.", -"format": "int32", -"type": "integer" -}, -"seconds": { -"description": "Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiFeatureSpecificActionSupport": { -"id": "AssistantApiFeatureSpecificActionSupport", -"properties": { -"clarificationDataSupported": { -"description": "Whether client supports clarification suggestion chip to be displayed see |assistant.suggestions.ClarificationData|", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiFitnessFeatureSupport": { -"id": "AssistantApiFitnessFeatureSupport", -"properties": { -"supportedActivities": { -"description": "A list of fitness activity types supported by this client.", -"items": { -"enum": [ -"TYPE_UNSPECIFIED", -"WALK", -"RUN", -"ELLIPTICAL", -"SWIM", -"WEIGHTS", -"TREADMILL", -"BIKE", -"YOGA", -"WORKOUT", -"BOOT_CAMP", -"CIRCUIT_TRAINING", -"GOLF", -"HIKING", -"INTERVAL_TRAINING", -"KICKBOXING", -"MARTIAL_ARTS", -"PILATES", -"SPINNING", -"STAIR_CLIMBING", -"TENNIS", -"AEROBICS", -"CORE_TRAINING", -"DANCING", -"HIGH_INTENSITY_INTERVAL_TRAINING", -"KAYAKING", -"ROWING", -"SKIING", -"STANDUP_PADDLEBOARDING", -"STRENGTH_TRAINING", -"SNOWBOARDING" -], -"enumDescriptions": [ -"The type is unknown.", -"Represents a walking activity type.", -"Represents a running activity type.", -"Represents an elliptical activity type.", -"Represents a swimming activity type.", -"Represents a weightlifting 
activity type.", -"Represents a treadmill activity type (e.g. walking/running on a treadmill).", -"Represents a cycling activity type.", -"Represents a yoga activity type.", -"Represents a generic workout activity.", -"Represents a bootcamp activity type.", -"Represents a circuit training activity type.", -"Represents a golf activity type.", -"Represents a hiking activity type.", -"Represents an interval training activity type.", -"Represents a kickboxing activity type.", -"Represents a martial arts activity type.", -"Represents a pilates activity type.", -"Represents a spinning(indoor cycling, stationary biking) activity type.", -"Represents a stair climbing activity type.", -"Represents a tennis activity type.", -"Represents an aerobics activity type.", -"Represents a core training activity type.", -"Represents a dancing activity type.", -"Represents a HIIT activity type.", -"Represents a kayaking activity type.", -"Represents a rowing activity type.", -"Represents a skiing activity type.", -"Represents a standup paddleboading(SUP) activity type.", -"Represents a strength training activity type.", -"Represents a snowboarding activity type." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiFluidActionsSupport": { -"id": "AssistantApiFluidActionsSupport", -"properties": { -"stateSyncMethod": { -"description": "Specifies the params proto that Fluid Actions uses to sync state with server.", -"enum": [ -"STATE_SYNC_METHOD_UNSPECIFIED", -"DIALOG_STATE_PARAMS" -], -"enumDescriptions": [ -"Unspecified state sync method.", -"New params proto used to sync state between client and server. New clients must use this value. For proto details, see: http://google3/assistant/api/params/dialog_state_params.proto" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiGacsCapabilities": { -"description": "Capabilities of Google assistant conversation service(GACS) devices. 
These capabilites including supported GACS actions and response size limitations.", -"id": "AssistantApiGacsCapabilities", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "DeviceId of the accessory device (eg. watch) Commonly the go/dusi (eg. client_instance_id) is provided." -}, -"responseConfig": { -"$ref": "GoogleAssistantAccessoryV1ResponseConfig", -"description": "Configuration sent by device." -}, -"ttsEncoding": { -"deprecated": true, -"description": "DEPRECATED: Format of TTS audio requested by the device.", -"enum": [ -"LINEAR_16BIT", -"MULAW", -"ALAW", -"MP3", -"MP3_64KBPS", -"SPEEX", -"SPEEX_WITH_HEADER_BYTE", -"SPEEX_IN_OGG", -"OPUS_IN_OGG", -"OPUS_24KBPS_IN_OGG", -"OPUS_16KBPS_IN_OGG", -"OPUS_12KBPS_IN_OGG", -"OPUS_16KBPS_CONTAINERLESS", -"OPUS_24KBPS_CONTAINERLESS", -"OPUS_32KBPS_CONTAINERLESS" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"Signed 16-bit little-endian (a.k.a s16le) linear PCM.", -"G.711 audio companding. https://en.wikipedia.org/wiki/G.711 8-bit \u03bc-law encoding according to G.711", -"8-bit A-law encoding according to G.711.", -"The default MP3 encoding is 32kbps. Text-to-speech audio at this bit rate has audible compression artifacts and is not recommended. Please use OPUS_IN_OGG instead. If you must use mp3, use MP3_64KBPS instead.", -"MP3 at 64kbps sounds nearly identical to uncompressed for 24kHz TTS audio, and sounds similar to OPUS_IN_OGG.", -"Raw speex stream with each frame concatenated together. The size of each frame will need to be communciated separately. Variable bitrate encoding is unsupported using this output type.", -"Similar to SPEEX, this mode will concatenate together the speex frames. 
However, each frame will have prepended a single byte that describes the length of the following frame.", -"In this case, we'll wrap the speex contents in an ogg container, resulting in a more generally decode-able format but one with more storage overhead.", -"Opus encoded audio wrapped in an ogg container. The result will be a file which can be played natively on Android, and in browsers (at least Chrome and Firefox). The quality of the encoding is considerably higher than MP3 when using the same bitrate. At the moment, this defaults to 32kbps but the TTS team reserves the right to adjust the bitrate to provide the best experience.", -"Opus encoded audio in 24kbps wrapped in an ogg container. The result will be a file which can be played natively on Android, and in browsers while reducing data consumption slightly.", -"The following bitrates for Opus below 24kbps are here only for special usecases; they result in a significant reduction in quality. Please get in touch with the TTS team if you think you might have a valid usecase. NOTE(staz): These bitrates are not officially supported. TTS is to be served at a minimum of 24kbps everywhere, including in NBU countries. See b/137234032#comment13 on why lower bitrates don't help in tangible ways even in NBU markets.", -"", -"Opus encoded audio using the specified bitrate without a container such as Ogg, RTP or WebM. The response is a stream which can be played natively on low powered wearable devices. Delivered as multiple frames per S3 response. These frames need to be carefully byte-delimited (constant bitrate) by the client. They use a statically defined frame duration of 20ms. 
Frame size per bit-rate: 16kbps: each frame is of size 40 bytes 24kbps: each frame is of size 60 bytes 32kbps: each frame is of size 80 bytes", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiGcmCapabilities": { -"description": "Capabilities related to GCM.", -"id": "AssistantApiGcmCapabilities", -"properties": { -"gcmRegistrationId": { -"description": "GCM registration id for the device. Used to pass messages to the device.", -"type": "string" -}, -"supportsAssistantGcm": { -"description": "Assistant supports GCM on the device. ClientOps can be sent to it over GCM and will be executed.", -"type": "boolean" -}, -"supportsClientInputOverGcm": { -"description": "If it is set to true, then it indicates to server that device is capable of receiving a GCM payload with serialized client input. The client input will be sent back to Assistant Server over conversation protocol.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiGestureCapabilities": { -"description": "The gesture capabilities related to Selina. Next ID: 4", -"id": "AssistantApiGestureCapabilities", -"properties": { -"gestureSensing": { -"description": "Whether Gesture is supported. When false, override the value for tap and omniswipe.", -"type": "boolean" -}, -"omniswipeGestureCapable": { -"description": "Whether omniswipe is supported", -"type": "boolean" -}, -"tapGestureCapable": { -"description": "Whether tap is supported", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiGuestAccessOutput": { -"description": "Access settings for guests.", -"id": "AssistantApiGuestAccessOutput", -"properties": { -"guestAccessOnYoutube": { -"enum": [ -"UNKNOWN_GUEST_ACCESS", -"USE_DEFAULT_ACCOUNT_FOR_GUEST", -"DISABLED_FOR_GUEST" -], -"enumDescriptions": [ -"", -"Guests can access content using linked users' account.", -"No access for guests." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiImmersiveCanvasSupport": { -"id": "AssistantApiImmersiveCanvasSupport", -"properties": { -"confirmationMessageSupported": { -"description": "Whether the client supports confirmation messages in Immersive Canvas actions.", -"type": "boolean" -}, -"pauseSignalSupported": { -"description": "Whether the client support canvas pause signal. If true, the Assistant Server will send a signal when canvas transitioning to pause mode.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiJwnCapabilities": { -"description": "These capabilities are used to determine the jwn libraries and their versions that are present on the client.", -"id": "AssistantApiJwnCapabilities", -"properties": { -"librariesVersionMap": { -"additionalProperties": { -"type": "string" -}, -"description": "The name and version of the jwn libraries currently stored on the client. These are the same that the server communicated when the library was first sent down.", -"type": "object" -}, -"supportedCompressionMode": { -"description": "Compression algorithms supported on the client. Server can choose one of these to compress WhatsNext Javascript programs and libraries.", -"items": { -"enum": [ -"NONE", -"BROTLI", -"FLATE" -], -"enumDescriptions": [ -"jwn_program is not in compressed state.", -"Brotli (google3/third_party/brotli) compression used. Recommended compressor for Javascript.", -"Flate (google3/util/compression/flate) compression used." -], -"type": "string" -}, -"type": "array" -}, -"supportsJwn": { -"description": "Whether the client supports running jwn code.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilities": { -"description": "Capabilities related to Lens Perception, i.e. image understanding. See go/loa-lens-device-capabilities. 
Next ID: 6", -"id": "AssistantApiLensPerceptionCapabilities", -"properties": { -"hasLensPerception": { -"description": "Whether the device supports Lens Perception.", -"type": "boolean" -}, -"isLensDirectIntentAvailable": { -"description": "Indicates whether Lens supports Lens Direct Intent (go/lensdirectintent).", -"type": "boolean" -}, -"isLensLiveViewfinderAvailable": { -"description": "Indicates whether Lens supports Live view-finder experience.", -"type": "boolean" -}, -"isLensPostCaptureAvailable": { -"description": "Indicates whether Lens supports Post-capture experience with an image payload.", -"type": "boolean" -}, -"lensCapabilities": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilities", -"description": "Contains the capabilities that Lens can support." -} -}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilities": { -"description": "The set of capabilities that Lens can support. This is the Assistant proto representation of Lens capabilities defined at j/c/g/android/apps/gsa/search/shared/service/proto/lens_service_event.proto Next ID: 7", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilities", -"properties": { -"dining": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesDining", -"description": "The presence of this message means that Dining is supported." -}, -"education": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesEducation", -"description": "The presence of this message means that Education is supported." -}, -"outdoor": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesOutdoor", -"description": "The presence of this message means that Outdoor is supported." -}, -"shopping": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesShopping", -"description": "The presence of this message means that Shopping is supported." 
-}, -"text": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesText", -"description": "The presence of this message means that intenting directly into the text filter is supported." -}, -"translate": { -"$ref": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesTranslate", -"description": "The presence of this message means that Translation is supported." -} -}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesDining": { -"description": "Dining recognition capability. For example popular dishes on a given restaurant menu image.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesDining", -"properties": {}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesEducation": { -"description": "Education recognition capability.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesEducation", -"properties": {}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesOutdoor": { -"description": "Outdoor place recognition capability. 
For example recognizing storefronts.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesOutdoor", -"properties": {}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesShopping": { -"description": "Shopping recognition capability.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesShopping", -"properties": {}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesText": { -"description": "Text recognition capability.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesText", -"properties": { -"isTextToSpeechSupported": { -"description": "Indicates whether text-to-speech is supported.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiLensPerceptionCapabilitiesLensCapabilitiesTranslate": { -"description": "Translation capability.", -"id": "AssistantApiLensPerceptionCapabilitiesLensCapabilitiesTranslate", -"properties": { -"supportedLanguageTags": { -"description": "The list of language IETF BCP 47 tags that are supported. See the full details in the comment on the equivalent field in: http://google3/java/com/google/android/apps/gsa/search/shared/service/proto/lens_service_event.proto;l=55;rcl=355512559", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiLiveTvChannelCapabilities": { -"id": "AssistantApiLiveTvChannelCapabilities", -"properties": { -"channelsByProvider": { -"description": "A list of channel providers each of which provides a list of its channels.", -"items": { -"$ref": "AssistantApiLiveTvChannelCapabilitiesChannelsByProvider" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiLiveTvChannelCapabilitiesChannelsByProvider": { -"id": "AssistantApiLiveTvChannelCapabilitiesChannelsByProvider", -"properties": { -"channels": { -"description": "A list of channels provided by this input. Keep the performance impact in mind when the number/size of the channels is large. 
When there are too many channels, consider stripping out some data.", -"items": { -"$ref": "AssistantApiLiveTvChannelCapabilitiesLiveTvChannel" -}, -"type": "array" -}, -"inputId": { -"description": "An identifier to identify the input source. For example for TIF based channels, this will be the TIF input ID to differentiate different tuner apps. See https://source.android.com/devices/tv", -"type": "string" -}, -"providerType": { -"description": "Type of provider who provides this channel input.", -"enum": [ -"UNKNOWN_PROVIDER_TYPE", -"OTT_PROVIDER", -"TUNER" -], -"enumDescriptions": [ -"", -"OTT providers providing live tv contents like Sling, Youtube TV.", -"TV Tuner apps providing live tv contents." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiLiveTvChannelCapabilitiesLiveTvChannel": { -"id": "AssistantApiLiveTvChannelCapabilitiesLiveTvChannel", -"properties": { -"channelId": { -"description": "Unique channel identifier.", -"type": "string" -}, -"channelName": { -"description": "A list of channel names and synonyms.", -"items": { -"type": "string" -}, -"type": "array" -}, -"channelNumber": { -"description": "Channel number displayed to user. Optional.", -"type": "string" -}, -"deeplink": { -"description": "A deep link into the Live player app that tunes to this channel.", -"type": "string" -}, -"mid": { -"description": "KG mid of the channel if it exists in KG.", -"type": "string" -}, -"networkMid": { -"description": "Network KG mid of the channel if it exists in KG", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiLiveTvProvider": { -"id": "AssistantApiLiveTvProvider", -"properties": { -"providerInfo": { -"$ref": "AssistantApiCoreTypesProvider", -"description": "Contains detailed provider information such as android app package name." -}, -"providerKey": { -"description": "A provider enum string for OTT providers. 
The available key can be found in go/ump-provider-enum For Tuner provider, the provider key would be an ID the tuner app uploaded from TIF. See https://source.android.com/devices/tv", -"type": "string" -}, -"providerType": { -"enum": [ -"UNKNOWN_PROVIDER_TYPE", -"OTT_PROVIDER", -"TUNER" -], -"enumDescriptions": [ -"", -"OTT providers providing live tv contents like Sling, Youtube TV.", -"TV Tuner apps providing live tv contents." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiLocationCapabilities": { -"id": "AssistantApiLocationCapabilities", -"properties": { -"gpsAvailable": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiLoggingOnlyData": { -"description": "Data which is produced for logging and debugging. Servers MUST NOT use this for any other purposes, such as branching on it. Next ID: 16", -"id": "AssistantApiLoggingOnlyData", -"properties": { -"acpVersion": { -"description": "A user-readable string describing the ACP version (go/acp-version) of the client app used by the user to originate the conversation.", -"type": "string" -}, -"androidId": { -"description": "Random identifier assigned to Android mobile devices. Older logs may have previously stored other kinds of android IDs in this field, but all current logs should use the GServices Id. 
See go/androidids.", -"format": "int64", -"type": "string" -}, -"appVersion": { -"description": "A user-readable string describing the version of the client app used by the user to originate the conversation.", -"type": "string" -}, -"assistantSettingsSource": { -"description": "An enum specifying when was this ATV AssistantSettings entry initially created.", -"enum": [ -"NOT_SET", -"FIRST_SCREEN_DEVICE_OOBE", -"FIRST_SCREEN_KATNISS_OOBE", -"FIRST_SCREEN_KATNISS_BACKGROUND_LINKING", -"FIRST_SCREEN_DELEGATION_OOBE", -"FIRST_SCREEN_FIXER_JOB", -"FIRST_SCREEN_FCM_JOB", -"FIRST_SCREEN_HOME_GRAPH_JOB", -"FIRST_SCREEN_PERSONAL_BIT", -"FIRST_SCREEN_VOICE_INPUT_BIT", -"FIRST_SCREEN_OTHER", -"SECOND_SCREEN_AGSA", -"SECOND_SCREEN_GHA_IOS", -"SECOND_SCREEN_GHA_ANDROID" -], -"enumDescriptions": [ -"LINT.IfChange", -"Created during ATV Device OOBE(setupwraith)", -"Created within Katniss(Assistant client on TV)", -"Created within Katniss when running cast linking in the background of device setup.", -"Created within Katniss delegation path", -"Created by fix settings job in katniss.", -"Created by FCM jobs in katnisss.", -"Created by home graph jobs in katniss.", -"Created by enable personal bit button in Settings UI", -"Created by enable voice input bit button in Settings UI", -"Created by other parts in katniss", -"Created by AGSA", -"Created by Google Home App IOS", -"Created by Google Home App Android LINT.ThenChange(//depot/google3/logs/proto/assistant/capabilities_log.proto)" -], -"type": "string" -}, -"boardName": { -"description": "The type of board used by manufacturer for this device", -"type": "string" -}, -"boardRevision": { -"description": "The revision of board used", -"type": "string" -}, -"castAssistantSettingLinkingResult": { -"$ref": "AssistantApiCastAssistantSettingLinkingResult", -"description": "This field records the linking status between Assistant setting entry and Cast setting entry. Currently only ATV surface populates this field for profiling purpose." 
-}, -"deviceModel": { -"description": "A user-readable string describing the device's hardware platform.", -"type": "string" -}, -"embedderBuildInfo": { -"description": "Any relevant info concerning the build options of the embedder (that is the software which runs as the 'driver' of an Assistant library, such as libassistant. the embedder is typically built by a third party)", -"type": "string" -}, -"initialAppVersion": { -"description": "A string recording the app version that is initially used to created this settings entry.", -"type": "string" -}, -"isPrimaryUser": { -"description": "Boolean field to indicate whether a certain user is the primary user of a device or not. This is useful for distinguishing the user0 from other users given user0 might have certain privileges (Eg: Hubmode on Tangor)", -"type": "boolean" -}, -"mdnsDisplayName": { -"description": "default display name of device over mdns. This is specified at the factory, not specified by the user.", -"type": "string" -}, -"platformBuild": { -"description": "A user-readable string describing the device's software platform.", -"type": "string" -}, -"virtualReleaseChannel": { -"deprecated": true, -"description": "A string describing device's release channel. 
For cast devices, the string will look like \"qa-beta-channel\", \"eng-no-update\", etc.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiMediaControlSupport": { -"id": "AssistantApiMediaControlSupport", -"properties": { -"skipConfirmationsWhilePlaying": { -"description": "Whether to prevent confirmations (text, tts) for media control actions while media is playing so that the media session is not interrupted.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiMessageCapabilities": { -"id": "AssistantApiMessageCapabilities", -"properties": { -"fallbackToTetheredDeviceAppCapabilities": { -"description": "If true, APP_ID queries initiated by this device should fall back to execution on the tethered device if it's available and if the primary device cannot perform the action (e.g. due to the app not being installed).", -"type": "boolean" -}, -"preferTargetingPrimaryDevice": { -"description": "For chat_message.SEND targeting, when either the primary or secondary (tethered) device is capable of handling the chat_message.SEND action, prefer targeting it to the primary device.", -"type": "boolean" -}, -"supportedRecipientTypes": { -"description": "Should only be checked if nonempty.", -"items": { -"enum": [ -"UNSPECIFIED_ENDPOINT", -"PHONE_NUMBER", -"EMAIL_ADDRESS", -"APP_UNIQUE_ID", -"EMERGENCY_PHONE_NUMBER", -"VOICEMAIL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiMessageSummarySupport": { -"description": "LINT.IfChange(messageSummarySupport)", -"id": "AssistantApiMessageSummarySupport", -"properties": { -"deviceSettingStatus": { -"description": "The current setting status of message summary on current device.", -"enum": [ -"DEVICE_SETTING_STATUS_UNKNOWN", -"DEVICE_SETTING_STATUS_ENABLED", -"DEVICE_SETTING_STATUS_DISABLED" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"lastVoiceOptInFlowTimestamp": { -"description": "Last 
time that voice opt-in flow has been triggered.", -"format": "google-datetime", -"type": "string" -}, -"readNotificationSummarizationSupported": { -"description": "Whether the client supports message summarization.", -"type": "boolean" -}, -"voiceOptInFlowCounter": { -"description": "The voice consent flow counter on current device.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiMovementCapabilities": { -"id": "AssistantApiMovementCapabilities", -"properties": { -"mobility": { -"description": "Indicates how much the device moves around. E.g., TV has a low mobility level, while Auto has a very high level.", -"enum": [ -"UNSPECIFIED", -"LOW", -"MEDIUM", -"HIGH", -"VERY_HIGH" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiNotificationOutputRestrictions": { -"id": "AssistantApiNotificationOutputRestrictions", -"properties": { -"optOutState": { -"$ref": "AssistantApiNotificationOutputRestrictionsOptOutState" -} -}, -"type": "object" -}, -"AssistantApiNotificationOutputRestrictionsOptOutState": { -"description": "Per category/category group notification opt out settings.", -"id": "AssistantApiNotificationOutputRestrictionsOptOutState", -"properties": { -"categoryGroupState": { -"items": { -"$ref": "AssistantApiNotificationOutputRestrictionsOptOutStateCategoryGroupState" -}, -"type": "array" -}, -"categoryState": { -"items": { -"$ref": "AssistantApiNotificationOutputRestrictionsOptOutStateCategoryState" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiNotificationOutputRestrictionsOptOutStateCategoryGroupState": { -"id": "AssistantApiNotificationOutputRestrictionsOptOutStateCategoryGroupState", -"properties": { -"categoryGroup": { -"enum": [ -"UNSPECIFIED", -"SYSTEM", -"PROMOTIONAL", -"SUBSCRIPTIONS", -"PROACTIVE", -"REMINDERS", -"EXTENDED_ANSWERS", -"FEEDBACK", -"ACTIONS_ON_GOOGLE", -"DUO_MISSED_CALLS", -"HOME_AUTOMATION", -"GETTING_AROUND", 
-"UNIT_TESTING" -], -"enumDescriptions": [ -"", -"This is required, do not remove. Group for notification categories that should not be displayed at all.", -"Promotional notifications.", -"Notifications for Subscriptions.", -"Proactive notifications", -"Reminders (including assigned reminders).", -"Cross surface answers.", -"Assistant asking for feedback from users.", -"AoG 3P notifications.", -"Missed call notifications from DUO.", -"Home Automation notifications.", -"Getting around notifications.", -"Always keep at the end." -], -"type": "string" -}, -"state": { -"enum": [ -"OPTED_IN", -"OPTED_OUT" -], -"enumDescriptions": [ -"Default value is intentionally set to opted in as all categories/category groups are enabled by default.", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiNotificationOutputRestrictionsOptOutStateCategoryState": { -"id": "AssistantApiNotificationOutputRestrictionsOptOutStateCategoryState", -"properties": { -"category": { -"enum": [ -"UNSPECIFIED", -"SYSTEM_REQUIRED_LOW_PRIORITY", -"SYSTEM_REQUIRED_HIGH_PRIORITY", -"DISCOVERY", -"REALTIME_PROMOTIONAL", -"SUBSCRIPTIONS", -"FLIGHT_UPDATES", -"TRANSPORT_UPDATES", -"BILL_UPDATES", -"PACKAGE_DELIVERY_UPDATES", -"EVENT_UPDATES", -"DUE_DATE_UPDATES", -"CELEBRATION_UPDATES", -"ROUTINE_UPDATES", -"TASK_SUGGESTIONS", -"AT_A_PLACE", -"APP_RECOMMENDATIONS", -"TRAVEL_UPDATES", -"REMINDER_DUE", -"ASSIGNED_REMINDER_DUE", -"ROUTINE_SETTINGS_UPDATES", -"MAPS_OR_DIRECTIONS", -"MOVIE_SHOWTIMES", -"SPORTS_UPDATES", -"NEWS_UPDATES", -"SONGS_AND_ARTISTS", -"TRANSLATIONS", -"ANSWERS_TO_QUESTIONS", -"SETTINGS_LINKS", -"RESERVATION_UPDATES", -"DEPRECATED_FEEDBACK_REQUESTS", -"FEEDBACK_REQUESTS", -"ACTIONS_ON_GOOGLE", -"DUO_MISSED_CALLS", -"HOME_AUTOMATION", -"TIME_TO_LEAVE", -"COMMUTE", -"OCCASIONALLY_REPEATED_ACTIONS", -"FREQUENTLY_REPEATED_ACTIONS", -"ASPIRE", -"ASSISTANT_DRIVING_MODE", -"DISCOVERY_DEFAULT_PRIORITY", -"HOLIDAY_REMINDERS", -"CROSS_DEVICE_TIMER", -"LIVE_CARD", -"ASYNC_ACTION", 
-"UNIT_TESTING" -], -"enumDescriptions": [ -"", -"Categories required by Assistant & won't be shown on settings page.", -"", -"Promotional discovery notifications.", -"", -"Subscriptions (both event and time based).", -"Proactive notification categories.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Reminder notification categories.", -"We're probably going to deprecate ASSIGNED_REMINDER_DUE.", -"Routine related notification categories.", -"Extended answer (cross device) notification categories.", -"", -"", -"", -"", -"", -"", -"", -"", -"Feedback requests from Assistant.", -"", -"Actions on Google (Third party) notifications.", -"Missed call notifications from DUO.", -"Home Automation related notifications.", -"Getting around notifications.", -"", -"Repeated actions. Having two categories for occasionally and frequently repeated ones since the FREQUENTLY_REPEATED_ACTIONS are buzzing.", -"", -"Assistant Aspire notifications.", -"", -"Discovery notifications that shows in status bar but doesn't make noise.", -"", -"", -"Live cards, these are not real notifications but live cards shown on Home App.", -"These are not really notifications, rather push messages to initiate an asynchronous action.", -"Always keep at the end." -], -"type": "string" -}, -"state": { -"enum": [ -"OPTED_IN", -"OPTED_OUT" -], -"enumDescriptions": [ -"Default value is intentionally set to opted in as all categories/category groups are enabled by default.", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiOemCapabilities": { -"description": "Encapsulates the action capabilities of the OEM device. This data is merged from Device Model lookup, per-device registration, and per-request context. 
This data is sent to NLU layer for query understanding.", -"id": "AssistantApiOemCapabilities", -"properties": { -"cloudCapability": { -"$ref": "AssistantDevicesPlatformProtoCloudCapability", -"description": "The OEM Cloud execution capability of this device, containing routing details for cloud fulfillment." -}, -"cloudDeviceCapabilities": { -"additionalProperties": { -"description": "Properties of the object.", -"type": "any" -}, -"description": "If fulfillment is done via 3P cloud and 3P supports device capabilities, this field will be set.", -"type": "object" -}, -"deviceModelId": { -"description": "Device Model Id from DeviceModelPackage.", -"type": "string" -}, -"deviceModelRevisionId": { -"description": "Device Model Revision Id from DeviceModelPackage.", -"format": "int64", -"type": "string" -}, -"deviceSpecificData": { -"description": "Opaque supported action data related to a specific domain of devices, for example for car. go/car-talk-registration-model", -"type": "string" -}, -"internalCapability": { -"$ref": "AssistantDevicesPlatformProtoInternalCapability", -"description": "Internal-only config containing metadata about the Device Model, for example to control the ranking behavior." -}, -"thirdPartyActionConfig": { -"$ref": "AssistantApiThirdPartyActionConfig", -"description": "3P Action Metadata, populated from the Device Model lookup and the client request parameters. For example, an Assistant SDK request would have the billed project id of the Assistant request added here in order to enable any Device Actions developed using the same Google Cloud project. This data is sent to Service Engine to mask triggering for Device Actions." 
-} -}, -"type": "object" -}, -"AssistantApiOnDeviceAssistantCapabilities": { -"description": "Definitions of on-device assistant capabilities.", -"id": "AssistantApiOnDeviceAssistantCapabilities", -"properties": { -"isLocalNetworkArbitrationSupported": { -"description": "Capabilities related to local network arbitration (go/local-network-arbitration). Indicates if the device is capable of being a host device in the LAN whiling doing local network arbitration.", -"type": "boolean" -}, -"isOnDeviceArbitrationSupported": { -"deprecated": true, -"description": "Capabilities related to on-device arbitration(go/arbitration-on-device).", -"type": "boolean" -}, -"isOnDeviceAssistantSupported": { -"description": "Indicates if on-device assistant is enabled on this device. Example usecases: NGA (go/nga) or Marble (go/marble).", -"type": "boolean" -}, -"isOnDeviceUnderstandingSupported": { -"deprecated": true, -"description": "This may be used by NGA. E.g. if understanding happens on device, we can have more aggressive logic when fulfilling some features on the server side, like teleport.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiOnDeviceSmartHomeCapabilities": { -"description": "Definitions of on-device Smart Home capabilities. Next ID: 2", -"id": "AssistantApiOnDeviceSmartHomeCapabilities", -"properties": { -"isOnDeviceSmartHomeSupported": { -"description": "Master bit for on-device Smart Home features.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiOnDeviceStorageCapabilities": { -"description": "The on-device storage capabilities found on the device.", -"id": "AssistantApiOnDeviceStorageCapabilities", -"properties": { -"isSupported": { -"description": "Determines if an on-device storage is supported.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiOutputRestrictions": { -"description": "These are user configurable permissions representing what the device is allowed to output. 
Next ID: 11", -"id": "AssistantApiOutputRestrictions", -"properties": { -"googlePhotoContent": { -"description": "The type of Google Photo content which the device can output.", -"enum": [ -"ALL_PHOTO_CONTENT", -"NO_RESTRICTED_PHOTO_CONTENT" -], -"enumDescriptions": [ -"All content can be output, including contents from restricted categories (e.g. violent, racy).", -"Only content from non-restricted categories can be output." -], -"type": "string" -}, -"guestAccessOutput": { -"$ref": "AssistantApiGuestAccessOutput", -"description": "Access settings for guests." -}, -"personalData": { -"description": "The level of personal data which the device can output. See go/personal-readout for detail.", -"enum": [ -"PERSONAL_DATA_OUTPUT_UNKNOWN", -"ALL_PERSONAL_DATA_WITH_PROACTIVE", -"ALL_PERSONAL_DATA", -"NO_PERSONAL_DATA" -], -"enumDeprecated": [ -false, -true, -false, -false -], -"enumDescriptions": [ -"UNKNOWN.", -"DO NOT USE. This field has been deprecated.", -"All personal data can be output.", -"No personal data can be output." -], -"type": "string" -}, -"proactiveNotificationOutput": { -"description": "This controls if the server can proactively send notification to users, and it does not affect scenarios that users ask for information. The notification may include TTS and lights. It could be only lights for chirp.", -"enum": [ -"UNKNOWN_PROACTIVE_NOTIFICATION", -"NO_PROACTIVE_NOTIFICATION", -"ALL_PROACTIVE_NOTIFICATIONS" -], -"enumDescriptions": [ -"The setting for proactive notification is unknown", -"No proactive notification can be output", -"All proactive notifications can be output" -], -"type": "string" -}, -"proactiveOutput": { -"$ref": "AssistantApiProactiveOutput", -"description": "Restrictions on displaying and interacting with content on proactive surfaces (e.g. Dragonglass home screen). Note: NEVER access this field of OutputRestrictions directly, use the code in assistant/assistant_server/settings/device/device_settings_util.h instead." 
-}, -"youtubeAutoplayRestriction": { -"description": "Whether YouTube autoplay is allowed for queries from the user to this device. See go/assistant-youtube-settings for details.", -"enum": [ -"AUTOPLAY_RESTRICTION_UNSPECIFIED", -"AUTOPLAY_ALLOWED", -"AUTOPLAY_DISABLED" -], -"enumDescriptions": [ -"", -"Autoplay can be applied to youtube queries to the device from this user", -"Autoplay must not be enabled for youtube queries to the device from this user" -], -"type": "string" -}, -"youtubeContent": { -"description": "The type of YouTube content which the device can output.", -"enum": [ -"ALL_YOUTUBE_CONTENT", -"NO_RESTRICTED_CONTENT" -], -"enumDescriptions": [ -"All content can be output.", -"No restricted content can be output." -], -"type": "string" -}, -"youtubeTvContent": { -"description": "The type of YouTube TV content which the device can output.", -"enum": [ -"ALL_YOUTUBE_TV_CONTENT", -"NO_RESTRICTED_YOUTUBE_TV_CONTENT" -], -"enumDescriptions": [ -"All YouTube TV content can be output.", -"No restricted YouTube TV content can be output." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiProactiveOutput": { -"description": "Next ID: 7", -"id": "AssistantApiProactiveOutput", -"properties": { -"allowAllPersonalData": { -"description": "Allows displaying all personal data on proactive surfaces with no face match capability.", -"type": "boolean" -}, -"androidTvAssistantSettingsSource": { -"description": "For ANDROID_TV devices, the location that this setting was last changed from. 
Note: this structure allows to extend to more per-vertical bits in the future.", -"enum": [ -"NOT_SET", -"FIRST_SCREEN_DEVICE_OOBE", -"FIRST_SCREEN_KATNISS_OOBE", -"FIRST_SCREEN_KATNISS_BACKGROUND_LINKING", -"FIRST_SCREEN_DELEGATION_OOBE", -"FIRST_SCREEN_FIXER_JOB", -"FIRST_SCREEN_FCM_JOB", -"FIRST_SCREEN_HOME_GRAPH_JOB", -"FIRST_SCREEN_PERSONAL_BIT", -"FIRST_SCREEN_VOICE_INPUT_BIT", -"FIRST_SCREEN_OTHER", -"SECOND_SCREEN_AGSA", -"SECOND_SCREEN_GHA_IOS", -"SECOND_SCREEN_GHA_ANDROID" -], -"enumDescriptions": [ -"LINT.IfChange", -"Created during ATV Device OOBE(setupwraith)", -"Created within Katniss(Assistant client on TV)", -"Created within Katniss when running cast linking in the background of device setup.", -"Created within Katniss delegation path", -"Created by fix settings job in katniss.", -"Created by FCM jobs in katnisss.", -"Created by home graph jobs in katniss.", -"Created by enable personal bit button in Settings UI", -"Created by enable voice input bit button in Settings UI", -"Created by other parts in katniss", -"Created by AGSA", -"Created by Google Home App IOS", -"Created by Google Home App Android LINT.ThenChange(//depot/google3/logs/proto/assistant/capabilities_log.proto)" -], -"type": "string" -}, -"healthAndFitnessProactive": { -"description": "Allows displaying Health and Fitness content on proactive surfaces. This is a sub bit of the device-wide PR bit - the device-wide PR bit must be enabled AND this vertical sub bit must be enabled for H&F content to be shown. This bit will be available on all surfaces that have the proactive-bit enabled. 
If the proactive-bit is not enabled, then we do not show health and fitness content at all (or even allow access to this setting).", -"enum": [ -"NO_HEALTH_AND_FITNESS_PROACTIVE_OUTPUT", -"ALL_HEALTH_AND_FITNESS_PROACTIVE_OUTPUT" -], -"enumDescriptions": [ -"No health and fitness proactive content can be output", -"All health and fitness proactive content can be output" -], -"type": "string" -}, -"photosProactive": { -"description": "Allows displaying photos content on Dragonglass proactive surfaces. This is a sub bit of the device-wide PR bit - the device-wide PR bit must be enabled AND this vertical sub bit must be enabled for photos content to be shown on Dragonglass surfaces. This bit will be available on all Dragonglass surfaces that have the proactive-bit enabled. If the proactive-bit is not enabled or it's not a Dragonglass surface, then we do not show proactive photos content at all, nor allow access to this setting. See go/opa-photos-sg-settings for more details.", -"enum": [ -"UNKNOWN_PHOTOS_PROACTIVE_OUTPUT", -"NO_PHOTOS_PROACTIVE_OUTPUT", -"ALL_PHOTOS_PROACTIVE_OUTPUT" -], -"enumDescriptions": [ -"", -"No photos proactive content can be output. This indicates that user has explicitly disabled photos proactive settings.", -"All photos proactive content can be output. This indicates that user has explicitly enabled photos proactive settings." -], -"type": "string" -}, -"userMatchProactive": { -"description": "Settings for displaying personal data on proactive surfaces with face match capability.", -"enum": [ -"UNKNOWN_USER_MATCH_PROACTIVE", -"NEVER_SHOW", -"ONLY_SHOW_ON_USER_MATCH", -"ALWAYS_SHOW" -], -"enumDescriptions": [ -"", -"Never show proactive content.", -"Only show proactive content on Face Match.", -"Always show proactive content." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiProtobuf": { -"description": "Also known as \"Extensions Without Extensions\" or \"The Poor Man's Any\", this simple proto is used to transmit arbitrary protocol buffers over the wire. Unlike extensions: - It does not require the proto type to be compiled into the binary. (Useful so that the proto declaration can be inside the conversation package) - It is compatible with all versions of proto, including proto3 and the wack-tastic version used on ChromecastOS. Server libraries for dealing with it live in google3/assistant/protocol/protobuf_lib.h.", -"id": "AssistantApiProtobuf", -"properties": { -"protobufData": { -"description": "The serialized protocol buffer.", -"format": "byte", -"type": "string" -}, -"protobufType": { -"description": "The type of the protocol buffer to use. This must be a resolvable name (Namespace.ProtoName) and refer to a proto which is either compiled in to both client and server (e.g. a base proto type) or to one which is part of the conversation package.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiRecurrence": { -"description": "Date-based recurrences specify repeating events. Conceptually, a recurrence is a (possibly unbounded) sequence of dates on which an event falls, described by a list of constraints. A date is in a recurrence if and only if it satisfies all of the constraints. Note that devices may support some constraints, but not all.", -"id": "AssistantApiRecurrence", -"properties": { -"begin": { -"$ref": "AssistantApiDate", -"description": "The first day of the recurrence. If begin is not set, then the reminder will start infinitely in the past." -}, -"blacklistedRanges": { -"description": "A list of blacklisted dates to skip the alarm on.", -"items": { -"$ref": "AssistantApiDateTimeRange" -}, -"type": "array" -}, -"dayOfMonth": { -"description": "Specifies the date in a month. 
For example, if day_of_month is 15, then it represent the 15th day of the specified month.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"dayOfWeek": { -"description": "Specifies a weekly or daily recurrence. Constraint: The date falls on one of these days of the week, in 0...6 (Sunday...Saturday).", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"end": { -"$ref": "AssistantApiDate", -"description": "The last day of the recurrence." -}, -"every": { -"description": "Multiplier on the frequency of the recurrence. Use this to specify patterns that recur every X days, months, years, etc. Example: [remind me to call mom every 2nd week]. Default is 1 (every day, every month, every year).", -"format": "int32", -"type": "integer" -}, -"monthOfYear": { -"description": "Specifies the month in a year. Constrain: the month falls on one of these months, in 1, 2, ... 12 (January...December).", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"numOccurrences": { -"description": "The number of occurrences after which the recurrence should end.", -"format": "int32", -"type": "integer" -}, -"weekOfMonth": { -"description": "Specifies the index of week in a month. For example, the second Tuesday every month, in this case, week_of_month should be 2.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiRobinCapabilities": { -"description": "Used to describe the capabilities and params of Robin Client. 
Field will be populated from Client or backfilled in SAL only for Robin client.", -"id": "AssistantApiRobinCapabilities", -"properties": { -"enabledExtensions": { -"description": "List of enabled Robin extensions.", -"items": { -"type": "string" -}, -"type": "array" -}, -"robinStatus": { -"$ref": "AssistantApiRobinCapabilitiesRobinStatus" -}, -"supportedRobinOps": { -"description": "List of supported RobinOps.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiRobinCapabilitiesRobinStatus": { -"description": "Current Robin status of the client.", -"id": "AssistantApiRobinCapabilitiesRobinStatus", -"properties": { -"available": { -"$ref": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusAvailable" -}, -"notAvailable": { -"$ref": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusNotAvailable" -}, -"optedIn": { -"$ref": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedIn" -}, -"optedOut": { -"$ref": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedOut" -} -}, -"type": "object" -}, -"AssistantApiRobinCapabilitiesRobinStatusRobinStatusAvailable": { -"description": "Robin is available and can be enabled by the user.", -"id": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusAvailable", -"properties": {}, -"type": "object" -}, -"AssistantApiRobinCapabilitiesRobinStatusRobinStatusNotAvailable": { -"description": "Robin is not available and can not be enabled by the user.", -"id": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusNotAvailable", -"properties": { -"reasons": { -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedIn": { -"description": "Robin is available and the user opted in already.", -"id": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedIn", -"properties": {}, -"type": "object" -}, -"AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedOut": { -"description": "Robin 
would be available, but the user explicitly opted out.", -"id": "AssistantApiRobinCapabilitiesRobinStatusRobinStatusOptedOut", -"properties": {}, -"type": "object" -}, -"AssistantApiScreenCapabilities": { -"description": "These capabilities represent the tactile features associated with the device. This includes, for example, whether the device has a screen, how big the screen is, and privacy of the screen. Next ID: 11", -"id": "AssistantApiScreenCapabilities", -"properties": { -"fontScaleFactor": { -"description": "The scale factor used to convert Scalable Pixel (SP) units to Density-independent Pixel (DP) units (DP = SP * scale factor). Fonts are measured in units of SP, and on some platforms such as Android the SP to DP scale factor can be affected by the font size a user selects in accessibility settings.", -"format": "float", -"type": "number" -}, -"inputType": { -"description": "The types of input that this screen supports. Note that this can be empty in which case the screen's input type is unknown.", -"items": { -"enum": [ -"TOUCHSCREEN" -], -"enumDescriptions": [ -"The device has a screen that support touch input." -], -"type": "string" -}, -"type": "array" -}, -"mask": { -"$ref": "AssistantApiScreenCapabilitiesMask", -"description": "Mask defined for this device, if any." -}, -"protoLayoutTargetedSchema": { -"$ref": "AssistantApiScreenCapabilitiesProtoLayoutVersion", -"description": "The targeted schema version for ProtoLayout requests." -}, -"resolution": { -"$ref": "AssistantApiScreenCapabilitiesResolution", -"description": "If this field is absent, the resolution of the screen is unknown." 
-}, -"screenOff": { -"description": "If screen is turned off.", -"type": "boolean" -}, -"screenStateDetection": { -"description": "The ability of the client to correctly report screen state.", -"enum": [ -"UNKNOWN_SCREEN_STATE_DETECTION", -"UNRELIABLE_SCREEN_STATE_DETECTION", -"RELIABLE_SCREEN_STATE_DETECTION" -], -"enumDescriptions": [ -"", -"The client reports screen state but may be unreliable based on different remotes/inputs used to turn the screen off. Example: Android TVs with soundbars.", -"" -], -"type": "string" -}, -"supportedRenderingFormat": { -"description": "The primary supported rendering format for display on the device's screen. This may be used to determine what format of card to be returned when rendering cards.", -"enum": [ -"UNKNOWN_RENDERING_FORMAT", -"CONCISE_TEXT", -"PROTO_LAYOUT", -"ELEMENTS" -], -"enumDescriptions": [ -"", -"Concise Text go/concise-text-responses", -"Proto Layout, the layout for Proto Tiles go/prototiles", -"Elements, the layout for xUIKit go/xuikit" -], -"type": "string" -}, -"supportedScreenStates": { -"description": "The screen states that the client supports. The current screen state is specified in DeviceProperties.screen.", -"items": { -"enum": [ -"UNKNOWN_SCREEN_STATE", -"ON", -"OFF" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"visionHelpEnabled": { -"description": "Whether the device enabled vision help features in accessibility settings. The settings is config in Assistant App and on-device settings, and stored in footprints. 
When enabled, font, color and TTS will be adjusted.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiScreenCapabilitiesMask": { -"description": "A mask applied to the screen's pixel space to determine regions not visible on the physical device.", -"id": "AssistantApiScreenCapabilitiesMask", -"properties": { -"type": { -"enum": [ -"UNKNOWN_MASK", -"NO_MASK", -"ROUND_MASK" -], -"enumDescriptions": [ -"Default value.", -"Used for devices with no mask. In this case, every point in the region defined by width_px and height_px corresponds to a physical, visible pixel. This is distinct from rectangular screens that DO have regions with no pixels, e.g. an inset or cutout that occludes one or more sides of the screen.", -"Used for devices with oval or circular screens. The vertical and horizontal resolution of the screen determines the vertical and horizontal diameters of the oval. Pixels outside this oval region are non-existent or physically occluded." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiScreenCapabilitiesProtoLayoutVersion": { -"description": "Version info for ProtoLayout requests.", -"id": "AssistantApiScreenCapabilitiesProtoLayoutVersion", -"properties": { -"major": { -"format": "uint32", -"type": "integer" -}, -"minor": { -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiScreenCapabilitiesResolution": { -"description": "A Resolution proto indicates the size of the application window. 
All fields are required.", -"id": "AssistantApiScreenCapabilitiesResolution", -"properties": { -"dpi": { -"description": "Dots (pixels) per inch of the screen.", -"format": "int32", -"type": "integer" -}, -"heightPx": { -"format": "int32", -"type": "integer" -}, -"mSize": { -"description": "m_size is the smallest square box size to display a capital letter M so that the user can still easily understand it.", -"format": "int32", -"type": "integer" -}, -"nengSize": { -"description": "neng_size is the smallest square box size to display a letter \u879a (Neng, U+879A) so that the user can easily understand it. (Neng is a visually dense Chinese letter, and so may require a larger box than an M.)", -"format": "int32", -"type": "integer" -}, -"widthPx": { -"description": "The dimensions of the application window, in pixels.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiSelinaCapabilites": { -"description": "The Soli capabilities on Elaine, including gestures and sleep sensing. go/dingo-dc-software Next ID: 4", -"id": "AssistantApiSelinaCapabilites", -"properties": { -"gestureCapabilities": { -"$ref": "AssistantApiGestureCapabilities", -"description": "A list of gestures that selina supports" -}, -"selinaSupported": { -"description": "Whether the client supports selina.", -"type": "boolean" -}, -"sleepSensingSupported": { -"description": "Whether the client can monitor sleep. This allows us to show sleep CUJ related information: go/TwilightDesign", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsAmbientSettings": { -"id": "AssistantApiSettingsAmbientSettings", -"properties": { -"anyUserHasSetPersonalPhotos": { -"description": "Whether any user sets personal photos on this device. 
See go/ambient-setting-in-assistant-design.", -"type": "boolean" -}, -"recentHighlightsEnabled": { -"description": "Whether or not the user's current selection for their ambient photo frame includes the auto-generated \"Recent Highlights\" album. This is used to determine which users to display the go/opa-photos-memories-tile. See go/opa-photo-memories-imax-optin for more discussion on why this bit was created.", -"type": "boolean" -}, -"showPersonalPhotoData": { -"description": "Whether to enable the personal photo data in the ambient settings: https://screenshot.googleplex.com/Wd4OFkQfOyF See go/opa-photos-ambient-location-date-dd#heading=h.5x4iaouuiett for explanation.", -"type": "boolean" -}, -"showPersonalPhotos": { -"description": "Whether current user sets personal photos on this device. See go/ambient-setting-in-assistant-design.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsAppCapabilities": { -"description": "These capabilities are associated with Assistant Settings on devices.", -"id": "AssistantApiSettingsAppCapabilities", -"properties": { -"carSettingsCapabilities": { -"$ref": "AssistantApiCarSettingsCapabilities", -"description": "Capabilities that are associated with Assistant Settings on auto surfaces." -}, -"reissueQueryAfterMusicSetup": { -"description": "Whether the client supports reissuing query after setting up in Music Settings.", -"type": "boolean" -}, -"supportsPaymentsSettingsUpdate": { -"description": "Whether the client supports updating payments setting.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsAutoFramingSettings": { -"description": "Settings pertaining to auto framing. 
See go/auto-framing-presentation.", -"id": "AssistantApiSettingsAutoFramingSettings", -"properties": { -"isAutoFramingEnabled": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsCarrierCallDeviceSettings": { -"description": "Carrier related call settings on the device.", -"id": "AssistantApiSettingsCarrierCallDeviceSettings", -"properties": { -"allowIncomingCalls": { -"deprecated": true, -"description": "Whether this device is allowed to receive incoming PSTN calls.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsCommunicationsFilter": { -"description": "Specification of which communication features can be used.", -"id": "AssistantApiSettingsCommunicationsFilter", -"properties": { -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL", -"BLOCK_CALLS_AND_MESSAGES" -], -"enumDescriptions": [ -"", -"Allow all communication features.", -"Allow all communication features, with the exception of calls and messages." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceDowntimeSettings": { -"description": "Specification of times when most features on a device are disabled for certain users. During these periods, the device will respond to most interactions with something like \"sorry, I'm sleeping right now\". Design: go/home-ft-settings-storage PRD: go/home-family-tools-prd", -"id": "AssistantApiSettingsDeviceDowntimeSettings", -"properties": { -"schedules": { -"items": { -"$ref": "AssistantApiSettingsLabeledDowntimeSchedule" -}, -"type": "array" -}, -"targets": { -"description": "The set of users of this device that will have these downtime settings applied. Must have at least one element.", -"items": { -"enum": [ -"UNKNOWN_DEVICE_SUPERVISION_TARGET", -"EVERYONE", -"KID_ACCOUNTS", -"GUESTS" -], -"enumDescriptions": [ -"", -"All people that use the device.", -"Users with a Unicorn account that are voice-matched to the device.", -"Users interacting with the device without being voice matched. 
We cannot identify these users and don't know their ages." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceFeatureFilters": { -"description": "Defines a set of restrictions on particular device features for a certain set of users. Design: go/home-ft-settings-storage PRD: go/home-family-tools-prd", -"id": "AssistantApiSettingsDeviceFeatureFilters", -"properties": { -"enabled": { -"description": "Enables/disables all the filters at the same time. For new devices or non-Cast devices this is always false.", -"type": "boolean" -}, -"featureFilters": { -"$ref": "AssistantApiSettingsFeatureFilters", -"description": "The filters (feature restrictions) to apply when `enabled` is true." -}, -"targets": { -"description": "The set of users of this device that will have these settings applied. Must have at least one element.", -"items": { -"enum": [ -"UNKNOWN_DEVICE_SUPERVISION_TARGET", -"EVERYONE", -"KID_ACCOUNTS", -"GUESTS" -], -"enumDescriptions": [ -"", -"All people that use the device.", -"Users with a Unicorn account that are voice-matched to the device.", -"Users interacting with the device without being voice matched. We cannot identify these users and don't know their ages." 
-], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceLogsOptIn": { -"id": "AssistantApiSettingsDeviceLogsOptIn", -"properties": { -"optInEnabled": { -"description": "Indicates whether the crash logs can be uploaded and the device logs can be enabled", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceSettings": { -"description": "Next ID: 73", -"id": "AssistantApiSettingsDeviceSettings", -"properties": { -"ackStatus": { -"description": "LINT.ThenChange(//depot/google3/assistant/ui/assistant_device_settings_ui.proto)", -"enum": [ -"ACK_COMPLETED", -"ACK_PENDING" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"address": { -"description": "A human-readable address string for the location; generally a one-line address such as \"34 Masonic Ave, San Francisco CA 94117, United States\". Set this field to empty string for deletion, in which case the rest of the location related fields below will be cleared as well.", -"type": "string" -}, -"aliasName": { -"description": "The alias names of the device, e.g. my living room tv, tv, living room and etc., which user will usually use to refer to the device in addition to human_friendly_name. It can help speech biasing and query understanding. This field is set by the user and already localized.", -"items": { -"type": "string" -}, -"type": "array" -}, -"allowIncomingCalls": { -"description": "Whether this device is allowed to receive incoming calls.", -"type": "boolean" -}, -"ambientSettings": { -"$ref": "AssistantApiSettingsAmbientSettings", -"description": "Ambient settings contains the configuration of Photo Frame on DG device. This field relies on IMAX service to do the update, sync happenes after user updates IMAX device settings or a device registers in CloudCastDevice. So it's more like a cached version instead of definitive source-of-truth. More details at go/ambient-setting-in-assistant-design." 
-}, -"ancillaryDeviceId": { -"$ref": "AssistantApiSettingsInternalAncillaryDeviceId", -"description": "The additional device ids. Currently used only for ATV. go/project-yellowstone Note: This field is for internal (Within settings) use only." -}, -"autoFramingSettings": { -"$ref": "AssistantApiSettingsAutoFramingSettings", -"description": "Auto framing settings associated with a device. See go/auto-framing-presentation." -}, -"blueSteelEnabled": { -"description": "Indicates whether the user has enabled Blue Steel. See go/blue-steel for more info on this project.", -"type": "boolean" -}, -"capabilities": { -"$ref": "AssistantApiDeviceCapabilities", -"description": "Describes roughly what a device is capable of doing and metadata around those capabilities. Note: this includes device limitations as well as user configurable settings." -}, -"city": { -"description": "city and postal_code are sent to third party AoG Apps as location when permission is granted for precise or coarse location. https://developers.google.com/actions/reference/rest/Shared.Types/Permission city and postal_code have the same description as in Proto Postal Address: https://cs.corp.google.com/piper///depot/google3/location/country/postaladdress.proto city corresponds to locality_name, postal_code corresponds to postal_code_number. These two fields are set in assistant_settings_service by AddressConverter. https://cs.corp.google.com/piper///depot/google3/location/addressformatter/public/addressconverter.h See go/aog-i18n-address-parse for more information", -"type": "string" -}, -"colocationStatus": { -"description": "Status of colocation. go/co-location-work-v2 Note: this is a cache at the Assistant level. 
The source of truth is inside CastAuthenticationServer, which is only used for Home devices.", -"enum": [ -"COLOCATION_UNKNOWN", -"COLOCATION_ESTABLISHED", -"COLOCATION_NOT_ESTABLISHED", -"COLOCATION_NOT_SUPPORTED" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"creationTimestampMs": { -"description": "The timestamp that the device is linked with the user in milliseconds.", -"format": "int64", -"type": "string" -}, -"crossSurfaceAvailability": { -"$ref": "AssistantApiSettingsDeviceSettingsCrossSurfaceAvailability", -"description": "Availability of this device for Assistant Cross-surface handoffs. (go/assistant-cross-surface)" -}, -"defaultAudioDeviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The identification of the default device which user want to output audio. See go/default-media-output-design for more info." -}, -"defaultVideoDeviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The identification of the default device which user want to output video. Note that, we don't fallback to this for audio playback when default_audio_device_id is not set. See go/default-media-output-design for more info." -}, -"deviceBrand": { -"description": "The brand of the device, populated from DeviceOemParams. Examples: \"google\", \"samsung\".", -"type": "string" -}, -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The identification of the device." -}, -"deviceModelId": { -"description": "The model ID of the device. This should be globally unique across manufactures/OEMs. Examples: \"nest_cam_iq_2017\", \"comcast_voice_box_2017\".", -"type": "string" -}, -"deviceModelRevision": { -"description": "The Device Platform Service lookup revision. (go/device-model-revision) For 1p devices, and most 3p devices with no custom feature, this should be always 0, which means no lookup needed. For 3p devices with custom assistant feature, this is provided directly by OEM as incremental (e.g. 
1, 2, 3, ...)", -"format": "int32", -"type": "integer" -}, -"dusi": { -"description": "Only valid for ATV. Stores the android DUSI for the corresponding user. More details: go/auto-logout-on-unlink.", -"type": "string" -}, -"faceEnrollmentErrors": { -"description": "List of errors that happened during the face enrollment process if it failed. See go/face-match-enrollment-error for more info.", -"items": { -"enum": [ -"UNKNOWN_FACE_ENROLLMENT_ERROR", -"MISSING_FRONTAL_POSE", -"FACE_NOT_DETECTED", -"MULTIPLE_FACE_DETECTED", -"FACE_WITHOUT_SIGNATURE", -"FACE_DETECTION_LOW_CONFIDENCE", -"FACE_LANDMARK_LOW_CONFIDENCE", -"FACE_WITHOUT_CONFIDENCE", -"FACE_TOO_SMALL", -"FAILED_TO_READ_IMAGE", -"FAILED_TO_DECODE_IMAGE", -"FACE_DETECTION_ERROR", -"FACE_WITHOUT_EMBEDDING_CONFIDENCE" -], -"enumDescriptions": [ -"", -"The gallery images don't have the required frontal pose properties.", -"Face was NOT detected in an image.", -"Multiple faces detected in an image.", -"Cannot create signature for a detected face.", -"Face detection confidence score is too low.", -"Face detection landmark confidence score is too low.", -"Face detection contains no confidence score, which is invalid.", -"Face detection was too small for enrollment.", -"Unable to read the image file from the gallery.", -"Unable to decode the jpeg image.", -"Unable to run face detector on the decoded image.", -"Face detection contains no confidence score for embedding." -], -"type": "string" -}, -"type": "array" -}, -"faceEnrollmentStatus": { -"description": "Indicates whether the user's face has been successfully enrolled on this device. See go/face-match-server-design for more info.", -"enum": [ -"UNKNOWN_STATUS", -"SUCCESS", -"FAILURE", -"PENDING" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"faceMatchEnabled": { -"description": "Indicates whether the user has enabled Face Match for this device. 
See go/face-match-server-design for more info on this project.", -"type": "boolean" -}, -"gcmSettings": { -"$ref": "AssistantApiSettingsGcmSettings", -"deprecated": true, -"description": "Stores GCM info associated with a device. See go/quartz-design-doc for more info." -}, -"homeGraphData": { -"$ref": "AssistantApiSettingsHomeGraphData", -"description": "Holds the data that should be written to HomeGraph. Note: this field is not persisted in Assistant Settings storage. It is simply used for transporting data when client calls UpdateSettings." -}, -"homeGraphId": { -"description": "The home graph ID that can be used to lookup the corresponding entry in HomeGraph. go/home-graph. Note: when this field is empty, it doesn't necessarily mean that the device is not in home graph. It just means that Assistant doesn't know about the mapping.", -"type": "string" -}, -"hospitalityModeStatus": { -"$ref": "AssistantApiSettingsHospitalityMode", -"deprecated": true, -"description": "Indicates whether the device is currently in Hospitality mode. go/hospitality-mode-design. This is moved to a per user setting in assistant settings. ref. go/hospitality-settings-v2" -}, -"hotwordSensitivity": { -"description": "The level of hotword sensitivity. go/hotword-sensitivity-prd", -"enum": [ -"UNKNOWN_HOTWORD_SENSITIVITY", -"HOTWORD_SENSITIVITY_LOW", -"HOTWORD_SENSITIVITY_NORMAL", -"HOTWORD_SENSITIVITY_HIGH", -"HOTWORD_SENSITIVITY_LOW_2", -"HOTWORD_SENSITIVITY_HIGH_2" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"hotwordThresholdAdjustmentFactor": { -"$ref": "AssistantApiSettingsHotwordThresholdAdjustmentFactor", -"description": "HotwordThresholdAdjustmentFactor contains threshold_adjustment_factor, and it's validity. TAF is a measure of adjustment applied to the hotword threshold as a result of go/autotune. 
Currently, this is updated from query_settings_frame, but if we move to updating it from the client, this could also contain TAFs as a result of Hotword Sensitivity, in addition to Autotune." -}, -"humanFriendlyName": { -"description": "The human-friendly name of the cast device, e.g., my living room tv. This field is set by the user and already localized.", -"type": "string" -}, -"internalVersion": { -"$ref": "AssistantApiSettingsInternalVersion", -"description": "Internal version of the DeviceSettings for measurement of the DeviceSettings mutation race conditions. See go/consistent-assistant-settings-update." -}, -"isCloudSyncDevice": { -"deprecated": true, -"description": "Indicates whether the device is also managed through HA cloud sync. go/ha-dev-guide", -"type": "boolean" -}, -"isDeviceActivationCacheEnabled": { -"description": "When true, the user has explicitly allowed audio and visual data collection on this device", -"type": "boolean" -}, -"kidsMode": { -"$ref": "AssistantApiSettingsKidsMode", -"deprecated": true, -"description": "Specifies if kids-mode is enabled for the device. See go/aff-parentalsupervision-dd." -}, -"lastCastRegistrationTimestamp": { -"description": "Device's latest registration timestamp provided by Cast side. This field is not necessarily up to date. The update frequency is defined in last_registration_update_frequency_in_days field of AssistantConfig in java/com/google/chrome/dongle/common/proto/home_assistant_config.proto. go/cast-last-registration-time", -"format": "google-datetime", -"type": "string" -}, -"lastUsedCoarseTimestamp": { -"description": "Coarsened hourly timestamp of when the device was last used.", -"format": "google-datetime", -"type": "string" -}, -"linkedDeviceId": { -"description": "Stores pairing between different devices. 
See go/quartz-design-doc for more info.", -"items": { -"$ref": "AssistantApiCoreTypesDeviceId" -}, -"type": "array" -}, -"linkedUsers": { -"deprecated": true, -"description": "Please do NOT use this field without prior approval from PWG. Users who have signed in onto this device, go/linked-users-in-pkg.", -"items": { -"$ref": "AssistantApiSettingsLinkedUser" -}, -"type": "array" -}, -"locale": { -"description": "The locale for the device: language + region, i.e., en-US, ja-JP.", -"type": "string" -}, -"locationCoordinates": { -"$ref": "AssistantApiCoreTypesLocationCoordinates", -"description": "Coordinate information of the device location." -}, -"locationFeature": { -"$ref": "GeostoreFeatureProto", -"description": "The feature proto of the location of the device. Note: client does not need to populate this. It will be auto-populated based on \"address\" field on server side. Currently, only \"bound\" and \"type\" are persisted, since the entire FeatureProto is too big." -}, -"marketplaceDisclosure": { -"$ref": "AssistantApiSettingsMarketplaceDisclosure", -"deprecated": true, -"description": "See go/marketplace-disclosure for more info." -}, -"masqueradeMode": { -"$ref": "AssistantApiSettingsMasqueradeMode" -}, -"notificationProfile": { -"$ref": "AssistantApiSettingsNotificationProfile", -"deprecated": true, -"description": "Information about how to send the user a notification. This won't be populated for fb-conv users (allo group chat users)." -}, -"oauthClientId": { -"description": "OAuth client id for the device. This field is available for Assistant SDK devices. It is written when the device is registered to the user (AssistantSettingsUiService.LinkAssistantDeviceUi). 
When user revokes grant on the Assistant device, Assistant Devices Platform Service will receive Pubsub notification with OAuth client id for the revoked device, and we will compare that with this stored id to identity device to remove.", -"type": "string" -}, -"onDeviceAppSettings": { -"$ref": "AssistantApiSettingsOnDeviceAppSettings", -"description": "Device specific app related settings." -}, -"optInStatus": { -"$ref": "AssistantApiSettingsDeviceLogsOptIn", -"description": "Specifies if device logs and crashes can be captured during SendFeedback" -}, -"paymentsEnabled": { -"deprecated": true, -"description": "DEPRECATED: Use DeviceCapabilities.OutputRestrictions.personal_data instead. Whether the user has enabled payments for this device.", -"type": "boolean" -}, -"personalizationMetadata": { -"$ref": "AssistantApiSettingsPersonalizationMetadata", -"description": "Metadata about how personalization settings were configured." -}, -"politeMode": { -"$ref": "AssistantApiSettingsPoliteMode", -"deprecated": true, -"description": "Specify whether polite mode is enabled for this device. See go/pretty-please-dd." -}, -"postalCode": { -"type": "string" -}, -"reauthTrustedDeviceSettings": { -"$ref": "AssistantApiSettingsReauthTrustedDeviceSettings", -"deprecated": true, -"description": "Trusted device preferences Assistant reauth. go/assistant-reauth-verify-skip." -}, -"shortenedAddress": { -"description": "A human-readable shortened address. This is usually the street address. Note: client does not need to populate this. It will be auto-populated based on \"address\" field on server side. Developers can use this field to avoid reading out the full address everytime.", -"type": "string" -}, -"speakerIdEnabled": { -"description": "Indicates whether the user has enabled speaker-id for this device. 
See go/google-assistant-multi-user for more info on this project.", -"type": "boolean" -}, -"speechOutputSettings": { -"$ref": "AssistantApiSettingsSpeechOutputSettings", -"description": "Settings related to TTS output." -}, -"speechSettings": { -"$ref": "AssistantApiSettingsSpeechSettings", -"description": "Speech/hotword detection related settings." -}, -"supervisionSettings": { -"$ref": "AssistantApiSettingsDeviceSupervisionSettings", -"description": "Restrictions on how and when certain users can use a device. See go/home-ft-prd." -}, -"surfaceType": { -"$ref": "AssistantApiCoreTypesSurfaceType", -"description": "The type of assistant surface. Only use this field when device type is ASSISTANT." -}, -"tetheredInfo": { -"$ref": "AssistantApiSettingsTetheredInfo", -"description": "Presence indicates a tethered wearable. go/wearable-device-ids." -}, -"timeZone": { -"$ref": "AssistantApiTimeZone", -"description": "Device time zone. It's mainly used for a one-time notification for new users when they just bought and activated their devices. They may not have used Search or Assistant before, so their timezone info may not available elsewhere when we want to send a notification. This should be used as a fallback only when other timezone sources such as assistant_settings:user_attribute#inferred_user_timezone are not available. Also, when both |time_zone| and |location| are set, the |location| should be preferred to derive the most up to date timezone. This info directly comes from the device through early device setting recording mechanism. See more details at go/early-device-setting-recording." -}, -"truncatedLocalNetworkId": { -"description": "Local network ID of the device (truncated to obfuscate devices and households globally). This is a temporary signal to determine proximity of Assistant devices in a house (HGS place).", -"type": "string" -}, -"type": { -"description": "The type of the device. 
Note: this should only be used for grouping devices for UI presentation purpose. Use |capabilities| to decide what the device can do.", -"enum": [ -"UNKNOWN_DEVICE_TYPE", -"ASSISTANT", -"HOME_AUTOMATION", -"CAST", -"CAST_GROUP", -"QUARTZ", -"QUARTZ_IOS", -"CLOUD_AUTO" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"Quartz in AGSA", -"Quartz in iOPA", -"Non-assistant Auto devices from Cloud." -], -"type": "string" -}, -"verboseTtsForChromecastEnabled": { -"description": "Indicates whether to play verbose tts for Elementary on chirp. See: go/opa-cast-a11y-impl-design fore more info on this project. Note: this should probably be in SpeechOutputSetting below.", -"type": "boolean" -}, -"vmLastUsedCoarseTimestamp": { -"description": "Coarsened hourly timestamp of when the user was last verified by VoiceMatch on this device. This is used for enforcing VoiceMatch model TTL. go/voicematch-pdd-ttl", -"format": "google-datetime", -"type": "string" -}, -"voiceEnrollmentStatus": { -"description": "Indicates whether the user's voice has been successfully enrolled on this device.", -"enum": [ -"VOICE_ENROLLMENT_UNKNOWN_STATUS", -"VOICE_ENROLLMENT_SUCCESS", -"VOICE_ENROLLMENT_FAILURE", -"VOICE_ENROLLMENT_PENDING" -], -"enumDescriptions": [ -"User has no utterances and no speaker id model.", -"User has speaker id model.", -"User has utterances but no speaker id model (for ex. hotword was not detected in utterances).", -"Speaker id model generation is in-progress." 
-], -"type": "string" -}, -"voiceInputEnabled": { -"description": "A boolean indicates whether voice input (mic-button, hotword, etc) is enabled.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceSettingsCrossSurfaceAvailability": { -"id": "AssistantApiSettingsDeviceSettingsCrossSurfaceAvailability", -"properties": { -"lastKnownClientLocale": { -"description": "Last known locale of the client.", -"type": "string" -}, -"lastParamsWriteTimestamp": { -"description": "This is the timestamp when the AssistantRequestParams (in ASSISTANT_SNAPSHOT corpus) were last written for this device.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsDeviceSupervisionSettings": { -"id": "AssistantApiSettingsDeviceSupervisionSettings", -"properties": { -"downtimeSettings": { -"$ref": "AssistantApiSettingsDeviceDowntimeSettings", -"description": "Specification of times that a device shouldn't respond to certain users. See go/home-ft-prd." -}, -"featureFilters": { -"$ref": "AssistantApiSettingsDeviceFeatureFilters", -"description": "Restrictions on features that certain users can access on a device. See go/home-ft-prd." -} -}, -"type": "object" -}, -"AssistantApiSettingsDowntimePeriod": { -"description": "Specifies a period of up to 24 hours when downtime should be enabled, starting at certain time on a certain day of the week, and ending at a later time on either that day or the following day.", -"id": "AssistantApiSettingsDowntimePeriod", -"properties": { -"enabled": { -"description": "True if downtime should be enabled during this period.", -"type": "boolean" -}, -"endTime": { -"$ref": "GoogleTypeTimeOfDay", -"description": "Time of day that this downtime period should end. Required. If end_time > start_time, end_time is relative to start_day. Otherwise, end_time is relative to the day after start_day. For example, start_day: MONDAY, start_time: 9 p.m., end_time: 6 a.m. 
means that the downtime period starts at 9 p.m. on Monday and ends at 6 a.m. on Tuesday." -}, -"startDay": { -"description": "The day of the week when this downtime period starts. Required.", -"enum": [ -"DAY_OF_WEEK_UNSPECIFIED", -"MONDAY", -"TUESDAY", -"WEDNESDAY", -"THURSDAY", -"FRIDAY", -"SATURDAY", -"SUNDAY" -], -"enumDescriptions": [ -"The day of the week is unspecified.", -"Monday", -"Tuesday", -"Wednesday", -"Thursday", -"Friday", -"Saturday", -"Sunday" -], -"type": "string" -}, -"startTime": { -"$ref": "GoogleTypeTimeOfDay", -"description": "Time of day that this downtime period should start. Required." -} -}, -"type": "object" -}, -"AssistantApiSettingsDowntimeSchedule": { -"description": "Specification of when downtime is enabled on different days of the week. Contains up to 7 DowntimePeriod messages, up to one per day of the week.", -"id": "AssistantApiSettingsDowntimeSchedule", -"properties": { -"enabled": { -"description": "True if this downtime schedule should be enabled.", -"type": "boolean" -}, -"periods": { -"description": "Downtime entries for the days of the week, in no particular order. There can be at most one period defined for each day of the week. Days of the week with no explicit period defined are treated as disabled, so the device is available all day (modulo an end time that may spill over from the previous day).", -"items": { -"$ref": "AssistantApiSettingsDowntimePeriod" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsDuoCallDeviceSettings": { -"description": "Duo related call settings on the device. Next ID: 5", -"id": "AssistantApiSettingsDuoCallDeviceSettings", -"properties": { -"allowKnockKnock": { -"description": "True if Duo Knock Kncok feature is enabled on the device.", -"type": "boolean" -}, -"shouldBeLinked": { -"description": "Boolean indicating if user has explicitly marked this device to be linked or not. 
This bit is used in case where unexpected errors occur and we have to check for account/device status and mark the device linked after verification.", -"type": "boolean" -}, -"state": { -"description": "The call state of the device (i.e. whether a Duo call account has been setup on the device).", -"enum": [ -"UNKNOWN_LINK_STATE", -"NOT_LINKED", -"LINKED", -"LINKED_WAITING", -"LINK_ERROR" -], -"enumDescriptions": [ -"", -"The user chose not to link the app.", -"The user completed linking the app.", -"The user chose to link the app, but the linking has not completed yet.", -"There was some error in link state and link state should be refreshed by querying tachyon backend. But this does not guarantee that state will be linked after this request." -], -"type": "string" -}, -"talkbackEnabled": { -"description": "Client device settings: settings which are populated by client to give to duocore. TalkBack is an accessibility service that helps blind and vision-impaired users interact with their devices. Indicates whether talkback is enabled for the device. 
Note: this is per device settings currently filled by client for all users.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsFeatureFilters": { -"description": "Specification of which assistant features are allowed for a particular device or user account.", -"id": "AssistantApiSettingsFeatureFilters", -"properties": { -"communicationsFilter": { -"$ref": "AssistantApiSettingsCommunicationsFilter" -}, -"musicFilter": { -"$ref": "AssistantApiSettingsMusicFilter" -}, -"newsFilter": { -"$ref": "AssistantApiSettingsNewsFilter" -}, -"podcastFilter": { -"$ref": "AssistantApiSettingsPodcastFilter" -}, -"searchFilter": { -"$ref": "AssistantApiSettingsSearchFilter" -}, -"thirdPartyAppsFilter": { -"$ref": "AssistantApiSettingsThirdPartyAppsFilter" -}, -"videoFilter": { -"$ref": "AssistantApiSettingsVideoFilter" -}, -"webviewFilter": { -"$ref": "AssistantApiSettingsWebviewFilter" -} -}, -"type": "object" -}, -"AssistantApiSettingsGcmSettings": { -"id": "AssistantApiSettingsGcmSettings", -"properties": { -"gcmId": { -"type": "string" -}, -"gcmPackage": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsHomeGraphData": { -"description": "Next ID: 8", -"id": "AssistantApiSettingsHomeGraphData", -"properties": { -"agentId": { -"description": "Agent ID, aka project ID. Used as the AgentDeviceId.agent_id of device when calling Home Graph Service.", -"type": "string" -}, -"attributes": { -"additionalProperties": { -"description": "Properties of the object.", -"type": "any" -}, -"description": "See go/ha-dev-guide and HomeGraphItem.attribute in //assistant/verticals/homeautomation/proto/home_graph.proto", -"type": "object" -}, -"deviceId": { -"description": "Device ID, used as AgentDeviceId.device_id of device when calling Home Graph Service.", -"type": "string" -}, -"deviceType": { -"description": "HGS device type. 
See java/com/google/home/graph/service/config/protoconf.pi for the exhaustive list of type strings.", -"type": "string" -}, -"shouldWriteToHomeGraph": { -"description": "Whether device data should be written to Home Graph via Assistant device_settings. Assistant SDK and Google Home write their devices into Home Graph through AssistantSettingsService, while Home Automation Partner devices (e.g. SmartThings, Philips Hue, Nest, TP-Link, etc.) don't need to be written to Home Graph through AssistantSettingsService. This field decides whether AssistantSettingsService writes devices to Home Graph or not.", -"type": "boolean" -}, -"supportedTraits": { -"description": "Supported traits of the device. See java/com/google/home/graph/service/config/protoconf.pi for the exhaustive list of trait-strings.", -"items": { -"type": "string" -}, -"type": "array" -}, -"supportsDirectResponse": { -"description": "Whether the device supports direct response. See HomeGraphItem.supports_direct_response in //assistant/verticals/homeautomation/proto/home_graph.proto", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsHospitalityCardSettings": { -"id": "AssistantApiSettingsHospitalityCardSettings", -"properties": { -"cardConfig": { -"description": "Config for Hospitality UI modules.", -"items": { -"$ref": "AssistantApiSettingsHospitalityCardSettingsCardConfig" -}, -"type": "array" -}, -"showMediaTapGestureTutorial": { -"description": "Toggle media tap gesture tutorial card.", -"type": "boolean" -}, -"showPhotoSwipeGestureTutorial": { -"description": "Toggle photo swipe gesture tutorial card.", -"type": "boolean" -}, -"youtubeCardConfig": { -"description": "Config for YouTube video cards.", -"items": { -"$ref": "AssistantApiSettingsHospitalityCardSettingsYouTubeCardConfig" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsHospitalityCardSettingsCardConfig": { -"description": "Configuration for hospitality card.", -"id": 
"AssistantApiSettingsHospitalityCardSettingsCardConfig", -"properties": { -"activeActionRequired": { -"deprecated": true, -"description": "Whether the UI module requires user action. If true, the UI module can peek on to the top of Ambient. See SmartDisplayModuleState::ACTIVE_ACTION_REQUIRED.", -"type": "boolean" -}, -"dismissable": { -"deprecated": true, -"description": "Whether the UI module is dismissable.", -"type": "boolean" -}, -"effectiveTime": { -"$ref": "AssistantApiTimestamp", -"description": "The time that the module is effective and visible to the user. If not set, the module is effective immediately." -}, -"expiryTime": { -"$ref": "AssistantApiTimestamp", -"description": "The time that the module is expired and invisible to the user. If not set, the module never expires." -}, -"imageUrl": { -"deprecated": true, -"description": "The image URL for the UI module.", -"type": "string" -}, -"moduleId": { -"description": "Module ID.", -"enum": [ -"UNKNOWN", -"MID_STAY_SURVEY", -"CHECK_OUT", -"CHECK_IN", -"RESET" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"payloadQuery": { -"deprecated": true, -"description": "Payload query to the partner AoG action when user responds to UI Module, e.g. \u201cTell the hotel how my stay is going\u201d.", -"type": "string" -}, -"title": { -"deprecated": true, -"description": "Title of the message to be shown to user at the top of the UI Module.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsHospitalityCardSettingsYouTubeCardConfig": { -"description": "Configuration for YouTube video card (Stargazer tile).", -"id": "AssistantApiSettingsHospitalityCardSettingsYouTubeCardConfig", -"properties": { -"imageUrl": { -"description": "URL of image to go on card. The URL must be a public link accessible from ZeroState.", -"type": "string" -}, -"playlistId": { -"description": "ID of YouTube playlist to play on card tap. 
A playlist is used instead of a single video id to avoid autoplaying related videos. The playlist and the videos it contains must be public or unlisted to be accessible from ZeroState.", -"type": "string" -}, -"text": { -"description": "Text on card (i.e., video title).", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsHospitalityMode": { -"description": "Hospitality mode config for the current device. go/hospitality-mode-design. Next ID: 17", -"id": "AssistantApiSettingsHospitalityMode", -"properties": { -"aogContextId": { -"description": "List of AOG app context ids that are linked to this device. These apps will have access to the structure information for the device.", -"items": { -"type": "string" -}, -"type": "array" -}, -"aogInvocationPhrase": { -"description": "Invocation phrase for hotel's AoG action. Used for ZS promotion card and \"talk to my hotel\" rewrites. Setting this to an empty value will mark it unset.", -"type": "string" -}, -"branding": { -"$ref": "AssistantApiSettingsHospitalityModeBranding" -}, -"cardSettings": { -"$ref": "AssistantApiSettingsHospitalityCardSettings" -}, -"deviceClearRequest": { -"$ref": "AssistantApiTimestamp", -"description": "The time when we received a request to reset the device." -}, -"dialogTtlOverrideMicros": { -"description": "Should the dialog have a shorter ttl. See go/ipp-consumer-prd#heading=h.ibu9b1ysdl4t and go/interpreter-device-clear#bookmark=id.hw8ey1bzjadn for context.", -"format": "int64", -"type": "string" -}, -"enterpriseId": { -"description": "Identifier for the enterprise which owns the device. Setting this to an empty value will mark it unset.", -"type": "string" -}, -"hospitalityModeEnabled": { -"description": "Indicates whether this device is in the hospitality mode.", -"type": "boolean" -}, -"lastDeviceClear": { -"$ref": "AssistantApiTimestamp", -"description": "Last time the device was cleared and placed in hospitality mode. 
Will be set when the switch is toggled on and reset when a guest checks out. On the device this triggers removing alarms, timers, etc." -}, -"lastModifiedTimestamp": { -"$ref": "AssistantApiTimestamp", -"description": "Indicates when hospitality settings were last updated." -}, -"lastWelcomed": { -"$ref": "AssistantApiTimestamp", -"description": "Last time the welcome message was played for the guest. If last_welcomed < welcome_request, the message should be replayed and this time set." -}, -"manualResetRequired": { -"description": "Indicates whether or not the device must be reset manually (by voice or touch), as opposed to being automatically reset. go/hospitality-manual-reset", -"type": "boolean" -}, -"promotedLanguages": { -"description": "In order promoted languages for interpreter devices. This represents languages by BCP-47 language strings, such as \"en\", \"en-US\", \"fr\", \"fr-CA\", \"sr-Latn\", \"zh-Hans-CN\", \"zh-Hant-HK\",etc.", -"items": { -"type": "string" -}, -"type": "array" -}, -"type": { -"enum": [ -"UNKNOWN_TYPE", -"HOTEL_ROOM", -"INTERPRETER", -"SENIOR_LIVING_ROOM", -"RETAIL_DEMO" -], -"enumDescriptions": [ -"", -"", -"go/hospitality-interpreter-mode", -"go/merrill-prd", -"go/assistant-retail-demo" -], -"type": "string" -}, -"verbalResetSupported": { -"description": "Whether we allow users to initiate clearing the device verbally. We generally allow this for private devices and not for public ones.", -"type": "boolean" -}, -"welcomeRequest": { -"$ref": "AssistantApiTimestamp", -"description": "The time when we received a request to welcome the user." -} -}, -"type": "object" -}, -"AssistantApiSettingsHospitalityModeBranding": { -"description": "TODO(b/169423976) Consider moving Branding out of user level settings into enterprise level settings. Partner branding fields used to customize the ui. 
Next ID: 7", -"id": "AssistantApiSettingsHospitalityModeBranding", -"properties": { -"displayName": { -"description": "Brand display in the UI", -"type": "string" -}, -"displayNameForLanguage": { -"additionalProperties": { -"type": "string" -}, -"description": "Brand display in the UI for languages that the enterprise has a localized name that is different from its global branding name. For example, Hilton is '\u30d2\u30eb\u30c8\u30f3' in Japanese and '\u5e0c\u723e\u9813' in Chinese. The keys are hospitality supported display locales, e.g. en, ja-JP, etc, defined in experiment parameter Hospitality__hospitality_display_supported_locales.", -"type": "object" -}, -"largeLogoUrl": { -"type": "string" -}, -"smallLogoUrl": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsHotwordThresholdAdjustmentFactor": { -"description": "HotwordThresholdAdjustmentFactor contains threshold_adjustment_factor, and it's validity. value should only be considered when is_valid = true.", -"id": "AssistantApiSettingsHotwordThresholdAdjustmentFactor", -"properties": { -"isValid": { -"description": "Currently, is_valid is set to false whenever the TAF is not an Autotune aware value. This includes hotword sensitivity users, or devices not eligible for autotune.", -"type": "boolean" -}, -"value": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiSettingsInternalAncillaryDeviceId": { -"description": "Represents supporting device ids.", -"id": "AssistantApiSettingsInternalAncillaryDeviceId", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "Contains device ids known to devices. eg. For ATV, it contains client_instance_id and cast_id." -} -}, -"type": "object" -}, -"AssistantApiSettingsInternalVersion": { -"description": "Represents a version of a specifit setting, e.g. 
DeviceSettings.", -"id": "AssistantApiSettingsInternalVersion", -"properties": { -"generationTime": { -"description": "Contains the timestamp when this version was generated.", -"format": "google-datetime", -"type": "string" -}, -"id": { -"description": "Integer value of the version, it is a monotonically increasing number and starts at 0. On every update it is incremented by 1.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsKidsMode": { -"description": "Kids mode config for the current device. go/aff-parentalsupervision-dd", -"id": "AssistantApiSettingsKidsMode", -"properties": { -"kidsModeEnabled": { -"type": "boolean" -}, -"obfuscatedGaiaId": { -"description": "Identifier of the account currently specified to be used with kids mode.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsLabeledDowntimeSchedule": { -"id": "AssistantApiSettingsLabeledDowntimeSchedule", -"properties": { -"displayName": { -"description": "User-provided name for this schedule.", -"type": "string" -}, -"schedule": { -"$ref": "AssistantApiSettingsDowntimeSchedule" -} -}, -"type": "object" -}, -"AssistantApiSettingsLinkedUser": { -"description": "Represents the profile of the user who has signed in onto this device. 
Next id: 5", -"id": "AssistantApiSettingsLinkedUser", -"properties": { -"castLinkingTime": { -"description": "Time of linking of the device with the user provided by Cast.", -"format": "google-datetime", -"type": "string" -}, -"email": { -"description": "Primary email address of the user.", -"type": "string" -}, -"gaiaId": { -"format": "int64", -"type": "string" -}, -"names": { -"description": "Supports features which depend on profile name, when no matching contact is found.", -"items": { -"$ref": "AppsPeopleOzExternalMergedpeopleapiName" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsMarketplaceDisclosure": { -"id": "AssistantApiSettingsMarketplaceDisclosure", -"properties": { -"confirmed": { -"description": "True if the user has confirmed the marketplace disclosure.", -"type": "boolean" -}, -"timestampMs": { -"description": "The time user confirmed the marketplace disclosure.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsMasqueradeMode": { -"description": "Guest mode for the current device. 
go/assistant-guest-mode-summary", -"id": "AssistantApiSettingsMasqueradeMode", -"properties": { -"lastEnterGuestModeTimestamp": { -"$ref": "AssistantApiTimestamp" -}, -"lastExitGuestModeTimestamp": { -"$ref": "AssistantApiTimestamp" -}, -"masqueradeModeEnabled": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsMusicFilter": { -"description": "Specification of which music features can be used.", -"id": "AssistantApiSettingsMusicFilter", -"properties": { -"availableProviders": { -"description": "Providers available at the time user updated settings.", -"items": { -"enum": [ -"UNKNOWN_MUSIC_PROVIDER", -"YOUTUBE_MUSIC", -"GOOGLE_PLAY_MUSIC", -"SPOTIFY", -"APPLE_MUSIC", -"PANDORA" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"providerFilterState": { -"description": "Represents the state for the music provider filter.", -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL_PROVIDERS", -"ALLOW_WHITELISTED_PROVIDERS" -], -"enumDescriptions": [ -"", -"With this state, all music providers are allowed.", -"This limits the allowed providers to the whitelisted ones. This means that we can block all providers if the whitelist is empty." -], -"type": "string" -}, -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL", -"BLOCK_EXPLICIT", -"BLOCK_ALL" -], -"enumDescriptions": [ -"", -"Allow all music.", -"Block music with explicit lyrics.", -"Block all music." 
-], -"type": "string" -}, -"whitelistedProviders": { -"description": "Contains the list of whitelisted music providers.", -"items": { -"enum": [ -"UNKNOWN_MUSIC_PROVIDER", -"YOUTUBE_MUSIC", -"GOOGLE_PLAY_MUSIC", -"SPOTIFY", -"APPLE_MUSIC", -"PANDORA" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsNewsFilter": { -"description": "Specification of which news features can be used.", -"id": "AssistantApiSettingsNewsFilter", -"properties": { -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL_NEWS", -"BLOCK_ALL_NEWS" -], -"enumDescriptions": [ -"", -"Allow all news.", -"Block all news." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsNotificationProfile": { -"id": "AssistantApiSettingsNotificationProfile", -"properties": { -"alloNotificationProfile": { -"$ref": "AssistantApiSettingsNotificationProfileAlloNotificationProfile", -"description": "Each device can have only one type of notification profile." -} -}, -"type": "object" -}, -"AssistantApiSettingsNotificationProfileAlloNotificationProfile": { -"id": "AssistantApiSettingsNotificationProfileAlloNotificationProfile", -"properties": { -"botSendToken": { -"$ref": "ChatBotPlatformBotSendToken", -"description": "The send token of the conversation with the user." -}, -"id": { -"$ref": "ChatBotPlatformFireballId", -"description": "The fireball id of this user." -} -}, -"type": "object" -}, -"AssistantApiSettingsOnDeviceAppSettings": { -"id": "AssistantApiSettingsOnDeviceAppSettings", -"properties": { -"carrierCallDeviceSettings": { -"$ref": "AssistantApiSettingsCarrierCallDeviceSettings", -"description": "On device carrier call related settings." -}, -"duoCallDeviceSettings": { -"$ref": "AssistantApiSettingsDuoCallDeviceSettings", -"description": "On device duo call related settings." 
-} -}, -"type": "object" -}, -"AssistantApiSettingsPersonalizationMetadata": { -"id": "AssistantApiSettingsPersonalizationMetadata", -"properties": { -"faceMatch": { -"enum": [ -"PERSONALIZATION_FLOW_UNKNOWN", -"PERSONALIZATION_FLOW_DEVICE", -"PERSONALIZATION_FLOW_TWOOBE", -"PERSONALIZATION_FLOW_SLA", -"PERSONALIZATION_FLOW_DEVICE_DELEGATED_CUSTODIO" -], -"enumDescriptions": [ -"", -"", -"TWOOBE: go/twoobe-prd", -"SLA: go/sla-prd", -"Custodio: go/assistant-pc-prd" -], -"type": "string" -}, -"personalResults": { -"enum": [ -"PERSONALIZATION_FLOW_UNKNOWN", -"PERSONALIZATION_FLOW_DEVICE", -"PERSONALIZATION_FLOW_TWOOBE", -"PERSONALIZATION_FLOW_SLA", -"PERSONALIZATION_FLOW_DEVICE_DELEGATED_CUSTODIO" -], -"enumDescriptions": [ -"", -"", -"TWOOBE: go/twoobe-prd", -"SLA: go/sla-prd", -"Custodio: go/assistant-pc-prd" -], -"type": "string" -}, -"voiceMatch": { -"enum": [ -"PERSONALIZATION_FLOW_UNKNOWN", -"PERSONALIZATION_FLOW_DEVICE", -"PERSONALIZATION_FLOW_TWOOBE", -"PERSONALIZATION_FLOW_SLA", -"PERSONALIZATION_FLOW_DEVICE_DELEGATED_CUSTODIO" -], -"enumDescriptions": [ -"", -"", -"TWOOBE: go/twoobe-prd", -"SLA: go/sla-prd", -"Custodio: go/assistant-pc-prd" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsPodcastFilter": { -"description": "Specification of which podcast features can be used.", -"id": "AssistantApiSettingsPodcastFilter", -"properties": { -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL_PODCASTS", -"BLOCK_ALL_PODCASTS", -"BLOCK_EXPLICIT_PODCASTS" -], -"enumDescriptions": [ -"", -"Allow all podcasts.", -"Block all podcasts.", -"Block explicit podcasts." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsPoliteMode": { -"description": "Polite mode config for the current device. 
go/polite-mode-dd", -"id": "AssistantApiSettingsPoliteMode", -"properties": { -"politeModeEnabled": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSettingsReauthTrustedDeviceSettings": { -"description": "Settings related to Assistant reauth. go/assistant-reauth-verify-skip Next id: 2", -"id": "AssistantApiSettingsReauthTrustedDeviceSettings", -"properties": { -"trustSettingsForClient": { -"additionalProperties": { -"$ref": "AssistantApiSettingsReauthTrustedDeviceSettingsTrustSettings" -}, -"description": "Mapping from integrator client id to device's trust settings. Id from assistant/agent_platform/transactions/reauth/reauth_client.proto.", -"type": "object" -} -}, -"type": "object" -}, -"AssistantApiSettingsReauthTrustedDeviceSettingsTrustSettings": { -"description": "Next id: 6", -"id": "AssistantApiSettingsReauthTrustedDeviceSettingsTrustSettings", -"properties": { -"neverAskAgain": { -"description": "If true, don't ask user to trust this device again.", -"type": "boolean" -}, -"neverAskExpirationTimestamp": { -"deprecated": true, -"description": "DEPRECATED: Use never_ask_again instead. Expiration timestamp of \"never ask again\" status. If this field is set and is later than current timestamp, we should NOT ask the user whether they'd like to trust this device.", -"format": "google-datetime", -"type": "string" -}, -"trustDeviceExpirationTimestamp": { -"description": "Expiration timestamp of \"trusted\" status. 
If this field is set and is later than current timestamp, we can consider this device to be trusted.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsSearchFilter": { -"description": "Specification of which search features can be used.", -"id": "AssistantApiSettingsSearchFilter", -"properties": { -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_SAFE_SEARCH", -"BLOCK_SEARCH" -], -"enumDescriptions": [ -"", -"Allow only safe search for the Assistant.", -"Block (almost) all search features. Some \"safe\" features that users wouldn't consider search (like calculator) are still allowed in this mode." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsSpeechOutputSettings": { -"description": "Settings related to TTS output.", -"id": "AssistantApiSettingsSpeechOutputSettings", -"properties": { -"speechOutput": { -"enum": [ -"UNSPECIFIED", -"VERBOSE", -"MIN_VERBOSITY", -"HANDS_FREE_ONLY" -], -"enumDescriptions": [ -"", -"Provide speech output most of the time.", -"Optimize where to provide speech output.", -"Provide speech output only in hands-free mode. No TTS will be provided otherwise." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsSpeechSettings": { -"description": "Settings related to speech detection. See go/hotword-settings-on-cloud for more info. 
Next ID: 16", -"id": "AssistantApiSettingsSpeechSettings", -"properties": { -"continuedConversationEnabled": { -"description": "Indicates whether Continued Conversation is enabled for this device.", -"type": "boolean" -}, -"deviceModelType": { -"description": "Stores the device model type e.g Pixel.", -"type": "string" -}, -"dspAvailable": { -"description": "Whether the device has DSP chip to enable always on hotword detection.", -"type": "boolean" -}, -"hotwordInNavigationEnabled": { -"description": "Whether hotword has been enabled by the user during navigation.", -"enum": [ -"UNAVAILABLE", -"ENABLED", -"DISABLED", -"UNDECIDED", -"OPA_DISABLED", -"UNSUPPORTED_LOCALE", -"INCOMPLETE", -"ENABLED_WITHOUT_OPA_AVAILABILITY" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -false, -true -], -"enumDescriptions": [ -"Setting is unavailable to the user.", -"Setting is available and enabled by the user.", -"Setting is available but disabled by the user.", -"Setting is available, but user never touched this setting.", -"Setting is disabled because Opa is disabled by user.", -"Setting is unavailable because the locale is unsupported. Deprecated, use voice_match_available", -"Setting is incomplete, see go/hotword-incomplete-state", -"Setting is enabled by user without opa availability. 
go/no-opa-yes-vm" -], -"type": "string" -}, -"hotwordSetting": { -"description": "Stores hotword setting status for the locales which don't support voice match.", -"enum": [ -"UNAVAILABLE", -"ENABLED", -"DISABLED", -"UNDECIDED", -"OPA_DISABLED", -"UNSUPPORTED_LOCALE", -"INCOMPLETE", -"ENABLED_WITHOUT_OPA_AVAILABILITY" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -false, -true -], -"enumDescriptions": [ -"Setting is unavailable to the user.", -"Setting is available and enabled by the user.", -"Setting is available but disabled by the user.", -"Setting is available, but user never touched this setting.", -"Setting is disabled because Opa is disabled by user.", -"Setting is unavailable because the locale is unsupported. Deprecated, use voice_match_available", -"Setting is incomplete, see go/hotword-incomplete-state", -"Setting is enabled by user without opa availability. go/no-opa-yes-vm" -], -"type": "string" -}, -"lockscreenEnabled": { -"description": "Whether pin/pattern lockscreen has been enabled by the user.", -"type": "boolean" -}, -"opaEligibilityState": { -"description": "Stores if Assistant is available for the user's device/locale, where Enabled means it is available and disabled means it is not.", -"enum": [ -"UNAVAILABLE", -"ENABLED", -"DISABLED", -"UNDECIDED", -"OPA_DISABLED", -"UNSUPPORTED_LOCALE", -"INCOMPLETE", -"ENABLED_WITHOUT_OPA_AVAILABILITY" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -false, -true -], -"enumDescriptions": [ -"Setting is unavailable to the user.", -"Setting is available and enabled by the user.", -"Setting is available but disabled by the user.", -"Setting is available, but user never touched this setting.", -"Setting is disabled because Opa is disabled by user.", -"Setting is unavailable because the locale is unsupported. Deprecated, use voice_match_available", -"Setting is incomplete, see go/hotword-incomplete-state", -"Setting is enabled by user without opa availability. 
go/no-opa-yes-vm" -], -"type": "string" -}, -"opaEligible": { -"deprecated": true, -"description": "Stores if Assistant is available for the user's device/locale. Deprecated as bools do not give accurate true/false ratios due to old clients reporting the default value.", -"type": "boolean" -}, -"sdkVersion": { -"description": "Stores the Android SDK version. This comes from android.os.Build.VERSION.SDK_INT.", -"format": "int32", -"type": "integer" -}, -"speakerIdModelPresent": { -"description": "Whether speaker ID model is present for the user.", -"type": "boolean" -}, -"speakerIdRecognitionEnabled": { -"deprecated": true, -"description": "Indicates whether the user has enabled speaker-id (fromAnyScreen/alwaysOn) for this device. Deprecated - use voice_match_setting instead", -"type": "boolean" -}, -"trustedVoiceEnabled": { -"description": "Indicates whether the user has enabled trusted voice for this device.", -"type": "boolean" -}, -"unlockWithHotwordAvailable": { -"description": "A bool indicating whether device supports unlocking device with hotword.", -"type": "boolean" -}, -"userMigratedToDeclined": { -"description": "Stores if user was migrated from undecided to declined as apart of Mariko project. 
Used for potential growth targeting.", -"type": "boolean" -}, -"voiceMatchSetting": { -"description": "Stores the hotword/voice match setting status for the locales which support voice match.", -"enum": [ -"UNAVAILABLE", -"ENABLED", -"DISABLED", -"UNDECIDED", -"OPA_DISABLED", -"UNSUPPORTED_LOCALE", -"INCOMPLETE", -"ENABLED_WITHOUT_OPA_AVAILABILITY" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -false, -true -], -"enumDescriptions": [ -"Setting is unavailable to the user.", -"Setting is available and enabled by the user.", -"Setting is available but disabled by the user.", -"Setting is available, but user never touched this setting.", -"Setting is disabled because Opa is disabled by user.", -"Setting is unavailable because the locale is unsupported. Deprecated, use voice_match_available", -"Setting is incomplete, see go/hotword-incomplete-state", -"Setting is enabled by user without opa availability. go/no-opa-yes-vm" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsTetheredInfo": { -"id": "AssistantApiSettingsTetheredInfo", -"properties": { -"primaryHostDeviceId": { -"description": "The host this wearable is tethered to (e.g. phone). When host is AGSA then this is agsa_client_instance_id. When host is IOPA then this is opa_ios_device_id.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsThirdPartyAppsFilter": { -"description": "Specification of which third party apps can be used.", -"id": "AssistantApiSettingsThirdPartyAppsFilter", -"properties": { -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL", -"ALLOW_CERTIFIED_FOR_FAMILIES", -"BLOCK_ALL" -], -"enumDescriptions": [ -"", -"Allow all third party apps.", -"Only allow certified Apps for Families. See https://developers.google.com/actions/apps-for-families/.", -"Block all third party apps." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSettingsVideoFilter": { -"description": "Specification of which video features can be used.", -"id": "AssistantApiSettingsVideoFilter", -"properties": { -"autoplayToggleState": { -"description": "State that indicates whether autoplay is enabled for youtube videos.", -"enum": [ -"UNKNOWN_STATE", -"ENABLED", -"DISABLED" -], -"enumDescriptions": [ -"Default uninitialized state.", -"Allow autoplay for youtube videos.", -"Disable autoplay for youtube videos." -], -"type": "string" -}, -"availableProviders": { -"description": "Providers available at the time user updated settings.", -"items": { -"enum": [ -"UNKNOWN_VIDEO_PROVIDER", -"YOUTUBE", -"YOUTUBE_TV", -"YOUTUBE_KIDS" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"providerFilterState": { -"description": "Represents the state for the video provider filter.", -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL_PROVIDERS", -"ALLOW_WHITELISTED_PROVIDERS" -], -"enumDescriptions": [ -"", -"With this state, all video providers are allowed.", -"This limits the allowed providers to the whitelisted ones. This means that we can block all providers if the whitelist is empty." -], -"type": "string" -}, -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL", -"BLOCK_MATURE_CONTENT", -"BLOCK_ALL" -], -"enumDescriptions": [ -"", -"Allow all videos.", -"Block videos with mature content.", -"Block all videos." 
-], -"type": "string" -}, -"whitelistedProviders": { -"description": "Contains the list of whitelisted video providers.", -"items": { -"enum": [ -"UNKNOWN_VIDEO_PROVIDER", -"YOUTUBE", -"YOUTUBE_TV", -"YOUTUBE_KIDS" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSettingsWebviewFilter": { -"description": "Specification of which webview features can be used.", -"id": "AssistantApiSettingsWebviewFilter", -"properties": { -"jasperWebviewConsent": { -"description": "Indicates if user has consented Jasper warning message.", -"type": "boolean" -}, -"state": { -"enum": [ -"UNKNOWN_STATE", -"ALLOW_ALL_WEBSITES", -"BLOCK_ALL_WEBSITES" -], -"enumDescriptions": [ -"", -"Allow all websites.", -"Block all websites." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSignInMethod": { -"description": "The method of sign in which the client supports.", -"id": "AssistantApiSignInMethod", -"properties": { -"method": { -"enum": [ -"UNSPECIFIED", -"NOT_ALLOWED", -"PHONE_APP", -"ON_DEVICE_MENU", -"WEB_APP" -], -"enumDescriptions": [ -"Unspecified value. Defaults to the same sign-in behavior as PHONE_APP.", -"Sign in is not allowed, e.g., go/telephone.", -"Sign in with Home app, e.g. Google Home.", -"Sign in with on device menu, e.g., LGTV, go/web-assistant", -"Sign in with the Assistant sign in website. See go/baird-web-based-sign-in." -], -"type": "string" -}, -"signInRequired": { -"description": "Make Google sign-in mandatory for using Google Assistant on the device.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSodaCapabilities": { -"description": "Capabilities related to SODA (Speech On-Device API). Next ID: 5", -"id": "AssistantApiSodaCapabilities", -"properties": { -"supportsHotwordSensitivity": { -"description": "Whether the device supports different levels of hotword sensitivity. 
go/hotword-sensitivity-prd", -"type": "boolean" -}, -"supportsSimpleStop": { -"description": "Whether Simple Stop (go/simple-stop) is enabled on the device. Simple stop allows users to stop firing alarms and timers by just saying \"stop\" without first saying the hotword.", -"type": "boolean" -}, -"supportsSpeakerId": { -"description": "Whether the device supports speaker-id (speaker identification based on hotword and/or spoken query - go/speaker-id). Note: there are existing devices that support speaker-id but does not have this capability set. Not having this field populated doesn't necessarily mean the device doesn't support speaker-id.", -"type": "boolean" -}, -"supportsWarmWords": { -"description": "Whether the device supports WarmWords (go/warm-words-framework).", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSoftwareCapabilities": { -"description": "These capabilities represent what software features the client supports. This should be determined based on the client's various software versions (OS, GSA version, etc). Next ID: 28", -"id": "AssistantApiSoftwareCapabilities", -"properties": { -"appCapabilities": { -"description": "IMPORTANT: Only one of AppCapabilities and AppCapabilitiesDelta should be in the SoftwareCapabilities. In the edge case if the client sends up both AppCapabilities and AppCapabilitiesDelta, AppCapabilitiesDelta is ignored. 
Complete list of app capabilities.", -"items": { -"$ref": "AssistantApiAppCapabilities" -}, -"type": "array" -}, -"appCapabilitiesDelta": { -"description": "Incremental update for app capabilities.", -"items": { -"$ref": "AssistantApiAppCapabilitiesDelta" -}, -"type": "array" -}, -"appIntegrationsSettings": { -"additionalProperties": { -"$ref": "AssistantApiAppIntegrationsSettings" -}, -"description": "App integrations settings for each packge name.", -"type": "object" -}, -"carAssistantCapabilities": { -"$ref": "AssistantApiCarAssistantCapabilities", -"description": "Capabilities related to Assistant on Auto surfaces." -}, -"clockCapabilities": { -"$ref": "AssistantApiClockCapabilities", -"description": "Capabilities related to clock functionality, like alarms, timers, etc." -}, -"conversationVersion": { -"$ref": "AssistantApiSupportedConversationVersion", -"description": "A top-level version of Conversation protocol where the versions are explicitly defined at go/conversation-versions." -}, -"crossDeviceExecutionCapabilities": { -"$ref": "AssistantApiCrossDeviceExecutionCapability", -"description": "For torus x-device execution support" -}, -"gacsCapabilities": { -"$ref": "AssistantApiGacsCapabilities" -}, -"gcmCapabilities": { -"$ref": "AssistantApiGcmCapabilities" -}, -"liveTvChannelCapabilities": { -"$ref": "AssistantApiLiveTvChannelCapabilities", -"description": "Capabilities related to live TV channels." -}, -"oemCapabilities": { -"$ref": "AssistantApiOemCapabilities", -"description": "List of actions OEM supports. This includes built-in actions and custom actions." -}, -"onDeviceAssistantCapabilities": { -"$ref": "AssistantApiOnDeviceAssistantCapabilities", -"description": "on-device Assistant capabilities" -}, -"onDeviceSmartHomeCapabilities": { -"$ref": "AssistantApiOnDeviceSmartHomeCapabilities", -"description": "Capability bits for on-device Smart Home. 
go/framework-for-local-semex" -}, -"onDeviceStorageCapabilities": { -"$ref": "AssistantApiOnDeviceStorageCapabilities", -"description": "Reflects the storage capabilities on the device." -}, -"operatingSystem": { -"description": "The operating system of the device.", -"enum": [ -"OS_TYPE_UNKNOWN", -"OS_TYPE_ANDROID", -"OS_TYPE_CAST", -"OS_TYPE_FUCHSIA", -"OS_TYPE_LINUX" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"orderedLiveTvProviders": { -"description": "An ordered list containing the live tv providers available in the client. The order of the providers reflects the ranking in the client and will be respected by server as well.", -"items": { -"$ref": "AssistantApiLiveTvProvider" -}, -"type": "array" -}, -"robinCapabilities": { -"$ref": "AssistantApiRobinCapabilities", -"description": "Capabilities of Robin client." -}, -"selinaCapabilities": { -"$ref": "AssistantApiSelinaCapabilites", -"description": "The Soli capabilities on Elaine. go/dingo-dc-software" -}, -"settingsAppCapabilities": { -"$ref": "AssistantApiSettingsAppCapabilities" -}, -"supportedClientOp": { -"items": { -"$ref": "AssistantApiSupportedClientOp" -}, -"type": "array" -}, -"supportedFeatures": { -"$ref": "AssistantApiSupportedFeatures" -}, -"supportedMsgVersion": { -"$ref": "AssistantApiSupportedProtocolVersion" -}, -"supportedProviderTypes": { -"$ref": "AssistantApiSupportedProviderTypes" -}, -"surfaceProperties": { -"$ref": "AssistantApiSurfaceProperties" -} -}, -"type": "object" -}, -"AssistantApiSpeechCapabilities": { -"description": "DEPRECATED These capabilities are associated with speech detection on devices.", -"id": "AssistantApiSpeechCapabilities", -"properties": { -"dspAvailable": { -"description": "A bool indicating whether device supports dsp based hotword detection.", -"type": "boolean" -}, -"unlockWithHotwordAvailable": { -"description": "A bool indicating whether device supports unlocking device with hotword.", -"type": "boolean" -} -}, -"type": 
"object" -}, -"AssistantApiSuggestionsSupport": { -"description": "Next ID: 18", -"id": "AssistantApiSuggestionsSupport", -"properties": { -"clickImpersonationSupported": { -"description": "Whether client supports user impersonation on suggestion chip click. go/suggestion-click-impersonation", -"type": "boolean" -}, -"debugDataSupported": { -"description": "Whether client supports suggestions debug data to be displayed.", -"type": "boolean" -}, -"drlHistoryChipSupported": { -"description": "Whether DRL history chip is supported. Related bug: http://b/241837879, http://b/171854732 Design doc: http://go/panthera-history-chip-dd DRL history chip was originally rolled out to Panthera in http://google3/googledata/experiments/mobile/agsa/studies/agsa_nga/opa_panthera_one_input_ui_launch.gcl?l=55&rcl=384682900. We plan to roll it out to NGA and TNG. drl_history_chip_supported bit specifies whether the client support (and should have) DRL history chip.", -"type": "boolean" -}, -"escapeHatchSupported": { -"description": "Whether client supports escape hatches aka post execution suggestions go/nga-escape-hatch-prd", -"enum": [ -"UNSUPPORTED", -"NGA_ESCAPE_HATCH" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"executedTextSupported": { -"description": "Whether the client can rewrite suggestion query text into executed text, if the latter is present. If this feature is disabled, the rewrite happens in Assistant Server.", -"type": "boolean" -}, -"executionContextSupported": { -"description": "Whether the client supports passing back `execution_context` from |assistant.api.client_op.SuggestionProcessingParams| when the suggestion is clicked or spoken.", -"type": "boolean" -}, -"featureSpecificActionSupport": { -"$ref": "AssistantApiFeatureSpecificActionSupport", -"description": "Whether the client supports features in |SuggestionFeatureSpecificAction|." 
-}, -"featureSpecificAppActionsNotificationSupported": { -"description": "Whether the client supports handling App Actions' notification when the suggestion is clicked. This will allow the server to populate the `app_actions_notification_data` extension field from |SuggestionFeatureSpecificAction| proto message.", -"type": "boolean" -}, -"multiStepTrySayingSupported": { -"description": "Whether the multi-step try saying suggestion feature is supported. dd: go/tng-multi-step-simplified", -"type": "boolean" -}, -"ruleIdInExecutionContextSupported": { -"description": "Whether the rule_id field in the execution_context is supported. This is a temporary workaround to be able to identify clicks on Person entity suggestions on Sabrina and is expected to be eventually deprecated. TODO(b/185517153) : Deprecate (but do not delete) once click tracking is correctly sent up from the Katniss client.", -"type": "boolean" -}, -"showExecutedTextSupported": { -"description": "Whether the client can show executed_text after the click on the suggestion chip. Must be set to false on TNG. TNG disregards |SuggestionProcessingParams.show_executed_text| field and always treats it as if |show_executed_text=true|.", -"type": "boolean" -}, -"showTranslationSupported": { -"description": "Whether the client can show chip as (text | translation). go/lang-partner-doc", -"type": "boolean" -}, -"supportedDisplayTargets": { -"description": "A list of suggestions display targets supported by this client. 
If unset only DEFAULT SuggestionDisplayTarget is supported.", -"items": { -"$ref": "AssistantApiSuggestionsSupportDisplayTargetSupport" -}, -"type": "array" -}, -"widgetDataSupported": { -"description": "Whether client supports widget suggestion chip to be displayed.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSuggestionsSupportDisplayTargetSupport": { -"id": "AssistantApiSuggestionsSupportDisplayTargetSupport", -"properties": { -"executedTextSupported": { -"description": "Whether the client can rewrite suggestion query text into executed text, if the latter is present for the display target.", -"type": "boolean" -}, -"headerTextSupported": { -"description": "Whether PresentationParams.header_text is supported for the display target.", -"type": "boolean" -}, -"repressImpressionSupported": { -"description": "Whether Suggestion.repress_impression is supported. If not repressed suggestions are not returned.", -"type": "boolean" -}, -"target": { -"description": "Display target that is supported.", -"enum": [ -"DEFAULT", -"NGA_INPUT_PLATE", -"CONVERSATION_STARTERS", -"QUICK_ACTIONS", -"TACTILE_ASSISTANT_SUGGESTS", -"TACTILE_MY_ACTIONS", -"TRY_SAYING", -"RICH_SUGGESTIONS" -], -"enumDeprecated": [ -false, -false, -true, -true, -true, -true, -true, -true -], -"enumDescriptions": [ -"Use the default display target.", -"Show Escape Hatch chips. 
The chips are displayed outside of response layer for example over app that query was fulfilled in.", -"Show chips as Conversation Starters in Opa and Panthera.", -"Show chips as Panthera quick actions (go/quick-actions-dd).", -"Show chips in Tactile \"Assistant Suggests\" section.", -"Show chips in Tactile \"My Actions\" section.", -"Show chips as \"Try Saying\" suggestions.", -"Show suggestions as rich suggestions and chips; go/prs-redesign-dd" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSunriseFeaturesSupport": { -"id": "AssistantApiSunriseFeaturesSupport", -"properties": { -"sunriseSimulationSupported": { -"description": "If true, the device can slowly brighten the screen and simulate sunrise experience. Alarms with sunrise field enabled can be set on this device.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSupportedClientOp": { -"description": "These are the set of ClientOps that are supported by the device.", -"id": "AssistantApiSupportedClientOp", -"properties": { -"clientOpName": { -"description": "This should be the same as the name of the SemanticClientOp that is supported.", -"type": "string" -}, -"clientOpProperties": { -"$ref": "AssistantApiProtobuf", -"description": "The properties associated with the ClientOp. This proto should be associated with the client_op_name." -}, -"supportedExecution": { -"$ref": "AssistantApiSupportedClientOpSupportedExecution" -}, -"version": { -"description": "A version of 0 is the equivalent to not having support for that client_op type. Note that a client_op is also unsupported if it is not included at all in the list of supported client_ops.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiSupportedClientOpSupportedExecution": { -"description": "Additional properties that client can support for executing the client op. 
They are surface-specific execution properties and are unrelated to the execution model.", -"id": "AssistantApiSupportedClientOpSupportedExecution", -"properties": { -"supportsPartialFulfillment": { -"description": "ClientOp execution supports special rendering behavior while the user is in the middle of expressing their query. This behavior includes: 1) New partial output always over-writes prior partial output. 2) Canceling the interaction removes partial fulfilment from any user visible interaction history. If this is true, whether to apply the special rendering behavior will be determined by PartialFulfillmentRenderingParams. More details can be found at go/ma-natcon-pf-api.", -"type": "boolean" -}, -"supportsSynchronousExecution": { -"description": "Client can support synchronous execution of the client op. For tts.OUTPUT client op it means that client would honor |synchronous_playback_args| argument. Please see more at go/synchronous-sounds-design.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSupportedConversationVersion": { -"description": "The overall Conversation Protocol version. As we make fundamental changes to Conversation protocol that are non-backwards compatible, we will increment the protocol version. By default, all clients will support version 0. All versions are documented at go/conversation-versions.", -"id": "AssistantApiSupportedConversationVersion", -"properties": { -"supportsConversationProtocol": { -"description": "Whether conversation protocol is supported explicitly. If true, SingleDeviceCapabilityChecker::SupportsConversationProtocol will always return true.", -"type": "boolean" -}, -"version": { -"description": "The supported version number.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiSupportedFeatures": { -"description": "These are the set of features that are supported by the device. It's a part of the SoftwareCapabilities of the device. 
Next ID: 69", -"id": "AssistantApiSupportedFeatures", -"properties": { -"aaeNotificationSourceSupported": { -"description": "Whether the client supports the alternative message notification sources on AAE, in which case notification-related operations can access it.", -"type": "boolean" -}, -"acpSupport": { -"$ref": "AssistantApiAssistantContinuedPresenceSupport", -"description": "In what way is assistant continued presence supported. (go/opa-acp-prd)" -}, -"actionV2SupportedFeatures": { -"$ref": "AssistantApiActionV2SupportedFeatures" -}, -"alarmTimerManagerApiSupported": { -"description": "Whether the client supports AlarmTimerManager API (go/alarm-timer-manager-api).", -"type": "boolean" -}, -"appControlSupport": { -"$ref": "AssistantApiAppControlSupport", -"description": "The client information for app control support. More details in: go/acaia." -}, -"assistantExploreSupported": { -"description": "Whether the client supports the assistant explore section. This field will be active only when the Explore section is available to the user. This means that the user is (a) signed-in, (b) a IOPA / AGSA user, and (c) in a locale where explore is available.", -"type": "boolean" -}, -"assistantForKidsSupported": { -"description": "Whether Assistant for Kids (a.k.a. Designed for Family) features are supported.", -"type": "boolean" -}, -"bypassDiDcCheckForComms": { -"description": "Whether communications flows for the client can bypass the DI/DC check. 
The client will enforce some other equivalent permission as necessary concerning access to device contacts and apps.", -"type": "boolean" -}, -"bypassMsgNotificationDismissal": { -"description": "Whether or not Assistant should enforce the dismissal of communication notifications associated with messages.", -"type": "boolean" -}, -"client1mProvidersSupported": { -"description": "Whether the client supports 1m providers (go/1m-partner-expansion).", -"type": "boolean" -}, -"clientOpResultBatchingSupported": { -"description": "Whether the client can batch client op results before sending them to the server.", -"type": "boolean" -}, -"confirmationBeforeReadingMultipleMessagesSupported": { -"description": "Whether the client supports confirmation flow before announcement of multiple messages. If set to true the user will be prompted once and confirmation will be taken before all the messages are announced.", -"type": "boolean" -}, -"conversationalCareSupported": { -"description": "Whether the client can render conversational care responses. go/conversational-care", -"type": "boolean" -}, -"crossDeviceBroadcastSupported": { -"deprecated": true, -"description": "Whether the client supports cross-device broadcast (i.e. on Torus).", -"type": "boolean" -}, -"crossDeviceBroadcastVersion": { -"description": "The version of cross device broadcast (ie; broadcast on torus) which the client supports.", -"enum": [ -"CROSS_DEVICE_BROADCAST_NOT_SUPPORTED", -"CROSS_DEVICE_BROADCAST_V1" -], -"enumDescriptions": [ -"Broadcast with torus is not supported on this device.", -"This device supports the cross device broadcast V1 features." -], -"type": "string" -}, -"csatVisualOverlaySupported": { -"description": "Whether the client supports csat visual overlay. (go/sd-od-csat)", -"type": "boolean" -}, -"duoClientApiFeatures": { -"description": "The features set which duo client on the device supports. 
This should be serialized from proto {@code duo_client_api.DuoClientApiFeatures}.", -"format": "byte", -"type": "string" -}, -"duoGroupCallingSupported": { -"description": "Whether the client supports Duo group calling.", -"type": "boolean" -}, -"fitnessFeatureSupport": { -"$ref": "AssistantApiFitnessFeatureSupport", -"description": "Information about what support this device has for fitness." -}, -"fluidActionsSupport": { -"$ref": "AssistantApiFluidActionsSupport", -"description": "Fluid Actions features supported by the client. If this field is not set in the incoming request, it could mean that the client does not support Fluid Actions. Alternatively, it could mean that the client supports Fluid Actions, but syncs state with server using the old protocol, namely ConversationStateParams. When b/140733618 is resolved, Surface Adaptation Layer will add this field for old clients that support Fluid Actions framework." -}, -"funtimeSupported": { -"description": "Whether the surface client op performer supports Funtime alarms and timers. go/funtime-engdesign", -"type": "boolean" -}, -"gdiSupported": { -"description": "Whether account linking via Google Deep Integrations (GDI) is supported. go/opa-gdi-design", -"type": "boolean" -}, -"gearheadNotificationSourceSupported": { -"description": "Whether the client supports the Gearhead message notification source, in which case notification-related operations can access it.", -"type": "boolean" -}, -"hasPhysicalRadio": { -"description": "Whether the client has a physical radio installed.", -"type": "boolean" -}, -"immersiveCanvasConfirmationMessageSupported": { -"deprecated": true, -"description": "Whether the client supports confirmation messages in Immersive Canvas actions. 
Deprecated: use the filed in immersive_canvas_support.", -"type": "boolean" -}, -"immersiveCanvasSupport": { -"$ref": "AssistantApiImmersiveCanvasSupport" -}, -"inDialogAccountLinkingSupported": { -"description": "Whether the client supports account linking in-dialog (askForSignIn). This is used before this feature is moved to conversation protocol. To support this, the client needs to: - Integrate with Google Deep Integrations. - Have logic to send the result of account linking back to AS.", -"type": "boolean" -}, -"isPairedPhoneContactUploadNeededForComms": { -"description": "Whether paired-phone contact upload is needed for communications queries to work (e.g. on AAE).", -"type": "boolean" -}, -"isPairedPhoneNeededForComms": { -"description": "Whether a Bluetooth-paired phone is a core component of communications flows on the client.", -"type": "boolean" -}, -"justInTimeSupported": { -"description": "Whether the client supports confirmation flow when a permission is missing. If set to true, the user will be prompted and on confirmation the original flow will continue.", -"type": "boolean" -}, -"launchKeyboardSupported": { -"description": "Which way of launching the keyboard the client supports.", -"enum": [ -"LAUNCH_KEYBOARD_UNSUPPORTED", -"OPA_ANDROID_LAUNCH_KEYBOARD_URI" -], -"enumDescriptions": [ -"Launching the keyboard from a suggestion chip is not supported.", -"The keyboard can be opened by using the opa-android://launch_keyboard uri. Takes the url-encoded parameters: - query: the string to prefill the keyboard with. - start: (optional) the start position of the span to highlight. - end: (optional) the end position of the span to highlight." -], -"type": "string" -}, -"lensSupported": { -"description": "Whether the client has Google Lens (Assistant Eyes).", -"type": "boolean" -}, -"liveCardsSupported": { -"description": "Whether the surface supports LiveCards. 
In cases where the user intent flow cannot be completed within the Assistant, LiveCards are used to take the user to an external app or website. These cards will be pushed to the Google Home app via the PushMessage ClientOp.", -"type": "boolean" -}, -"lottieAnimationSupport": { -"description": "Whether the client supports the Lottie animation library for the broadcast feature.", -"enum": [ -"LOTTIE_ANIMATION_UNKNOWN", -"LOTTIE_ANIMATION_NOT_SUPPORTED", -"LOTTIE_ANIMATION_SUPPORTED" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"mapsDialogsSupported": { -"description": "Whether the client supports Assistant dialogs within Maps. This field will be set only when the Maps on the surface supports Assistant dialogs embedded within Maps. go/gsa-gmm.", -"type": "boolean" -}, -"masqueradeModeSupported": { -"description": "Whether the device supports masquerade mode (go/masquerade).", -"type": "boolean" -}, -"mediaControlSupport": { -"$ref": "AssistantApiMediaControlSupport", -"description": "Information about how client handles media controls (play, pause, skip ...)" -}, -"mediaSessionDetection": { -"description": "The ability of the client to detect media sessions on the device.", -"enum": [ -"UNKNOWN_MEDIA_SESSION_DETECTION", -"RELIABLE_MEDIA_SESSION_DETECTION", -"UNRELIABLE_MEDIA_SESSION_DETECTION", -"NO_MEDIA_SESSION_DETECTION", -"MEDIA_SESSION_DETECTION_DISABLED_SCREEN_CONTEXT" -], -"enumDescriptions": [ -"", -"The client has perfect knowledge of what is playing on the device. For example, Chirp falls into this category. We can reliably choose to control or punt based on media params.", -"The client has some knowledge about the media sessions on the device. For example, OPA TV falls into this category. For these cases, we should probably always try to return the media control client op to the client.", -"The client has no idea about the media playback on the device. 
For these cases, we should either punt or attempt to control media and silently fail.", -"Media session detection is supported by the client but disabled by the user (for example: OPA_CROS) by turning off screen context setting." -], -"type": "string" -}, -"meetSupported": { -"description": "Whether the client supports joining a Google Meet meeting.", -"type": "boolean" -}, -"messageSummarySupport": { -"$ref": "AssistantApiMessageSummarySupport", -"description": "The client information for message summary. More details in: go/roadwayrecap-prd" -}, -"noInputResponseSupported": { -"description": "Whether the client can render no input response or just ignore it. No input response is returned when client has a no speech input interaction, eg. user tapped mic but didn't say anything.", -"type": "boolean" -}, -"opaOnSearchSupported": { -"description": "When the entry source is search, whether the client supports rendering a similar response as OPA one does. Entry source is defined at http://cs/symbol:assistant.api.params.DeviceProperties.EntrySource", -"type": "boolean" -}, -"parentalControlsSupported": { -"description": "Whether or not the client supports enabling parental controls. When a device supports parental controls, it has the software necessary to store the relevant information required for parental controls to work. This information includes a boolean \"enabled bit\" as well as the obfuscated gaia ID of the kid account selected for use with parental controls. Devices supporting kids mode send this information to S3 via S3ClientInfo in every request. See go/aff-kidsproduct for details.", -"type": "boolean" -}, -"persistentDisplaySupported": { -"description": "Whether the client supports persistent display. The new feature allows Assistant devices with screen to display a continuously updating permanent display, such as ambient weather, without the need for a user to ask the Assistant. 
Design doc: go/assistant-persistent-display.", -"type": "boolean" -}, -"privacyAwareLockscreenSupported": { -"description": "Whether the client supports the privacy-aware lockscreen protocol (go/assistant-lockscreen-spec).", -"type": "boolean" -}, -"readMessagesTtsTaperingSupported": { -"description": "Whether the client supports tts tapering for the read messages flow. If set to true the tapered tts variant would be used during the read messages flow.", -"type": "boolean" -}, -"readNotificationSummarizationSupported": { -"deprecated": true, -"description": "Whether the client supports message summarization. Deprecated, use message_summary_support instead.", -"type": "boolean" -}, -"remoteCloudCastingEnabled": { -"description": "Whether the client has remote casting enabled. For ex: we want to disable this for clients like Auto.", -"type": "boolean" -}, -"serverGeneratedFeedbackChipsEnabled": { -"description": "Whether the Assistant Server should generate feedback suggestion chips.", -"type": "boolean" -}, -"shLockScreenSupported": { -"description": "Whether the client supports SmartHome lock screen logic (i.e. on Tangor).", -"type": "boolean" -}, -"signInMethod": { -"$ref": "AssistantApiSignInMethod", -"description": "Which kind of sign in the client supports." -}, -"sleepSensingSupported": { -"deprecated": true, -"description": "Whether the client can monitor sleep. This allows us to show sleep CUJ related information: go/TwilightDesign Use for development only, see the same field in DeviceCapabilities.SoftwareCapabilities.SelinaCapabilities.", -"type": "boolean" -}, -"smartspaceCrossDeviceTimerSupported": { -"description": "Whether the client supports smart space cross-device timers. (go/ss-x-device-timer)", -"type": "boolean" -}, -"soliGestureDetectionSupported": { -"description": "Whether or not the client supports gesture detection via soli chips. The reason to prepend the name with soli is to distinguish it from computer vision based methods, e.g. 
Newman devices.", -"type": "boolean" -}, -"suggestionsSupport": { -"$ref": "AssistantApiSuggestionsSupport", -"description": "Suggestion chips features, supported by the client." -}, -"sunriseFeaturesSupport": { -"$ref": "AssistantApiSunriseFeaturesSupport", -"description": "Whether the client supports the sunrise screen brightening feature before the alarm fires. This is used to indicate whether sunrise alarms can be set on the device. http://cs/symbol:assistant.api.core_types.governed.RingtoneTaskMetadata.GentleWakeInfo" -}, -"tapToReadOptimizationSupported": { -"description": "Whether the client supports faster optimization for tap_to_read feature.", -"type": "boolean" -}, -"thirdPartyGuiSupported": { -"description": "Whether the device supports the 3p GUI framework, which allows third parties to enter the conversation with the user, showing their logo next to their chat bubbles, etc. go/3p-phone", -"type": "boolean" -}, -"transactionFeaturesSupport": { -"$ref": "AssistantApiTransactionFeaturesSupport", -"description": "Transactions features, supported by the client. Transactions feature may include how Transactions team want to populate additional information from the device to the server." -}, -"transactionsVersion": { -"description": "The version of transactions which the client supports.", -"enum": [ -"NO_TRANSACTIONS", -"TRANSACTIONS_INITIAL_LAUNCH", -"TRANSACTIONS_V2", -"TRANSACTIONS_V3" -], -"enumDeprecated": [ -false, -false, -true, -false -], -"enumDescriptions": [ -"Transactions are not supported on this device.", -"This device supports the client features present at the initial consumer launch of transactions.", -"This device supports the transaction version v2 client features, includes transactions clientops. Deprecated given there're some iOS version populate this enum but don't support clientops.", -"This device supports the transaction version v3 client features, includes transactions clientops." 
-], -"type": "string" -}, -"usesSeparateFullViewer": { -"description": "If set, it indicates that the client can open a separate HTML browser/webviewer (full viewer) to display certain visual results. These visual results usually require more memory to render (e.g. high resolution photos). Compared to the regular viewer that display all other Assistant result, the full viewer does not have memory limit. The field is copied from the device model. See http://google3/assistant/devices_platform/proto/device_model_capabilities.proto?l=225&rcl=312576471 Also see go/webassistant-full-card-viewer.", -"type": "boolean" -}, -"viewReminderHubPageNotSupported": { -"description": "Whether the client supports viewing of reminder hub page or not. Default is supported. Set to true to disable returning reminder hub page url in reminder responses.", -"type": "boolean" -}, -"warmWelcomeTutorialSupported": { -"description": "Whether the client supports the programmatic warm welcome tutorial. Design doc: go/opal-pww-design.", -"type": "boolean" -}, -"webBrowserSupported": { -"description": "Whether the client supports opening a URL in a web browser. For example, we want to disable this for clients like Chirp.", -"type": "boolean" -}, -"zoomSupported": { -"description": "Whether the client supports joining a Zoom meeting.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSupportedProtocolVersion": { -"description": "Contains versions of protocol buffer messages. This is the equivalence of a proto3 map, keyed by a protocol buffer message\u2019s name, and the value is the version of this message. e.g. 
{\"assistant.api.core_types.Timer\": 2, \"assistant.api.core_types.Alarm\": 1} See go/assistant-protocol-versioning for more details.", -"id": "AssistantApiSupportedProtocolVersion", -"properties": { -"messageVersion": { -"items": { -"$ref": "AssistantApiSupportedProtocolVersionMessageVersionPair" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSupportedProtocolVersionMessageVersionPair": { -"id": "AssistantApiSupportedProtocolVersionMessageVersionPair", -"properties": { -"messageName": { -"description": "The full path of a message which should start from the package name. e.g. \"assistant.api.core_types.Timer\".", -"type": "string" -}, -"version": { -"description": "The supported version number.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiSupportedProviderTypes": { -"description": "Types of providers that are supported by the client. For example, ChromeOS support both web app and Android app (for eligible devices).", -"id": "AssistantApiSupportedProviderTypes", -"properties": { -"supportedTypes": { -"items": { -"enum": [ -"PROVIDER_TYPE_UNSPECIFIED", -"ANDROID_APP", -"CAST_APP", -"CLOUD_PROVIDER", -"SIP_PROVIDER", -"IOS_APP", -"INTERNAL_PROVIDER", -"WEB_PROVIDER", -"KAIOS_APP", -"HOME_APP", -"CHROMEOS_APP" -], -"enumDescriptions": [ -"", -"For AndroidAppInfo", -"For CastAppInfo", -"For CloudProviderInfo", -"For SipProviderInfo", -"For IosAppInfo", -"For InternalProviderInfo", -"For WebProviderInfo", -"For KaiOsAppInfo", -"For HomeAppInfo", -"For ChromeOsAppInfo" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiSurfaceProperties": { -"description": "Properties of the surface that are not hardware related or feature specific.", -"id": "AssistantApiSurfaceProperties", -"properties": { -"executionCapabilities": { -"$ref": "AssistantApiSurfacePropertiesExecutionCapabilities" -}, -"prefersGlanceableUi": { -"deprecated": true, -"description": "DEPRECATED: This field is used only 
by the Hummingbird effort, which has culminated and is being cleaned up (b/325065733). Indicates if the surface prefers glanceable UI when rendering Assistant response. (see go/mobile-dr-hbird-glanceable-comms-cuj for more context).", -"type": "boolean" -}, -"responseDisplayFormat": { -"description": "If this field is unset, the response format is unknown", -"enum": [ -"SINGLE_ITEM", -"MULTIPLE_ITEMS", -"FULL_HISTORY" -], -"enumDescriptions": [ -"The surface can only show a single item (card, display text, etc) of the response at a time. For example, if both a display text and a card are sent in a response, the surface may only show the card to the user.", -"The surface can show a simple multi item response (e.g. both a display text and a card), but can't show response items from a previous response.", -"The surface can show complex multi item responses (e.g. display text, card, display text) as well as a history of previous responses." -], -"type": "string" -}, -"supportsMultiResponse": { -"description": "If true, the client supports receiving multiple responses. See go/multiple-response-in-media-use-cases for more details.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSurfacePropertiesExecutionCapabilities": { -"description": "Describes the capabilities that are related to the execution of client ops on the device.", -"id": "AssistantApiSurfacePropertiesExecutionCapabilities", -"properties": { -"supportsClientOpPreloading": { -"description": "Completes the preloading ie., sets up the stage for the execution of client ops on the device while the previous conv delta is being executed. Refer to go/preload-convdelta for more information.", -"type": "boolean" -}, -"supportsNonFinalizedResponses": { -"description": "A value of true indicates that the client supports streaming of non-finalized responses by use of ClientExecutionParams.response_stream_id. 
and ClientExecutionParams.to_be_finalized.", -"type": "boolean" -}, -"supportsNonMaterializedInteractions": { -"description": "If true, the client supports receiving non-materialized interactions (go/as-streaming-protocol-nm).", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiSystemNotificationRestrictions": { -"description": "Restrictions related to system-level notifications.", -"id": "AssistantApiSystemNotificationRestrictions", -"properties": { -"categoryState": { -"items": { -"$ref": "AssistantApiSystemNotificationRestrictionsNotificationCategoryState" -}, -"type": "array" -}, -"channelState": { -"items": { -"$ref": "AssistantApiSystemNotificationRestrictionsNotificationChannelState" -}, -"type": "array" -}, -"notificationCapabilities": { -"description": "Specifies whether the surface is able to display notifications.", -"enum": [ -"NO_NOTIFICATION_CAPABILITY", -"NOTIFICATIONS_DISABLED", -"NOTIFICATIONS_ENABLED" -], -"enumDescriptions": [ -"The surface is not able to display notifications.", -"The notifications are disabled on the surface.", -"The notifications are enabled." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSystemNotificationRestrictionsNotificationCategoryState": { -"description": "Notification channels state for the new server driven channels.", -"id": "AssistantApiSystemNotificationRestrictionsNotificationCategoryState", -"properties": { -"categoryId": { -"description": "Notification channel type.", -"format": "int32", -"type": "integer" -}, -"disabled": { -"description": "Weather the notifications on this channel are disabled.", -"type": "boolean" -}, -"disabledReason": { -"enum": [ -"NONE", -"ASSISTANT_CATEGORY_SETTING", -"ASSISTANT_OVERALL_SETTING", -"OS_APP_DISABLED", -"OS_CHANNEL_GROUP_DISABLED", -"OS_CHANNEL_DISABLED" -], -"enumDescriptions": [ -"", -"The category was turned off in Assistant Notification Settings.", -"All notifications for Assistant was turned off in settings.", -"All notifications for the Assistant (GSA) app was turned off.", -"Notifications for the channel group was turned off.", -"Notifications for the channel was turned off." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiSystemNotificationRestrictionsNotificationChannelState": { -"description": "Notification channels state.", -"id": "AssistantApiSystemNotificationRestrictionsNotificationChannelState", -"properties": { -"channelType": { -"description": "Notification channel type.", -"enum": [ -"TYPE_UNKNOWN", -"TYPE_OPA_PROACTIVE", -"TYPE_OPA_HANDOFF", -"TYPE_OPA_MISC", -"TYPE_OPA_RECOMMENDATIONS", -"TYPE_OPA_PRODUCT_UPDATES", -"TYPE_OPA_THIRD_PARTY" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"enabled": { -"description": "Whether the notifications on this channel are enabled.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiThirdPartyActionConfig": { -"description": "3P Action Metadata. 
Next ID: 3", -"id": "AssistantApiThirdPartyActionConfig", -"properties": { -"deviceActionCapability": { -"$ref": "AssistantDevicesPlatformProtoDeviceActionCapability", -"description": "DeviceActionCapability from DeviceModelPackage." -}, -"projectConfigs": { -"description": "List of Action project capabilities.", -"items": { -"$ref": "AssistantApiThirdPartyActionConfigProjectConfig" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantApiThirdPartyActionConfigProjectConfig": { -"description": "Metadata for ActionPackage. Device Actions are disabled by default unless explicitly enabled for the device here, see go/3p-device-actions-v2-design.", -"id": "AssistantApiThirdPartyActionConfigProjectConfig", -"properties": { -"projectId": { -"description": "Google cloud project id for which the Action Package or Device Model is registered.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiThirdPartyCapabilities": { -"id": "AssistantApiThirdPartyCapabilities", -"properties": { -"dataSharingRestrictions": { -"description": "Restrictions for the device to share any data with third party apps. See details in go/atv-dsc.", -"enum": [ -"DEFAULT_NO_DATA_SHARING_RESTRICTION", -"NO_SHARING_ALLOWED_WITH_THIRD_PARTY", -"SHARING_STATUS_NOT_SET", -"NO_SHARING_ALLOWED_WITH_THIRD_PARTY_FROM_OOBE" -], -"enumDescriptions": [ -"User has accepted to share the data with third party apps. The device has no data sharing restrictions.", -"User has declined to share the data with third party apps.", -"User hasn't made a choice on whether to share data with third party apps.", -"User has declined to share the data with third party apps during OOBE flow. With this state, the user should see the data sharing consent compact screen. See cetails in http://shortn/_JsLg0OeEJj." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiTimeOfDay": { -"description": "A civil time relative to a timezone.", -"id": "AssistantApiTimeOfDay", -"properties": { -"hour": { -"description": "The hour, in 0...23.", -"format": "int32", -"type": "integer" -}, -"minute": { -"description": "The minute, in 0...59.", -"format": "int32", -"type": "integer" -}, -"nanosecond": { -"description": "The fraction of seconds in nanoseconds, in 0..999999999.", -"format": "int32", -"type": "integer" -}, -"second": { -"description": "The second, in 0...59. Leap seconds are not supported.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantApiTimeZone": { -"description": "A time zone. Conceptually, a time zone is a set of rules associated with a location that describes a UTC offset and how it changes over time (e.g. Daylight Saving Time). The offset is used to compute the local date and time.", -"id": "AssistantApiTimeZone", -"properties": { -"ianaId": { -"description": "Time zone in IANA format, e.g. America/Los_Angeles for USA Pacific Time.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiTimestamp": { -"description": "An absolute point in time independent of timezone or calendar, based on the proto3 Timestamp (//google/protobuf/timestamp.proto). NOTE: THIS IS NO LONGER RECOMMENDED TO BE USED. It was originally defined separately from google.protobuf.Timestamp due to incompatibility with proto2 syntax. The incompatibility issues have since been resolved and so the Google-wide standard representation of google.protobuf.Timestamp should be preferred. 
In fact, google.protobuf.* protos in general are now recommended to be used in new APIs.", -"id": "AssistantApiTimestamp", -"properties": { -"nanos": { -"description": "Non-negative fractions of a second at nanosecond resolution.", -"format": "int32", -"type": "integer" -}, -"seconds": { -"description": "Seconds of UTC time since the Unix epoch.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantApiTransactionFeaturesSupport": { -"id": "AssistantApiTransactionFeaturesSupport", -"properties": { -"voicePinSuppressed": { -"description": "If true, setting this boolean means the device should not support voice PIN. For example, although the phone supports both voice and PIN pad, but we don't want users using voice. https://docs.google.com/document/d/1M8iJQX3GuxGZGeidS8Gl4KJt3LuBWAIlolPlW10DkxU/edit#heading=h.8ovvdd3i2thv", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantApiVolumeProperties": { -"id": "AssistantApiVolumeProperties", -"properties": { -"defaultVolumePercentage": { -"description": "The volume percentages for spelled out values.", -"format": "int32", -"type": "integer" -}, -"highVolumePercentage": { -"format": "int32", -"type": "integer" -}, -"levelStepSize": { -"description": "The number of levels to move for a step.", -"format": "double", -"type": "number" -}, -"lowVolumePercentage": { -"format": "int32", -"type": "integer" -}, -"maximumVolumeLevel": { -"description": "The max number of volume levels the client supports.", -"format": "int32", -"type": "integer" -}, -"mediumVolumePercentage": { -"format": "int32", -"type": "integer" -}, -"veryHighVolumePercentage": { -"format": "int32", -"type": "integer" -}, -"veryLowVolumePercentage": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantDeviceTargetingDeviceTargetingError": { -"description": "The information associated with an error while selecting the target device. 
Next ID: 2", -"id": "AssistantDeviceTargetingDeviceTargetingError", -"properties": { -"type": { -"enum": [ -"UNKNOWN_ERROR", -"NO_DEVICE_IN_SAME_STRUCTURE", -"NO_DEVICE_IN_SAME_NETWORK_OR_LOCATION", -"NO_DEVICE_IN_SAME_STRUCTURE_OR_NETWORK_OR_LOCATION", -"NO_DEVICE_SATISFIES_SAME_STRUCTURE_OR_UNKNOWN_IF_OWNED", -"NO_DEVICE_SATISFIES_CAPABILITIES_REQUIREMENT", -"NO_DEVICE_MATCHED_DEVICE_ANNOTATION", -"MULTI_TARGET_DEVICE_NOT_SUPPORTED", -"NO_DEVICE_AFTER_LOCAL_DEVICE_EXCLUDED", -"UNABLE_TO_TARGET_ONLY_LOCAL_DEVICE", -"NO_DEVICE_MATCHED_REQUIRED_TRAITS", -"NO_DEVICE_MATCHED_REQUIRED_ATTRIBUTES", -"NO_DEVICE_MATCHED_REQUIRED_DEVICE_TYPE", -"NO_DEVICE_IN_SAME_OR_MENTIONED_STRUCTURE", -"NO_DEVICE_SATISFIES_PLAYBACK_REQUIREMENT", -"STRUCT_DISAMBIG_NOT_SUPPORTED", -"ROOM_DISAMBIG_NOT_SUPPORTED", -"UNRECOGNIZED_DEVICE_NAME", -"NO_LINKED_REMOTE_DEVICES", -"NO_LINKED_REMOTE_VIDEO_DEVICES", -"NO_SAFE_DEVICE_WITH_SCREEN", -"ALL_QUALIFIED_DEVICES_OFFLINE", -"CROSS_STRUCTURE_TARGETING_DISALLOWED", -"NO_DEVICE_MEETS_PROVIDER_REQUIREMENT", -"MISSING_LOCAL_DEVICE_SETTING", -"NO_DEVICE_HAS_REQUIRED_APP", -"HYBRID_DEVICE_NOT_QUALIFIED", -"NO_NEARBY_DEVICES", -"NO_MATCHING_LAST_USED_DEVICE" -], -"enumDescriptions": [ -"", -"Error about when there's no qualified devices in the same home graph structure as the local device.", -"Error about when there's no qualified devices in the same network or location as the local device.", -"Error about when there's no qualified devices in the same home graph structure or network or location as the local device.", -"Error about when no qualified device was found that matched the following conditions: 1) Both local device and target device are in the same home graph structure. 2) The distance to target device is unknown (i.e. either local device or target device is homeless) and target device is owned by the user. 
3) If local device and target device are in different structures, only allowed when the query explicitly mentioned device name.", -"Error about when no qualified device satisfies the device capability requirement.", -"Error of no qualified devices matched requirements from device annotation.", -"Error of not supporting targeting multiple devices.", -"Error of no device left after local device is excluded within inclusiveness filter.", -"Error of unable to target only local device within inclusiveness filter.", -"Error of no qualified devices matched the required smart home traits.", -"Error of no qualified devices matched the required Smart Home attributes.", -"Error of no qualified devices matched the required smart home device types.", -"Error of no qualified devices in the mentioned structure or in the same structure as the local device.", -"Error of no device satisfying playback requirements.", -"Error when structure disambiguation is not supported in the config. When config is supported (both structure_disambiguation_supported and multi_target_supported are set to true), the library would return structures for disambiguation.", -"Error when room disambiguation is not supported in the config. When config is supported (both room_disambiguation_supported and multi_target_supported are set to true), the library would return rooms for disambiguation.", -"Error when the device mentioned in the query is not recognized. In other words, we can't find a device which matched the device mentioned in the query.", -"Used when user requests to target on a remote device and there are no remote devices linked.", -"Used when user requests to target on a remote video device and there are no remote video devices linked.", -"Error when there is no safe screen device. For example, requesting screen-requiring content from Auto surface.", -"Error when all qualified devices are offline.", -"When user tries to target a device which is in a different structure and is not allowed. 
For example, if user doesn't own the structure. See go/on-multi-user-access-transitivity for more details.", -"Used when user requests a device with certain provider but no device can satisfies provider requirement.", -"AvailableDevice device setting of local device is missing.", -"Required app is not installed", -"All remote hybrid devices fail the propertries requirement specific (lock/unlock, etc) for hybrid devices (such as Tangor).", -"No nearby hearing devices", -"No matching device for the LastUsedDevice requirement found." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoAlarmCapability": { -"description": "Capability with regard to support of alarms by the client.", -"id": "AssistantDevicesPlatformProtoAlarmCapability", -"properties": { -"maxSupportedAlarms": { -"description": "Maximum number of alarms that can be created on the client. Zero or unset indicates no maximum limit.", -"format": "int32", -"type": "integer" -}, -"restrictAlarmsToNextDay": { -"description": "Whether the client restricts alarms to ring within the next 24 hours.", -"type": "boolean" -}, -"supportsGenericMutations": { -"description": "Whether the client supports mutation of any alarm attribute such as time_pattern, recurrence_pattern, etc through the MUTATE action. If this is false, MUTATE may only be used to disable or dismiss an alarm. This field is true by default for backwards-compatibility; clients which don't support mutation outside of Disable/DismissAlarm should explicitly set this to false.", -"type": "boolean" -}, -"supportsStopAction": { -"description": "Whether the client supports the STOP alarm action. 
If this is false, stop actions will be represented by the MUTATE action, and the device may need to check alarm state to determine if there's a firing alarm that needs to be dismissed.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoArgSpec": { -"id": "AssistantDevicesPlatformProtoArgSpec", -"properties": { -"intValueSpec": { -"$ref": "AssistantDevicesPlatformProtoIntValueSpec" -}, -"optionValueSpec": { -"$ref": "AssistantDevicesPlatformProtoOptionValueSpec" -}, -"type": { -"enum": [ -"TYPE_UNSPECIFIED", -"BOOL", -"INTEGER", -"OPTION" -], -"enumDescriptions": [ -"A type must be specified.", -"", -"The arg value type is integer. The value can be any integer unless int_value_spec is provided below.", -"The arg value is from list of options. The valid options are specified from optional_value_spec below." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoCallCallCapability": { -"id": "AssistantDevicesPlatformProtoCallCallCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoClientReconnectCapability": { -"id": "AssistantDevicesPlatformProtoClientReconnectCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoCloudCapability": { -"description": "States the cloud capabilities of the device, i.e. the endpoint(s) to use for cloud execution of Actions or Registration.", -"id": "AssistantDevicesPlatformProtoCloudCapability", -"properties": { -"cloudEndpoints": { -"description": "The list of CloudEndpoints supported by this Device Model. Note that each should have a unique |name|. If any cloud endpoints are provided here, then the first one in the list will be used by default for all Cloud Execution. 
An Intent may override the default by providing an |execution_config|.", -"items": { -"$ref": "AssistantDevicesPlatformProtoCloudEndpoint" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoCloudEndpoint": { -"description": "A cloud endpoints associates with this device, it can be used for query parsing, or cloud execution.", -"id": "AssistantDevicesPlatformProtoCloudEndpoint", -"properties": { -"name": { -"description": "The name for this cloud endpoint. It's unique per Locale. This is not an API resource name. Ex: sample-nlu-endpoint", -"type": "string" -}, -"scopes": { -"description": "The list of scopes to be provided in the OAuth2 token. They must be a subset of the scopes registered in the Account Linking flow, or the request will fail. If the client itself provides the token, then this field is ignored.", -"items": { -"type": "string" -}, -"type": "array" -}, -"url": { -"description": "The URL for this endpoint, it must start with https.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoCoreDismissAssistantCapability": { -"id": "AssistantDevicesPlatformProtoCoreDismissAssistantCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoDeviceActionCapability": { -"description": "This capability represents device action needed capability. Next ID: 10", -"id": "AssistantDevicesPlatformProtoDeviceActionCapability", -"properties": { -"assistantDeviceInRoomOptOut": { -"description": "Integrate your device with Google's Smart Home solution by putting your device into Google's Home Graph, a database that stores and provides contextual data about the home and its devices. 
For example, Home Graph can store the concept of a living room that contains multiple types of devices, when you say \"turn on the light\" to a device, if you have light in the living room, that light will be turned on.", -"type": "boolean" -}, -"builtInIntentMode": { -"description": "Specifies behavior for built-in device actions for this device model. If not specified, defaults to ENABLE_CONFIGURED_INTENTS_ONLY.", -"enum": [ -"INTENT_MODE_UNSPECIFIED", -"ENABLE_ALL", -"ENABLE_ALL_AUTO_ACTIONS", -"ENABLE_CONFIGURED_INTENTS_ONLY", -"PUNT_FOR_UNCONFIGURED_INTENTS" -], -"enumDescriptions": [ -"", -"All intents are enabled, regardless of whether the intent is registered in the `intents` field.", -"All Auto device actions are enabled, regardless of whether the intent is registered in the `intents` field. Only use it for Auto device type.", -"Only the intents registered in the `intents` field will be enabled.", -"Only the intents registered in the `intents` field will be enabled. Additionally, read an error message when the unconfigured intents are invoked." -], -"type": "string" -}, -"customIntentMode": { -"description": "Specifies which custom device actions should be enabled for this device model. This will only affect the behavior of intents corresponding to those from the Action Package of this project. If not specified, defaults to ENABLE_ALL.", -"enum": [ -"INTENT_MODE_UNSPECIFIED", -"ENABLE_ALL", -"ENABLE_ALL_AUTO_ACTIONS", -"ENABLE_CONFIGURED_INTENTS_ONLY", -"PUNT_FOR_UNCONFIGURED_INTENTS" -], -"enumDescriptions": [ -"", -"All intents are enabled, regardless of whether the intent is registered in the `intents` field.", -"All Auto device actions are enabled, regardless of whether the intent is registered in the `intents` field. Only use it for Auto device type.", -"Only the intents registered in the `intents` field will be enabled.", -"Only the intents registered in the `intents` field will be enabled. 
Additionally, read an error message when the unconfigured intents are invoked." -], -"type": "string" -}, -"defaultExecutionConfig": { -"$ref": "AssistantDevicesPlatformProtoExecutionConfig", -"description": "Default instructions for routing of any Intent. The data here could be overridden for specific Intents if provided directly in the 'intents' field." -}, -"inlinedActionCapability": { -"$ref": "AssistantDevicesPlatformProtoInlinedActionCapability", -"description": "Specifies capabilities for device actions that are inlined in the google.assistant.embedded.v1.DeviceAction message." -}, -"intents": { -"description": "Intent configurations. Built-in and custom intents may be configured here. Note that built-in intents will always behave with IntentMode of ENABLE_CONFIGURED_INTENTS_ONLY. The IntentMode for custom intents can be changed using the custom_intent_mode. To configure an intent, list it here with its intent name, e.g. \"MY_CUSTOM_INTENT\", \"google.assistant.car.model.capabilities.AC_TEMPERATURE\".", -"items": { -"$ref": "AssistantDevicesPlatformProtoIntent" -}, -"type": "array" -}, -"providedData": { -"description": "Provided data which augments the device action capabilities. Some built-in intents may require additional configuration to be provided. One example could be the list of channels available for the `action.intent.SelectChannel` intent.", -"items": { -"$ref": "AssistantDevicesPlatformProtoProvidedData" -}, -"type": "array" -}, -"traits": { -"description": "List of built-in traits such as \"action.devices.traits.OnOff\" See java/com/google/home/graph/service/config/protoconf.pi As of Nov. 2017, we also support custom traits for EAP users. We'll eventually disable custom traits once custom actions are in place.", -"items": { -"type": "string" -}, -"type": "array" -}, -"understandingConfig": { -"$ref": "AssistantDevicesPlatformProtoUnderstandingConfig", -"description": "Specifies the format how Google routes queries to 3P cloud. 
By default, this field is unset, all partners should get shallow NLU. This is needed *ONLY* for specific partners for strong business reasons." -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoDeviceModifySettingCapability": { -"id": "AssistantDevicesPlatformProtoDeviceModifySettingCapability", -"properties": { -"clientOpProperty": { -"$ref": "AssistantApiClientOpPropertiesDeviceModifySettingClientOpProperty" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoDeviceTakePhotoCapability": { -"id": "AssistantDevicesPlatformProtoDeviceTakePhotoCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoExecutionConfig": { -"description": "Specifies the routing capabilities of the Intent. It will apply only when the Intent is triggered. Next ID: 8", -"id": "AssistantDevicesPlatformProtoExecutionConfig", -"properties": { -"cloudEndpointName": { -"description": "Instructions for performing a cloud execution request for the Intent when the execution_type is set to CLOUD. If non-empty, then the device execution would be routed to the CloudEndpoint specified by this name. The Account Linking exchange may be performed to fetch the OAuth access token, and the access token will be included in the HTTP header.", -"type": "string" -}, -"cloudIntentTranslationDisabled": { -"description": "If this field is set, then the Syndication cloud call will be disabled for this intent. Note this only applies if any Syndication cloud endpoint is associated with the Device Model, otherwise setting this field does nothing. By default, all Intents that are enabled and supported by the Syndication API will be routed through the Syndication cloud endpoint if it's provided.", -"type": "boolean" -}, -"intentCommandFormat": { -"description": "Specifies the intent command format for this Action. 
For example, in order to launch an Android intent instead of receiving the device action payload on the client, then this field can be set with \"intent:/#Intent;...;my_extra={$.params.channels[0].channelCode};end\" The parameter \"{$.params.channels[0].channelCode}\" is in JSON path format, and will be replaced with the content from the original device action payload. Thus, with # JSON \"execution\": [ { \"command\": \"action.devices.commands.SelectChannel\", \"params\": { \"channels\": [{ \"channelName\": \"exampleChannel\", \"channelCode\": \"1-1\" }] } } ] as the original action result, then the final result would look like \"intent:/#Intent;...;my_extra=\\\"1-1\\\";end\" ", -"type": "string" -}, -"localDisabled": { -"description": "If this field is set, then local execution capability is disabled for all matching intents.", -"type": "boolean" -}, -"localExecutionType": { -"description": "Specifies how to execute this Action when it is invoked locally (from the same device.)", -"enum": [ -"DEFAULT", -"CLOUD" -], -"enumDescriptions": [ -"Use the default mechanism (route requests directly to the device via the regular channel.)", -"Route all execution requests to cloud." -], -"type": "string" -}, -"remoteDisabled": { -"description": "If this field is set, then remote execution capability is disabled for all matching intents.", -"type": "boolean" -}, -"remoteExecutionType": { -"description": "Specifies how to execute this Action when it is invoked remotely (from a different device.)", -"enum": [ -"DEFAULT", -"CLOUD" -], -"enumDescriptions": [ -"Use the default mechanism (route requests directly to the device via the regular channel.)", -"Route all execution requests to cloud." 
-], -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoExecutionWaitCapability": { -"id": "AssistantDevicesPlatformProtoExecutionWaitCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoGetHealthObservationCapability": { -"id": "AssistantDevicesPlatformProtoGetHealthObservationCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoInlinedActionCapability": { -"id": "AssistantDevicesPlatformProtoInlinedActionCapability", -"properties": { -"alarm": { -"$ref": "AssistantDevicesPlatformProtoAlarmCapability", -"description": "Specifies capabilities for handling on-device alarms. The presence of this field, even if empty, implies that the device supports alarms." -}, -"responseLimits": { -"$ref": "AssistantDevicesPlatformProtoResponseLimits", -"description": "Specifies the size limits on responses. If message is not defined then no limits exist." -}, -"supportSdkExecute": { -"description": "Whether this device model package support sdk.EXECUTE client_op (a.k.a action.devices.EXECUTE intent), which will be filled into google.assistant.embedded.v1.DeviceAction.device_request_json. It is default to true (and not public), since all 3P will depends on the device_request_json. Only internal projects like Edoras will set this to false.", -"type": "boolean" -}, -"supportedDeviceOps": { -"$ref": "AssistantDevicesPlatformProtoSupportedDeviceOps", -"description": "Specifies capabilities for handling assistant.embedded.v1.DeviceOp." -}, -"supportsMultiResponse": { -"description": "Specifies whether server can send a series of responses for a single query. Example: Routines where multiple actions to be executed one after another.", -"type": "boolean" -}, -"timer": { -"$ref": "AssistantDevicesPlatformProtoTimerCapability", -"description": "Specifies capabilities for handling on-device timers. The presence of this field, even if empty, implies that the device supports timers." 
-}, -"ttsOutput": { -"$ref": "AssistantDevicesPlatformProtoTtsOutputCapability", -"description": "Specifies whether client supports receiving `DeviceAction.tts_output`." -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoIntValueSpec": { -"id": "AssistantDevicesPlatformProtoIntValueSpec", -"properties": { -"maxValue": { -"format": "int64", -"type": "string" -}, -"minValue": { -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoIntent": { -"description": "An intent configures the behavior of a device action for this device. Next ID: 7", -"id": "AssistantDevicesPlatformProtoIntent", -"properties": { -"argSpecs": { -"additionalProperties": { -"$ref": "AssistantDevicesPlatformProtoArgSpec" -}, -"description": "List of arguments associated this intent. Each of which depends a template for the expected argument.", -"type": "object" -}, -"executionConfig": { -"$ref": "AssistantDevicesPlatformProtoExecutionConfig", -"description": "Instructions for the routing of this Intent." -}, -"name": { -"description": "The name of the intent.", -"type": "string" -}, -"providedDataNames": { -"description": "List of provided data names used by this intent. Note that some built-in intents will not function properly without provided data, such as `action.intent.SwitchChannel` or `action.intent.AppSelector`.", -"items": { -"type": "string" -}, -"type": "array" -}, -"securityConfig": { -"$ref": "AssistantDevicesPlatformProtoSecurityConfig", -"description": "Security configuration for this Intent." -}, -"triggerConditions": { -"description": "The conditions which must be met by the device before executing this Intent. More than one can be provided, in which case the conditions operate with the \"AND\" operator, i.e. 
the first condition which is failed will be used to restrict the execution of this Intent.", -"items": { -"$ref": "AssistantDevicesPlatformProtoTriggerCondition" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoInternalCapability": { -"description": "Capabilities that may only be set internally. Only internal callers (i.e. Googlers or Google owned projects) will be able to set these, thanks to go/assistant-device-model-package-ownership.", -"id": "AssistantDevicesPlatformProtoInternalCapability", -"properties": { -"allowedAssistantSdkAuthProjectIds": { -"description": "When using the Assistant SDK (Embedded Assistant API), the project id used to authenticate the gRPC request is checked and must match against the project id of the Device Model. We will additionally allow the project ids listed in the device model here to be let through. See https://docs.google.com/document/d/1InAczpQJs6LCH1l--2yy67JM9hsBJbiL57fusnL3A8A", -"items": { -"type": "string" -}, -"type": "array" -}, -"appCapabilitiesFromDeviceInstallApps": { -"description": "Load the assistant.api.AppCapabilities from DEVICE_INSTALLED_APP footprint corpus. See go/edoras-geller.", -"type": "boolean" -}, -"cloudDeviceActionEndpoint": { -"$ref": "AssistantDevicesPlatformProtoCloudEndpoint", -"description": "Uses this endpoint for device action fulfillment when there's no endpoint in syndication_metadata. 1p surfaces/devices such as telephone can enable this for its cloud action fulfillment without enabling the whole syndication experience." -}, -"deviceActionsEligibleForHighConfidence": { -"description": "Signals that the model will have updated ranking behavior as described in https://docs.google.com/document/d/1SN_AgadRr_cdIrFe-qgRbIX2J1sOE7lcRXAvM1GUPoU.", -"type": "boolean" -}, -"forceSignIn": { -"description": "Make Google sign-in mandatory for using Google Assistant on the device. 
(This bit is initially added for Samsung TV.)", -"type": "boolean" -}, -"forceThirdPartyDeviceIdForDeviceLookup": { -"description": "When looking up device (for example for disclosure consent check), then always use the third party device id for lookup instead of any other device id which would normally have higher precedence, such as cast_device_id.", -"type": "boolean" -}, -"forceTransactionsAuthentication": { -"description": "Adds \"transactions.AUTHENTICATION\" for car automation probers. Since the probers run as Assistant SDK requests, voice match always fails for car automation requests, so we add this client op as a hack to allow probers to appear as personal devices and bypass voice match. See b/137221645.", -"type": "boolean" -}, -"hasCustomSearchResultsRendering": { -"description": "Signals that this device can \"render\" raw search results even with no screen (e.g., using a text reader). If this is true, fallback search results can be returned as a custom device action in a SearchResults message. http://google3/assistant/embedded/proto_translation/utils/proto/search_results.proto", -"type": "boolean" -}, -"overrideProjectIdForDeviceLookup": { -"description": "When looking up device (for example for disclosure consent check), use this project id as part of the primary key for the device lookup (i.e. instead of the device_config.agent_id.) The precedence is as follows: 1) this field, if set for the device's device model 2) device_config.agent_id 3) device_model.project_id", -"type": "string" -}, -"stadiaAssistantConfig": { -"$ref": "AssistantDevicesPlatformProtoInternalCapabilityStadiaAssistantConfig" -}, -"telephoneAttribution": { -"description": "Telephone server is able to send attribution to user feature phone. See go/telephone-attribution.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoInternalCapabilityStadiaAssistantConfig": { -"description": "The StadiaAssistantConfig. 
This field should only be set if the device model is a Stadia.", -"id": "AssistantDevicesPlatformProtoInternalCapabilityStadiaAssistantConfig", -"properties": { -"stadiaPlatform": { -"enum": [ -"UNSPECIFIED", -"CHROMECAST", -"WEB_BROWSER" -], -"enumDescriptions": [ -"Platform unspecified.", -"Platform is Chromecast.", -"Platform is web browser." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaNextCapability": { -"id": "AssistantDevicesPlatformProtoMediaNextCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaPauseCapability": { -"id": "AssistantDevicesPlatformProtoMediaPauseCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaPlayMediaCapability": { -"id": "AssistantDevicesPlatformProtoMediaPlayMediaCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaPreviousCapability": { -"id": "AssistantDevicesPlatformProtoMediaPreviousCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaResumeCapability": { -"id": "AssistantDevicesPlatformProtoMediaResumeCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaShowControlsCapability": { -"id": "AssistantDevicesPlatformProtoMediaShowControlsCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoMediaStopCapability": { -"id": "AssistantDevicesPlatformProtoMediaStopCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoOptionValueSpec": { -"id": "AssistantDevicesPlatformProtoOptionValueSpec", -"properties": { -"values": { -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoProvidedData": { -"description": "Provided data which augments the device action capabilities. Some built-in intents may require additional configuration to be provided. 
One example could be the list of channels available for the `action.intent.SelectChannel` intent.", -"id": "AssistantDevicesPlatformProtoProvidedData", -"properties": { -"name": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoProviderFulfillCapability": { -"id": "AssistantDevicesPlatformProtoProviderFulfillCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoProviderOpenCapability": { -"id": "AssistantDevicesPlatformProtoProviderOpenCapability", -"properties": { -"clientOpProperty": { -"$ref": "AssistantApiClientOpPropertiesProviderOpenClientOpProperty" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoResponseLimits": { -"description": "Specifies the size limits on responses receivable by the client.", -"id": "AssistantDevicesPlatformProtoResponseLimits", -"properties": { -"maxAssistResponseSizeBytes": { -"description": "Max size in bytes of the total serialized AssistResponse receivable by the client. If response exceeds this max, response may be modified by the server.", -"format": "int32", -"type": "integer" -}, -"maxDisplayLinesBytes": { -"description": "Maximum size in bytes (not characters) of text the display can handle (which may be different from how much the display can show at a time due to scrolling).", -"format": "int32", -"type": "integer" -}, -"maxSuggestionChipBytes": { -"description": "Maximum size in bytes (not characters) for each suggestion chip.", -"format": "int32", -"type": "integer" -}, -"maxSuggestionChips": { -"description": "Maximum number of suggestion chips the device can handle to display.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoSecurityConfig": { -"description": "Encapsulates security configuration for a single intent of a device model.", -"id": "AssistantDevicesPlatformProtoSecurityConfig", -"properties": { -"authMechanismForRemoteRequests": { -"description": "Specifies auth mechanism to be used upon 
remote request for device action.", -"enum": [ -"NONE", -"ENABLED", -"FINGERPRINT_OR_PASSWORD", -"PIN" -], -"enumDeprecated": [ -false, -false, -true, -true -], -"enumDescriptions": [ -"Indicates that auth is not required.", -"Indicates that auth is required. Auth mechanism used is determined automatically at query time, based on various factors such as device type, user settings, etc.", -"Use fingerprint, with fallback to Google account password if fingerprint capabilities are not available on personal device. Note: explicit configuration of specific auth mechanism is being deprecated.", -"Prompt for pin, sent to cloud for validation. Note: explicit configuration of specific auth mechanism is being deprecated." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoSendChatMessageCapability": { -"id": "AssistantDevicesPlatformProtoSendChatMessageCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoSupportedDeviceOps": { -"description": "This message will specify supports for fields in |assistant.embedded.v1.DeviceOp|, for a device model package. See go/easi-client-op2 for more info. Next ID: 19", -"id": "AssistantDevicesPlatformProtoSupportedDeviceOps", -"properties": { -"callCall": { -"$ref": "AssistantDevicesPlatformProtoCallCallCapability", -"description": "|call_call| specifies the support for the call.CALL clientop, and the corresponding call_call field in assistant.embedded.v1.DeviceOp." -}, -"clientReconnect": { -"$ref": "AssistantDevicesPlatformProtoClientReconnectCapability", -"description": "|client_reconnect| indicates support for client.RECONNECT using assistant.embedded.v1.DeviceOp. There is an alternative API/capability for client.RECONNECT specified in RoutineCapability.supports_reconnect. Client should choose between this and RoutineCapability but not both." 
-}, -"coreDismissAssistant": { -"$ref": "AssistantDevicesPlatformProtoCoreDismissAssistantCapability", -"description": "[core_dismiss_assistant] specifies the support for core.DISMISS_ASSISTANT client_op and the corresponding core_dismiss_assistant field in assistant.embedded.v1.DeviceOp." -}, -"deviceModifySetting": { -"$ref": "AssistantDevicesPlatformProtoDeviceModifySettingCapability", -"description": "|device_modify_setting| specifies the support for device.MODIFY_SETTING client_op, and the corresponding device_modify_setting field in assistant.embedded.v1.DeviceOp." -}, -"deviceTakePhoto": { -"$ref": "AssistantDevicesPlatformProtoDeviceTakePhotoCapability", -"description": "[device_take_photo] specifies the support for the device.TAKE_PHOTO clientop, and the corresponding device_take_photo field in assistant.embedded.v1.DeviceOp." -}, -"executionWait": { -"$ref": "AssistantDevicesPlatformProtoExecutionWaitCapability", -"description": "|execution_wait| specifies the support for execution.WAIT client_op, and the corresponding execution_wait field in assistant.embedded.v1.DeviceOp." -}, -"getHealthObservation": { -"$ref": "AssistantDevicesPlatformProtoGetHealthObservationCapability", -"description": "|get_health_observation| specifies the support for get_health_observation client_op, and the corresponding get_health_observation field in assistant.embedded.v1.DeviceOp." 
-}, -"mediaNext": { -"$ref": "AssistantDevicesPlatformProtoMediaNextCapability" -}, -"mediaPause": { -"$ref": "AssistantDevicesPlatformProtoMediaPauseCapability" -}, -"mediaPlayMedia": { -"$ref": "AssistantDevicesPlatformProtoMediaPlayMediaCapability" -}, -"mediaPrevious": { -"$ref": "AssistantDevicesPlatformProtoMediaPreviousCapability" -}, -"mediaResume": { -"$ref": "AssistantDevicesPlatformProtoMediaResumeCapability" -}, -"mediaShowControls": { -"$ref": "AssistantDevicesPlatformProtoMediaShowControlsCapability" -}, -"mediaStop": { -"$ref": "AssistantDevicesPlatformProtoMediaStopCapability" -}, -"providerFulfill": { -"$ref": "AssistantDevicesPlatformProtoProviderFulfillCapability", -"description": "|provider_fulfill| specifies the support for provider.FULFILL client_op, and the corresponding provider_fulfill field in assistant.embedded.v1.DeviceOp." -}, -"providerOpen": { -"$ref": "AssistantDevicesPlatformProtoProviderOpenCapability", -"description": "|provider_open| specifies the support for provider.OPEN client_op, and the corresponding provider_open field in assistant.embedded.v1.DeviceOp." -}, -"sendChatMessage": { -"$ref": "AssistantDevicesPlatformProtoSendChatMessageCapability", -"description": "|send_chat_message| specifies the support for the chat_message.SEND clientop, and the corresponding send_chat_message field in assistant.embedded.v1.DeviceOp." -}, -"uiShowInterpreter": { -"$ref": "AssistantDevicesPlatformProtoUiShowInterpreterCapability", -"description": "|ui_show_interpreter| specifies the support for ui.SHOW_INTERPRETER client_op, and the corresponding ui_show_interpreter field in assistant.embedded.v1.DeviceOp." 
-} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoTimerCapability": { -"description": "Capability with regard to support of timers by the client.", -"id": "AssistantDevicesPlatformProtoTimerCapability", -"properties": { -"maxSupportedExtendedTimerDuration": { -"$ref": "AssistantApiDuration", -"description": "Maximum extended timer duration supported by the client. The extended timer duration is the total start-to-finish duration after an AddTimeToTimer operation. E.g. if a user sets a timer for 30 minutes, and later adds 10 minutes, the extended duration is 40 minutes. Zero or unset indicates no maximum limit." -}, -"maxSupportedTimerDuration": { -"$ref": "AssistantApiDuration", -"description": "Maximum timer duration supported by the client. Zero or unset indicates no maximum limit." -}, -"maxSupportedTimers": { -"description": "Maximum number of timers that can be created on the client. Zero or unset indicates no maximum limit.", -"format": "int32", -"type": "integer" -}, -"supportsMutateAction": { -"description": "Whether the client supports the MUTATE timer action. If this is false, mutate operations may be handled by sending a pair of REMOVE and CREATE timer actions to replace the existing timer instead of mutating it.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoTriggerCondition": { -"description": "A TriggerCondition is described as a set of states which must be met by the device. 
It also includes instructions to the Assistant on what kind of response to execute when the condition is not met.", -"id": "AssistantDevicesPlatformProtoTriggerCondition", -"properties": { -"requiredStateValues": { -"additionalProperties": { -"$ref": "AssistantDevicesPlatformProtoArgSpec" -}, -"description": "The map of state keys along with their values which must be returned by the device, for example to start the dishwasher you may require states: {\"door\": \"CLOSED\", \"detergent_status\": \"READY\"}.", -"type": "object" -}, -"simpleTts": { -"description": "A simple TTS to play.", -"type": "string" -}, -"status": { -"description": "Refers to a defined ConditionalResult keyed by its status. It could be a built-in or custom ConditionalResult for this Intent. Note: the states provided by the device MUST contain all of the states required by the ConditionalResult.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoTtsOutputCapability": { -"description": "Capabilities with regard to support of outputting TTS.", -"id": "AssistantDevicesPlatformProtoTtsOutputCapability", -"properties": { -"supportsStructuredTts": { -"description": "Specifies whether client supports out-of-band TTS synthesis. 
When this is enabled, client could receive TTS request message in `SpeechOutputArgs.structured_tts` which it can then use to make request to S3 for synthesizing TTS audio.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantDevicesPlatformProtoUiShowInterpreterCapability": { -"id": "AssistantDevicesPlatformProtoUiShowInterpreterCapability", -"properties": {}, -"type": "object" -}, -"AssistantDevicesPlatformProtoUnderstandingConfig": { -"description": "Specifies the NLU level that Google performs, which determines the request format sent to the 3P cloud.", -"id": "AssistantDevicesPlatformProtoUnderstandingConfig", -"properties": { -"nluLevel": { -"description": "Specifies the NLU level for the intent.", -"enum": [ -"DEFAULT_SHALLOW_NLU", -"NO_NLU", -"DEEP_NLU" -], -"enumDescriptions": [ -"Default value: Shallow NLU, the cloud request contains a structured intent including the command (e.g. \"action.devices.commands.SelectChannel\"), the unparsed description (e.g. \"Tom Cruise action movies\"), the target device and the target application.", -"No NLU, the cloud request contains the transcribed query (raw query or rewritten query based on previous conversation).", -"Deep NLU, the cloud request specifies a structured description of the media the user wants to act upon. 
E.g.: for query \"Play an action movie with Tom Cruise\", the structured description would be: \"parsedMediaDescription\": { \"mediaType\": \"MOVIE\", \"genre\": { \"name\": \"action\", \"externalId\": { \"tmsId\": \"tmsIdForActionGenre\" }, }, \"actor\": { \"name\": \"Tom Cruise\", \"externalId\": { \"tmsId\": \"tmsRootIdForTomCruise\", }, }, }," -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsAllMediaStreamLog": { -"id": "AssistantLogsAllMediaStreamLog", -"properties": { -"streams": { -"description": "All active media streams while the user issues the query.", -"items": { -"$ref": "AssistantLogsMediaStreamLog" -}, -"type": "array" -}, -"targetStream": { -"$ref": "AssistantLogsMediaStreamLog", -"description": "The stream selected by stream transfer logic to be transferred to another device. It will be empty for other features. Target_stream is different from target_device since target_stream could have multiple devices." -} -}, -"type": "object" -}, -"AssistantLogsAmbiguousTargetDeviceLog": { -"description": "This message logs details on ambiguous device targeting logic. 1. It first takes a list of ambiguous devices 2. Then applies two filters: structure filter and playability filter. 3. If more than one device remains, it tiggers DeviceSelectionDialog to let the user pick one device.", -"id": "AssistantLogsAmbiguousTargetDeviceLog", -"properties": { -"ambiguousDeviceIndex": { -"description": "Device index of the initial ambiguous devices. The device index in this message is consistent with the device index in DeviceInfoLog. It would be used to track more detailed information of a device if needed.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"devicesAfterPromoters": { -"description": "DeviceInfo for devices after the filters and promoters. - When device targeting is only configured for single target, these are ambiguous devices that would have been the output of Lumos. 
Downstream may perform extra check before disambiguation dialog. For example, Media Initiation checks playability for devices. The output here is before the check. - When configured for multi-target, these are just the target devices. For privacy consideration, we may only log device id field inside.", -"items": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"type": "array" -}, -"finalTargetDevice": { -"$ref": "AssistantLogsDeviceInfoLog", -"description": "the final targeted device selected by playability filter or DeviceSelectionDialog" -}, -"playabilityFilteredDevicesIndex": { -"description": "Device index of the devices after playability filter", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"puntInfoLog": { -"description": "When there is no qualified devices after playability check, it would populate punt_info below. If all devices are filtered out for the same reason, there would only be one item. Otherwise, there will be multiple items.", -"items": { -"$ref": "AssistantLogsAmbiguousTargetDeviceLogPuntInfoLog" -}, -"type": "array" -}, -"structureFilteredDeviceIndex": { -"description": "Device index of the devices after structure filter", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsAmbiguousTargetDeviceLogPuntInfoLog": { -"description": "PuntInfoLog is used to log why devices get filtered out during media content playability check. It contains media excuse, provider mid and also index of devices filtered by them.", -"id": "AssistantLogsAmbiguousTargetDeviceLogPuntInfoLog", -"properties": { -"deviceIndex": { -"description": "Index of devices that have the same punt info during playability check, i.e. same media_excuse and provider_mid.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"mediaExcuse": { -"description": "Excuse for media action triggering. 
See: assistant/verticals/media/proto/media_excuse.proto.", -"format": "int32", -"type": "integer" -}, -"providerMid": { -"description": "Provider id that the excuse belongs to. This is the KG MID of the provider, e.g., \"/m/09jcvs\" for Youtube.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsAvailableDeviceAndCtfContextDiffLog": { -"description": "A list of per-device contexts and their respective diffs between AvailableDevice and CTF.", -"id": "AssistantLogsAvailableDeviceAndCtfContextDiffLog", -"properties": { -"localNetworkIdDiff": { -"description": "The diff between AvailableDevice and CTF for the local network ID of the device.", -"enum": [ -"CONTEXT_DIFF_TYPE_UNKNOWN", -"CONTEXT_PRESENT_AND_EQUAL", -"CONTEXT_PRESENT_AND_NOT_EQUAL", -"CONTEXT_PRESENT_ONLY_IN_AVAILABLE_DEVICE", -"CONTEXT_PRESENT_ONLY_IN_CTF", -"CONTEXT_MISSING_IN_BOTH" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"locationFeatureDiff": { -"description": "The diff between AvailableDevice and CTF for the feature proto of the location of the device.", -"enum": [ -"CONTEXT_DIFF_TYPE_UNKNOWN", -"CONTEXT_PRESENT_AND_EQUAL", -"CONTEXT_PRESENT_AND_NOT_EQUAL", -"CONTEXT_PRESENT_ONLY_IN_AVAILABLE_DEVICE", -"CONTEXT_PRESENT_ONLY_IN_CTF", -"CONTEXT_MISSING_IN_BOTH" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsAvailableDevicesLog": { -"description": "Contains the available devices in DeviceTargetingContext.", -"id": "AssistantLogsAvailableDevicesLog", -"properties": { -"availableDeviceInfoLog": { -"items": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationDeviceContactInfoLog": { -"description": "This is the log version of apps.people.oz.external.mergedpeopleapi.DeviceContactInfo Next ID: 2", -"id": "AssistantLogsCommunicationDeviceContactInfoLog", -"properties": { -"rawContactInfo": { -"description": "This 
list provides account information from the raw contact which is the source of this field.", -"items": { -"$ref": "AssistantLogsCommunicationRawDeviceContactInfoLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationFuzzyNgramMatchLog": { -"description": "This is the log version of fuzzy ngram match results that's used for generating the best fuzzy match. Next ID: 3", -"id": "AssistantLogsCommunicationFuzzyNgramMatchLog", -"properties": { -"relativeCost": { -"format": "float", -"type": "number" -}, -"type": { -"enum": [ -"NONE", -"EDIT_DISTANCE", -"GENIE_PLEXICON_DISTANCE", -"GENIE_ALTERNATIVE_RECOGNITION", -"JAPANESE_NAME_TRANSLITERATOR" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationGoogleAccountProvenance": { -"description": "From google3/quality/qrewrite/proto/account_provenance.proto;l=14 We need to copy this as the above proto has Enum field which is not compatible between proto2 and proto3. go/proto2-versus-proto3#enums", -"id": "AssistantLogsCommunicationGoogleAccountProvenance", -"properties": { -"email": { -"type": "string" -}, -"gaiaId": { -"format": "int64", -"type": "string" -}, -"isDasherAccount": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationPersonMetadataLog": { -"description": "This is the log version of apps.people.oz.external.mergedpeopleapi.PersonMetadata Next ID: 2", -"id": "AssistantLogsCommunicationPersonMetadataLog", -"properties": { -"deviceContactInfo": { -"items": { -"$ref": "AssistantLogsCommunicationDeviceContactInfoLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationPersonalContactDataLog": { -"description": "Contact meta data. Next ID: 31", -"id": "AssistantLogsCommunicationPersonalContactDataLog", -"properties": { -"accountProvenance": { -"$ref": "AssistantLogsCommunicationGoogleAccountProvenance", -"description": "Google AccountProvenance of the contact." 
-}, -"commonNameAliasConfidence": { -"description": "Populated if matched_name_type is GIVEN_NAME_ALIAS or FULL_NAME_ALIAS.", -"format": "float", -"type": "number" -}, -"conceptId": { -"description": "Concept id for relationships in English, e.g. \"Mother\" for all non-English locales. It's only populated for source = RELATIONSHIP.", -"type": "string" -}, -"deviceContactAttributes": { -"description": "Integer value corresponding to DeviceContactExtraMetadata.Attribute enum. http://google3/social/graph/wire/proto/merged_person.proto?l=933&rcl=320308954", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"emailIdCount": { -"description": "# emails stored for the contact.", -"format": "int32", -"type": "integer" -}, -"fuzzyNgramMatch": { -"description": "Populate only if ContactRecognitionAlternate.Source is 'FUZZY_CONTACT_MATCH'.", -"items": { -"$ref": "AssistantLogsCommunicationFuzzyNgramMatchLog" -}, -"type": "array" -}, -"gaiaId": { -"deprecated": true, -"description": "Contact owner's gaia id from cs/symbol::symbol:quality_qrewrite.PersonalContactData.shared_contact_owner_gaia_id. Only populated for is_shared = true and non sign-out mode and user is not the owner of the contact(shared contact from other user). This field is not being logged in P2.", -"format": "int64", -"type": "string" -}, -"isContactFromSecondaryAccount": { -"description": "Boolean value indicating whether selected contact is from different account than the logged in account.", -"type": "boolean" -}, -"isExactMatch": { -"description": "If the contact is matched exactly from the query name.", -"type": "boolean" -}, -"isShared": { -"deprecated": true, -"description": "If this is a shared contact. This is true in 2 cases: - User is calling their own contacts that have been marked as shared. - User is calling shared contacts from some other user's contact list. 
This field is not being logged in P2.", -"type": "boolean" -}, -"isTransliteratedMatch": { -"description": "Indicate the contact matches the transliterated query.", -"type": "boolean" -}, -"isVanityContact": { -"description": "True if the contact is a vanity contact(has email = user's email address).", -"type": "boolean" -}, -"isVisibleToGuestsRelationship": { -"description": "If the lookup was done using relationship which is visible to guests. This value will only be set if lookup was done using relationship. E.g. user has a guest relationship (doctor) -> (John) And user says \"call doctor\", then this value will be true.", -"type": "boolean" -}, -"matchedNameType": { -"description": "The matched name type of a contact candidate.", -"enum": [ -"UNSPECIFIED", -"GIVEN_NAME", -"FAMILY_NAME", -"FULL_NAME", -"NICKNAME", -"OTHER", -"INITIAL_WITH_FAMILY_NAME", -"EMAIL_USERNAME", -"VANITY_NICKNAME", -"GIVEN_NAME_ALIAS", -"FULL_NAME_ALIAS", -"HOMOPHONE_GIVEN_NAME", -"HOMOPHONE_FAMILY_NAME", -"HOMOPHONE_FULL_NAME", -"HOMOPHONE_NICKNAME", -"GIVEN_MIDDLE_NAME", -"GIVEN_NAME_WITH_FAMILY_NAME_INITIAL", -"EMAIL_OF_FAMILY_MEMBER" -], -"enumDescriptions": [ -"", -"Contact's given name, or first word in name, or last word if query is in CJK.", -"Contact's family name, or last word in name, or first word if query is in CJK.", -"Contact's display name, or given+family, or family+given if query is in CJK. Contact's middle name is optional.", -"Query span matches contact's nickname.", -"Not used yet.", -"Either or both of first/middle initials along with family name.", -"Email username, controlled by FocusNameParams.annotate_email_username.", -"Vanity nicknames like \"myself\", data is at //quality/qrewrite/servlets/internal/focus_name/data/vanity_nickname.txt", -"Contact's given name is a common name alias of query span, e.g. 
query [Bob] may match contact with given name \"Robert\".", -"Contact's given name is a common name alias of first word in query span, while remaining parts fully matched.", -"Homophone name match of given name, or last word. Only for CJK query.", -"Homophone name match of family name, or first word. Only for CJK query.", -"Homophone name match of full name, or family+given. Only for CJK query.", -"Homophone name match of nickname. Only for CJK query.", -"Contact's given name + middle name. Only for contacts with 3 words.", -"Contact's given name along with last name initial. Only support non-CJKT contact name.", -"This type indicates we find a match of the user's family member - as defined by families.google.com - by matching the email full address. Controlled by FocusNameParams.annotate_email_full_address." -], -"type": "string" -}, -"matchedRecognitionAlternateName": { -"description": "Alternate recognition term which was used to match this contact.", -"type": "string" -}, -"matchedStarlightLookupName": { -"description": "Ngram matched by starlight lookup for fuzzy matching in fulfillment. We need this to analyze how many contacts are returned by starlight lookup that is not matched by fuzzy matching. For example, \"Komal Dear\" is matched to \"Komal Dr\" by fuzzy match. When doing starlight lookup, \"Komal\" and \"Dr\" will be looked up separately. So \"Dr xxx\" will also be returned. We want to see how often this happens.", -"items": { -"type": "string" -}, -"type": "array" -}, -"metadata": { -"$ref": "AssistantLogsCommunicationPersonMetadataLog", -"description": "PersonMetadata of the selected contact." -}, -"nameMatchedContactIndex": { -"description": "The indices of the contact in |candidate_contact| whose name matches the |selected_contact_data|. 
|candidate_contact|: http://google3/logs/proto/assistant/contact.proto?l=111&rcl=306283376 |selected_contact_data|: http://google3/logs/proto/assistant/contact.proto?l=108&rcl=306283376", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"originalQueryName": { -"description": "The original name in the query as transcribed by ASR.", -"type": "string" -}, -"phone": { -"description": "Information regarding the phone endpoints of the selected contact. Currently it is only logged for selected candidate.", -"items": { -"$ref": "AssistantLogsCommunicationPhoneLog" -}, -"type": "array" -}, -"phoneNumberCount": { -"description": "# phone_numbers stored for the contact.", -"format": "int32", -"type": "integer" -}, -"pkgReferenceType": { -"description": "Encodes if pkg_person was resolved via a name or relationship reference.", -"enum": [ -"UNKNOWN_PKG_REFERENCE_TYPE", -"PKG_NAME_REFERENCE", -"PKG_RELATIONSHIP_REFERENCE" -], -"enumDescriptions": [ -"", -"Ann [Charlie]", -"my family dad my sister's kids" -], -"type": "string" -}, -"recognitionAlternateScore": { -"description": "Populate only if ContactRecognitionAlternate.Source is not NONE.", -"format": "float", -"type": "number" -}, -"recognitionAlternateSource": { -"description": "Recognition alternative source type. If not none, then it indicates the personal contact data is alternative and how the alternative is fulfilled.", -"enum": [ -"NONE", -"S3_HYPOTHESES", -"GENIE_QUERY_ALTERNATIVES", -"NAME_CORRECTION_LOG", -"FUZZY_CONTACT_MATCH", -"NEURAL_CONTACT_MATCH", -"NEURAL_CONTACT_MATCH_DARK_LAUNCH", -"PERSONALIZED_NAME_CORRECTION_LOG" -], -"enumDescriptions": [ -"", -"Alternate name from S3 recognition topN hypothesis.", -"Alternate name from Genie query alternatives.", -"Alternate name from contact correction history.", -"Fuzzy match with user's contacts.", -"Neural match. See go/phonetic-contact-match.", -"The dark launch for a neural match. 
We found a match, but we ignore it for serving and just log it.", -"Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." -], -"type": "string" -}, -"relationshipMemoryCount": { -"description": "The number of resolved relationship names and contact pointers from Assistant Memory.", -"format": "int32", -"type": "integer" -}, -"selectedPhone": { -"$ref": "AssistantLogsCommunicationPhoneLog", -"description": "Information regarding the selected phone endpoint. Currently it is only logged for selected candidate." -}, -"shortcutContactInfo": { -"$ref": "MajelContactInformationShortcutInformation", -"description": "Shortcut information of the contact." -}, -"source": { -"description": "The contact source of a contact candidate.", -"enum": [ -"UNKNOWN", -"FOCUS_CONTACT", -"DEVICE_CONTACT", -"GMAIL_INFERENCE", -"S3_DECORATOR", -"RELATIONSHIP", -"VANITY", -"SIGNED_OUT_DEVICE", -"SHARED_CONTACT", -"FAMILY_MEMBER", -"SHARED_DEVICE_USER", -"ON_DEVICE_CONTACT_LOOKUP", -"APP_SEARCH_CONTACT" -], -"enumDescriptions": [ -"", -"Contacts from Focus", -"Contacts from device, see go/device-content", -"Contacts inferred from Gmail", -"Contacts from S3 decorator", -"Whitelisted relationships no matter if user has such contact, see go/sls-personal_relationship_names_and_aliases.", -"Vanity nicknames or user's own profile name.", -"Signed out device contact names", -"Contacts shared from other users. See go/multi-user-shared-contact.", -"People in the user's family group in http://families.google.com who are not contacts.", -"People who share a device with the user who are not contacts.", -"Contacts from on device lookup during contact fulfillment.", -"Contacts from AppSearch." -], -"type": "string" -}, -"systemContactGroupId": { -"description": "Integer value corresponding to SystemContactGroup enum. 
http://google3/social/graph/wire/proto/merged_person.proto?l=3151&rcl=320308954", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"whatsappPhoneNumberCount": { -"deprecated": true, -"description": "DEPRECATED. Use phone instead. Used before 2020-01-13. Number of phone numbers annotated with Whatsapp.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationPhoneLog": { -"description": "This is the log version of apps.people.oz.external.mergedpeopleapi.Phone proto. Next ID: 3", -"id": "AssistantLogsCommunicationPhoneLog", -"properties": { -"rawDeviceContactInfo": { -"description": "This list provides account information from the raw contact which is the source of this field.", -"items": { -"$ref": "AssistantLogsCommunicationRawDeviceContactInfoLog" -}, -"type": "array" -}, -"type": { -"description": "Label for phone number in the Contacts app. It can have standard values provided by the app e.g. MOBILE, HOME, WORK etc, but users are allowed to modify. So essentially it becomes user content.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsCommunicationRawDeviceContactInfoLog": { -"description": "This is the log version of apps.people.oz.external.mergedpeopleapi.RawDeviceContactInfo proto. Next ID: 3", -"id": "AssistantLogsCommunicationRawDeviceContactInfoLog", -"properties": { -"accountType": { -"description": "Account type of raw contact, e.g. 
\"com.google\" or \"com.linkedin.android\".", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsDefaultDeviceLog": { -"description": "Log device info of default speaker and tv", -"id": "AssistantLogsDefaultDeviceLog", -"properties": { -"defaultSpeaker": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"defaultTv": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"sourceDeviceId": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsDefaultDevicesLog": { -"id": "AssistantLogsDefaultDevicesLog", -"properties": { -"localDefaultDevices": { -"$ref": "AssistantLogsDefaultDeviceLog" -}, -"nearbyDefaultDevices": { -"description": "Default settings of nearby devices.", -"items": { -"$ref": "AssistantLogsDefaultDeviceLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsDeviceAnnotationLog": { -"description": "Device annotation mention from query", -"id": "AssistantLogsDeviceAnnotationLog", -"properties": { -"deviceIdLog": { -"description": "The identification of the device. The logging version of the full DeviceId.", -"items": { -"$ref": "AssistantLogsSettingsDeviceIdLog" -}, -"type": "array" -}, -"rawTextFromQuery": { -"description": "The raw text mentioning a device from the query, such as \"any tv\".", -"type": "string" -}, -"type": { -"description": "The annotation type mentioned in the query.", -"enum": [ -"NO_DEVICE_ANNOTATION", -"DEVICE_ID_ANNOTATION", -"DEVICE_TYPE_ANNOTATION", -"DEVICE_TEXT_ANNOTATION" -], -"enumDescriptions": [ -"No device is mentioned in the query.", -"Annotation specifying a device linked to user's account", -"Annotation specifying a general device, such as speaker, TV, etc.", -"Annotation with text. It could be random text, such as \"dummy thing\"." 
-], -"type": "string" -}, -"userDefinedName": { -"description": "The matched device name set by the user, such as \"big screen tv\".", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsDeviceInfoLog": { -"description": "The information related to the device. Next ID: 19", -"id": "AssistantLogsDeviceInfoLog", -"properties": { -"arbitrationDeviceId": { -"description": "Device identifier string for the current device used in the arbitration service.", -"type": "string" -}, -"availableDeviceAndCtfContextDiff": { -"$ref": "AssistantLogsAvailableDeviceAndCtfContextDiffLog", -"description": "A list of diffs for the device's context between AvailableDevice and CTF." -}, -"connectivity": { -"enum": [ -"UNKNOWN_CONNECTIVITY", -"ONLINE_STATE", -"OFFLINE_STATE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"deviceId": { -"description": "The identification of the device. DeviceId (go/as-device-id) has multiple fields. To consloidate it to a single to make dremel easier, we use the string obtained by calling go/get-device-id.", -"type": "string" -}, -"deviceIdLog": { -"$ref": "AssistantLogsSettingsDeviceIdLog", -"description": "The identification of the device. The logging version of the full DeviceId." -}, -"deviceIndex": { -"description": "We index linked devices and log these index to avoid logging device_id. device_index should always be a positive number or -1. -1 means this device is not in homegraph.", -"format": "int32", -"type": "integer" -}, -"deviceModelId": { -"description": "This is the device_model_id field in device_settings proto. It has the same value for the same type of devices. e.g. 
Sonos.Sonos One.S13", -"type": "string" -}, -"distance": { -"description": "LINT.ThenChange(//depot/google3/assistant/context/proto/device_arbitration.proto:EstimatedRelativeDistance)", -"enum": [ -"UNKNOWN_DISTANCE", -"CLOSEST", -"EQUALLY_CLOSE", -"FURTHER" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"eliminatingLumosProcessor": { -"description": "The lumos processor which eliminated this device, if applicable", -"enum": [ -"UNKNOWN_LUMOS_PROCESSOR", -"CAPABILITIES_FILTER", -"DEVICE_ANNOTATION_FILTER", -"DEVICE_CONNECTIVITY_FILTER", -"LOCAL_DEVICE_INCLUSIVENESS_FILTER", -"LOCATION_FILTER", -"MEDIA_PLAYBACK_FILTER", -"SAFETY_FILTER", -"TRAITS_FILTER", -"DEVICE_TYPE_FILTER", -"APP_FILTER", -"HYBRID_DEVICE_PROPERTIES_FILTER", -"NEARBY_DEVICE_FILTER", -"DEVICE_ATTRIBUTES_FILTER", -"LAST_USED_DEVICE_FILTER", -"DEFAULT_MEDIA_OUTPUT_PROMOTER", -"DEVICE_GROUP_PROMOTER", -"LOCAL_DEVICE_PROMOTER", -"LOCATION_PROMOTER", -"MEDIA_FOCUS_PROMOTER", -"MEDIA_PLAYBACK_PROMOTER", -"SAME_NAME_DEVICE_PROMOTER", -"PHONE_TARGETING_PROMOTER", -"TRAITS_PROMOTER", -"DEVICE_TYPE_PROMOTER", -"FEATURE_EXTRACTOR", -"LABELER" -], -"enumDescriptions": [ -"", -"filters", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"promoters", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"feature extractor", -"labeler" -], -"type": "string" -}, -"homeStructureId": { -"description": "The home structure id of the device.", -"type": "string" -}, -"isRemote": { -"type": "boolean" -}, -"isTethered": { -"description": "This flag indicates this is a non-local device that is tethered to local/originating device. Tethered device is a special case of is_remote and typically used in wearable scenarios. 
This is always false for local device and when it is true, it implies is_remote is also true.", -"type": "boolean" -}, -"mediaCapabilities": { -"$ref": "AssistantLogsMediaCapabilities" -}, -"mediaDeviceType": { -"enum": [ -"UNKNOWN_DEVICE_TYPE", -"ASSISTANT", -"HOME_AUTOMATION", -"CAST", -"CAST_GROUP", -"QUARTZ", -"QUARTZ_IOS", -"CLOUD_AUTO" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"Quartz in AGSA", -"Quartz in iOPA", -"Non-assistant Auto devices from Cloud. go/cloud-registration-api-changes" -], -"type": "string" -}, -"name": { -"description": "User defined device name", -"type": "string" -}, -"sessions": { -"description": "This field should be populated only when there is at least one session on this device.", -"items": { -"$ref": "AssistantLogsDeviceMediaSessionLog" -}, -"type": "array" -}, -"surfaceType": { -"description": "This field should be populated only when the device is an Assistant device.", -"enum": [ -"UNKNOWN", -"ANDROID_ALLO", -"ANDROID_AUTO", -"ANDROID_THINGS_CUBE", -"ANDROID_THINGS_JASPER", -"ANDROID_TV", -"ANDROID_TV_KIDS", -"ANDROID_WEAR", -"AR_GLASSES", -"ASSISTANT_SDK", -"AUDIOWEAR", -"BUBBLE_CHARACTERS_IOS", -"CAPABILITY_BASED_SURFACE", -"CHROMECAST_ASSISTANT", -"CHROMECAST_MANHATTAN", -"CHROMECAST_SEARCH", -"CLOUD_DEVICE", -"COMPANION_SCREEN", -"DYNAMITE_WEB", -"ENSEMBLE", -"EYESFREE_AGSA", -"EYESFREE_GMM", -"GBOARD", -"GLASS", -"GOOGLE_HOME", -"HANGOUTS_CHATBOT", -"IOS_ALLO", -"IOS_GSA", -"IOS_WEAR", -"LIBASSISTANT", -"LINE_CHATBOT", -"MATTER", -"MULTIMODAL_AGSA", -"NON_ASSISTANT_SURFACE", -"OPA_AGSA", -"OPA_AGSA_CHROME_OS", -"OPA_ANDROID_AUTO", -"OPA_ANDROID_LITE", -"OPA_ANDROID_SCREENLESS", -"OPA_ANDROID_SMART_DISPLAY", -"OPA_ANDROID_TABLET", -"OPA_CROS", -"OPA_GACS", -"OPA_IOS", -"OPA_IOS_SCREENLESS", -"OPA_KAIOS", -"OPA_MOBILE_WEB", -"RTOS_PHONE", -"SMS_CHATBOT", -"TELEGRAM_CHATBOT", -"TELEPHONE_ASSISTANT", -"VERILY_ONDUO", -"YOUTUBE_APP", -"AGSA_BISTO_FOR_EVAL", -"COGSWORTH_FOR_EVAL", -"LOCKHART_MIC_FOR_EVAL", 
-"OPA_ANDROID_AUTO_EMBEDDED_FAKE", -"SPARK", -"WALLE", -"UNIT_TESTING" -], -"enumDeprecated": [ -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"Deprecated (never used). Use OPA_ANDROID_AUTO instead.", -"Assistant on small screen stationary device.", -"", -"", -"", -"", -"Assistant on AR Glasses with both visual and audio experiences. Ask ar-assistant@ for details.", -"For Assistant SDK gRPC client.", -"", -"go/assistant-lamda-overview-characters", -"Surface for capability based testing.", -"For chrome cast assistant web app.", -"For chrome cast with assistant + screen (e.g., Google Nest Hub).", -"", -"Virtual device for event-based triggering, e.g. executing time", -"scheduled routines: go/routine-cloud-ex For Quartz.", -"", -"", -"Deprecated. Please use OPA_ANDROID_SCREENLESS.", -"", -"For Gboard app", -"", -"", -"", -"", -"", -"", -"For standalone libassistant devices.", -"LINE 3P messaging app", -"For Matter devices. go/matter-prod", -"Deprecated.", -"Surfaces that do not speak to the Assistant, i.e. web HQ", -"Assistant on Android phones accessed through the Google App (velvet). This represents neither all of the assistant on android phones (ANDROID_ALLO, VERILY_ONDUO, etc.) nor all of the assistant built on top of AGSA (ANDROID_WEAR, OPA_ANDROID_SCREENLESS, OPA_AGSA_CHROME_OS, etc.).", -"Deprecated. Please use OPA_CROS.", -"", -"This is OPA on Android Go (not part of AGSA)", -"Assistant on Nexus with screen off/locked. 
Use go/bisto device to trigger.", -"Assistant on Titan hub mode (go/titan-hubmode-surface). This is OPA Android first party Smart Display devices. The UI experience is built on Opal (Gallium and Flutter) and native Android.", -"Assistant on Android tablet", -"Assistant on native Chrome OS (go/croissant).", -"For assistant on GACS devices (go/gacs-dd). Google Assistant Conversation Service (GACS) defines intents the device accepts. This surface serves the intents in the assistant response.", -"", -"Assistant on iOS with screen off/locked. Use go/bisto device to trigger.", -"Assistant on KaiOS. go/kaiosama", -"Assistant on Mobile Web. go/opa-spidey", -"Assistant on low-cost RTOS phones (go/conceRTOS).", -"", -"Telegram 3P messaging app", -"Assistant on a phone call (go/telephone).", -"", -"A Youtube app.", -"Eval-only surfaces. These surfaces are not real surfaces. They are only used in intermediate steps of eval query and request generation: 1. Eval samplers (OPA query sampler and Cannery sampler) checks logs and temporarily assigns these surface types to the logs, to put Bisto, Lockhart Mic, ... queries in the correct query pools. 2. Request builders uses these surface types to determine how TaskRequest for Bisto, Lockhart Mic, ... should be built, like whether some user user agent should be used, whether some entry source should be set. The surface types in the generated TaskRequest is still the production surface types, like OPA_ANDROID_SCREENLESS or OPA_AGSA. Temp surface for Bisto Android eval. Will be merged with OPA_ANDROID_SCREENLESS in the future.", -"", -"", -"OPA_ANDROID_AUTO has another sub-surface (embedded) that differs in the reported capabilities, client-ops, flags but uses the same surface_type string of OPA_ANDROID_AUTO. This fake surface would allow running turing2 tests and evals for this embedded surface. Deprecated as of Feb 2019. Please use \"OPA_ANDROID_AUTO_EMBEDDED\" client type for tests and eval. See go/client-onboarding for more details. 
https://cs.corp.google.com/piper///depot/google3/assistant/assistant_server/tools/util/consts.h?l=32&rcl=247481455", -"Unlaunched new surface prototype, ask spark-eng@.", -"Wall-E is an Area120 Project building assistant powered robots. The surface is an extended joplin which have same capabilities as a google home adding custom robot features. Currently in active development. Please reach out walle-software@ or bingxin@ for questions. For details see go/walle-as-pipeline-1-pager.", -"Surface used for unit-testing purposes. Unit-tests might still require the existence of a \"valid\" SurfaceType to succeed (eg. initialization of the CapabilityBuilder, proto matching, etc.). With the move away from SurfaceType checks, a generic \"TESTING\" SurfaceType can be used for such tests without affecting the test behavior. Unlike the CAPABILITY_BASED_SURFACE, this proto does not have any capabilities or ResponseType tied to it. It should only be used for unit-tests and should not be exposed in the ASDebugger." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsDeviceMediaSessionLog": { -"description": "Log about the media session on a device.", -"id": "AssistantLogsDeviceMediaSessionLog", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId" -}, -"mediaSessionType": { -"enum": [ -"UNKNOWN", -"SINGLE_DEVICE_SESSION", -"STATIC_GROUP_SESSION", -"DYNAMIC_GROUP_SESSION" -], -"enumDescriptions": [ -"", -"", -"Media session is playing on a device group set by users.", -"Media session is playing on a temporary device group requested in query." -], -"type": "string" -}, -"mediaType": { -"description": "The type of the media session. If provider does not report this field, we ## compute it by mapping provider type to media type. 
Here is the mapping: |ProviderType |MediaItemMetadata.Type| |-------------------------------------- |MUSIC |TRACK | |VIDEO |VIDEO | |LIVE_TV |TV_CHANNEL | |AUDIOBOOK |AUDIO_BOOK | |PODCAST |PODCAST_EPISODE | ## |LIVE_STREAMING|VIDEO | ", -"enum": [ -"UNKNOWN", -"TRACK", -"ALBUM", -"ARTIST", -"PLAYLIST", -"EPISODE", -"MOVIE", -"PHOTO", -"TV_SHOW_EPISODE", -"MUSIC_GENRE", -"MUSIC_STATION", -"AUDIO_BOOK", -"CHAPTER", -"RADIO_STATION", -"MUSIC_MIX", -"SPORTS_EVENT", -"TV_CHANNEL", -"VIDEO", -"VIDEO_RECOMMENDED_PLAYLIST", -"YOUTUBE_CHANNEL", -"YOUTUBE_VIDEO_PLAYLIST", -"TV_SHOW", -"NEWS", -"NARRATED_WEB", -"NEWS_CALL_TO_ACTION", -"NEWS_OVERVIEW", -"AUDIO_STORY", -"PODCAST_SERIES", -"PODCAST_EPISODE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Video recommendations playlist. (e.g. fetched from Recommended feed on YouTube)", -"", -"", -"Representing a TV show which contains multiple TV seasons.", -"", -"", -"Repesents a Call To Action media item. See go/newsplayer-outro-gnews. Current use case is to ask users to continue their news journey in the Google News app / web app. Future cases could include asking users to adjust their news settings.", -"Represents a Narrative News Overview media item. This a multi-segment news media item with interactive elements. See go/gen-narrative-news-intro.", -"Representing a short audio story. 
go/tmas-design", -"Represent different kinds of podcast content", -"" -], -"type": "string" -}, -"playbackState": { -"description": "The playback states of the session.", -"enum": [ -"UNKNOWN_STATE", -"STOPPED", -"PAUSED", -"PLAYING", -"FAST_FORWARDING", -"REWINDING", -"BUFFERING", -"ERROR", -"CONNECTING", -"SKIPPING_TO_PREVIOUS", -"SKIPPING_TO_NEXT", -"SKIPPING_TO_QUEUE_ITEM" -], -"enumDescriptions": [ -"", -"State indicating this item is currently stopped.", -"State indicating this item is currently paused.", -"State indicating this item is currently playing.", -"State indicating this item is currently fast forwarding.", -"State indicating this item is currently rewinding.", -"State indicating this item is currently buffering and will begin playing when enough data has buffered.", -"State indicating this item is currently in an error state. The error message should also be set when entering this state.", -"State indicating the class doing playback is currently connecting to a new destination. Depending on the implementation you may return to the previous state when the connection finishes or enter STATE_NONE. If the connection failed STATE_ERROR should be used.", -"State indicating the player is currently skipping to the previous item.", -"State indicating the player is currently skipping to the next item.", -"State indicating the player is currently skipping to a specific item in the queue." 
-], -"type": "string" -}, -"providerMid": { -"description": "The KG mid of the media provider.", -"type": "string" -}, -"supportedTransportControl": { -"items": { -"enum": [ -"UNKNOWN_COMMAND", -"PLAY_FROM_SEARCH", -"PLAY_FROM_URI", -"SEND_CUSTOM_ACTION", -"SKIP_TO_NEXT", -"SKIP_TO_PREVIOUS", -"PLAY", -"PAUSE", -"STOP", -"SET_RATING", -"SEEK_TO", -"SHUFFLE", -"REWIND", -"FAST_FORWARD", -"SKIP_TO_QUEUE_ITEM", -"SET_REPEAT_MODE", -"SET_CAPTIONING_ENABLED" -], -"enumDescriptions": [ -"", -"Requests that the app start playback for a specific search query.", -"Requests that the app start playback for a specific uri.", -"Send a custom string action to the provide (go/media-controller-gsa).", -"Skip to the next item in the play queue.", -"Skip to the previous item in the play queue.", -"Requests that the player start its playback at its current position.", -"Requests that the player pause its playback and stay at its current position.", -"Requests that the player stop its playback.", -"Rate the current content.", -"Move to a new location in the media stream.", -"Requests that the app shuffle the currently loaded content.", -"Rewind the current content.", -"Fast forward the current content.", -"Play an item with a specific id in the play queue.", -"Set repeat mode for current content.", -"Enable or disable the closed caption for the current content." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsDeviceSelectionLog": { -"description": "Contains information logged in target device selection. See go/improve-device-targeting-logging for details. 
Next Id: 23", -"id": "AssistantLogsDeviceSelectionLog", -"properties": { -"allDefaultDevices": { -"deprecated": true, -"description": "Default settings of all nearby devices Deprecated, use default_devices_log instead.", -"items": { -"$ref": "AssistantLogsDefaultDeviceLog" -}, -"type": "array" -}, -"allMediaStreamLog": { -"$ref": "AssistantLogsAllMediaStreamLog", -"description": "Logs all active media sessions." -}, -"availableDevicesLog": { -"$ref": "AssistantLogsAvailableDevicesLog", -"description": "The list of `available_devices` as they appear in DeviceTargetingContext. This is very useful for debugging diffs between GB and CMP since they have different methods of populating the `available_devices` field." -}, -"counterfactualDeviceSelectionLog": { -"$ref": "AssistantLogsDeviceSelectionLog", -"description": "DeviceSelectionLog for counterfactual logging." -}, -"defaultDevices": { -"$ref": "AssistantLogsDefaultDeviceLog", -"deprecated": true, -"description": "Include default tv and default speaker Deprecated, use all_default_devices below." -}, -"defaultDevicesLog": { -"$ref": "AssistantLogsDefaultDevicesLog" -}, -"devicesStr": { -"description": "Temporaray field for debugging ANDROID_AUTO multi_target_devices punt. This will be removed once we identify the root cause.", -"items": { -"type": "string" -}, -"type": "array" -}, -"inputErrorLog": { -"items": { -"$ref": "AssistantLogsInputErrorLog" -}, -"type": "array" -}, -"localDevice": { -"$ref": "AssistantLogsDeviceInfoLog", -"deprecated": true, -"description": "Now we just log the media sessions on local device Deprecated, use NearbyDevicesLog::LocalDevice instead." 
-}, -"logDataSource": { -"description": "Indicates which library populated the device_selection_log for this query.", -"enum": [ -"UNKNOWN", -"MEDIA_FOCUS_SELECTOR", -"LUMOS_DEVICE_TARGETING_LIBRARY" -], -"enumDescriptions": [ -"", -"Indicates the logs is populated by Media Focus Selector.", -"Indicates the logs is populated by Lumos Device Targeting Library." -], -"type": "string" -}, -"mediaFocus": { -"$ref": "AssistantLogsMediaFocusInfoLog", -"deprecated": true, -"description": "The Media Focus information. This field should be populated only when there is a Media Focus. Deprecated, use media_focuses below instead." -}, -"mediaFocusesLog": { -"$ref": "AssistantLogsMediaFocusesLog", -"description": "Media focuses on all devices." -}, -"nearbyDevicesLog": { -"$ref": "AssistantLogsNearbyDevicesLog", -"description": "All nearby devices and local device." -}, -"queryAnnotation": { -"$ref": "AssistantLogsQueryAnnotationLog", -"description": "This should log the query annotation features found in the device, such as the device annotation, the room annotation, and the structure annotation from the query." -}, -"selectionResult": { -"$ref": "AssistantLogsDeviceSelectionResultLog", -"description": "The result of device selection." -}, -"testCodes": { -"items": { -"$ref": "AssistantLogsDeviceTargetingTestCode" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsDeviceSelectionResultLog": { -"description": "Log the selection result. Next ID: 11", -"id": "AssistantLogsDeviceSelectionResultLog", -"properties": { -"ambiguousTargetDevices": { -"$ref": "AssistantLogsAmbiguousTargetDeviceLog", -"deprecated": true, -"description": "Deprecated, please use qualified_devices." 
-}, -"deviceSelectionDecisionSummary": { -"$ref": "AssistantLogsMediaDeviceSelectionDecisionSummary" -}, -"deviceTargetingErrorType": { -"enum": [ -"UNKNOWN_ERROR", -"NO_DEVICE_IN_SAME_STRUCTURE", -"NO_DEVICE_IN_SAME_NETWORK_OR_LOCATION", -"NO_DEVICE_IN_SAME_STRUCTURE_OR_NETWORK_OR_LOCATION", -"NO_DEVICE_SATISFIES_SAME_STRUCTURE_OR_UNKNOWN_IF_OWNED", -"NO_DEVICE_SATISFIES_CAPABILITIES_REQUIREMENT", -"NO_DEVICE_MATCHED_DEVICE_ANNOTATION", -"MULTI_TARGET_DEVICE_NOT_SUPPORTED", -"NO_DEVICE_AFTER_LOCAL_DEVICE_EXCLUDED", -"UNABLE_TO_TARGET_ONLY_LOCAL_DEVICE", -"NO_DEVICE_MATCHED_REQUIRED_TRAITS", -"NO_DEVICE_MATCHED_REQUIRED_ATTRIBUTES", -"NO_DEVICE_MATCHED_REQUIRED_DEVICE_TYPE", -"NO_DEVICE_IN_SAME_OR_MENTIONED_STRUCTURE", -"NO_DEVICE_SATISFIES_PLAYBACK_REQUIREMENT", -"STRUCT_DISAMBIG_NOT_SUPPORTED", -"ROOM_DISAMBIG_NOT_SUPPORTED", -"UNRECOGNIZED_DEVICE_NAME", -"NO_LINKED_REMOTE_DEVICES", -"NO_LINKED_REMOTE_VIDEO_DEVICES", -"NO_SAFE_DEVICE_WITH_SCREEN", -"ALL_QUALIFIED_DEVICES_OFFLINE", -"CROSS_STRUCTURE_TARGETING_DISALLOWED", -"NO_DEVICE_MEETS_PROVIDER_REQUIREMENT", -"MISSING_LOCAL_DEVICE_SETTING", -"NO_DEVICE_HAS_REQUIRED_APP", -"HYBRID_DEVICE_NOT_QUALIFIED", -"NO_NEARBY_DEVICES", -"NO_MATCHING_LAST_USED_DEVICE" -], -"enumDescriptions": [ -"", -"Error about when there's no qualified devices in the same home graph structure as the local device.", -"Error about when there's no qualified devices in the same network or location as the local device.", -"Error about when there's no qualified devices in the same home graph structure or network or location as the local device.", -"Error about when no qualified device was found that matched the following conditions: 1) Both local device and target device are in the same home graph structure. 2) The distance to target device is unknown (i.e. either local device or target device is homeless) and target device is owned by the user. 
3) If local device and target device are in different structures, only allowed when the query explicitly mentioned device name.", -"Error about when no qualified device satisfies the device capability requirement.", -"Error of no qualified devices matched requirements from device annotation.", -"Error of not supporting targeting multiple devices.", -"Error of no device left after local device is excluded within inclusiveness filter.", -"Error of unable to target only local device within inclusiveness filter.", -"Error of no qualified devices matched the required smart home traits.", -"Error of no qualified devices matched the required Smart Home attributes.", -"Error of no qualified devices matched the required smart home device types.", -"Error of no qualified devices in the mentioned structure or in the same structure as the local device.", -"Error of no device satisfying playback requirements.", -"Error when structure disambiguation is not supported in the config. When config is supported (both structure_disambiguation_supported and multi_target_supported are set to true), the library would return structures for disambiguation.", -"Error when room disambiguation is not supported in the config. When config is supported (both room_disambiguation_supported and multi_target_supported are set to true), the library would return rooms for disambiguation.", -"Error when the device mentioned in the query is not recognized. In other words, we can't find a device which matched the device mentioned in the query.", -"Used when user requests to target on a remote device and there are no remote devices linked.", -"Used when user requests to target on a remote video device and there are no remote video devices linked.", -"Error when there is no safe screen device. For example, requesting screen-requiring content from Auto surface.", -"Error when all qualified devices are offline.", -"When user tries to target a device which is in a different structure and is not allowed. 
For example, if user doesn't own the structure. See go/on-multi-user-access-transitivity for more details.", -"Used when user requests a device with certain provider but no device can satisfies provider requirement.", -"AvailableDevice device setting of local device is missing.", -"Required app is not installed", -"All remote hybrid devices fail the propertries requirement specific (lock/unlock, etc) for hybrid devices (such as Tangor).", -"No nearby hearing devices", -"No matching device for the LastUsedDevice requirement found." -], -"type": "string" -}, -"finalLumosStage": { -"description": "The class name for the final filter/promoter used by Lumos for device targeting. This filter or promoter runs for all users, and contains no data specific to the individual user.", -"type": "string" -}, -"lowConfidenceTargetDevice": { -"$ref": "AssistantLogsLowConfidenceTargetDeviceLog", -"description": "////////////////////////////////////////////////////////////////////////// Ambiguous Results: the library failed to select the final target device(s) but it narrows down to a set of devices which are all valid target device candidates. The client needs to do further disambiguation, e.g., giving a dialog or having customized logic. The low confidence target device means the library falied to select the target device but it picked two devices for the client to do disambiguation." 
-}, -"mediaFocusSelectionErrorType": { -"description": "////////////////////////////////////////////////////////////////////////// This field log the error while selecting target device in media_focus_selector.", -"enum": [ -"UNKNOWN_ERROR", -"FOUND_MULTIPLE_DEVICES", -"REQUESTED_DEVICE_HAS_NO_SCREEN", -"NO_LINKED_REMOTE_DEVICES", -"NO_LINKED_REMOTE_VIDEO_DEVICES", -"UNRECOGNIZED_DEVICE_NAME", -"UNRECOGNIZED_VIDEO_DEVICE_NAME", -"NO_DEVICE_MEETS_PROVIDER_REQUIREMENT", -"MULTIPLE_DEVICES_MEET_PROVIDER_REQUIREMENT", -"REMOTE_CLOUD_CASTING_NOT_ALLOWED", -"NO_SAFE_DEVICE_WITH_SCREEN", -"NO_DEVICE_MEETS_PLAYBACK_REQUIREMENT", -"MULTIPLE_DEVICES_MEET_PLAYBACK_REQUIREMENT", -"NO_VALID_DEVICE_IN_REQUESTED_ROOM", -"NO_DEVICE_FOUND_IN_REQUESTED_ROOM", -"MULTIPLE_DEVICES_FOUND_IN_REQUESTED_ROOM", -"ALL_QUALIFIED_DEVICES_IN_DIFFERENT_STRUCTURE", -"QUALIFIED_DEVICE_OFFLINE", -"ALL_QUALIFIED_DEVICES_OFFLINE", -"CROSS_STRUCTURE_TARGETING_DISALLOWED", -"NO_DEVICE_MEETS_STADIA_GAMING_CAPABILITY", -"MEDIA_STATES_MISSING", -"NO_DEVICE_SATISFIES_CAPABILITIES_REQUIREMENT", -"HYBRID_DEVICE_NOT_QUALIFIED" -], -"enumDescriptions": [ -"Used when the type of error is not known explicitly.", -"Used when there are multiple devices satisfying the constraints.", -"Used when the user requested to play a content on device without screen and focus selection has the (unsatisfied) REQUIRES_SCREEN restriction. Note that this is error is used if device is recognized, and otherwise UNRECOGNIZED_DEVICE or UNRECOGNIZED_VIDEO_DEVICE will be used.", -"Used when user requests to play on a remote device and there are no linked devices.", -"Used when user requests to play on a remote video device and there are no linked video devices.", -"Used when requested device with the given name in the annotation can not be recognized, that is user requested to play on a device that is not found in linked devices. 
If device is unrecognized and focus selection has the (unsatisfied) REQUIRES_SCREEN or type of unrecognized device is video then more specific UNRECOGNIZED_VIDEO_DEVICE_NAME is used.", -"Used when requested device with the given name in the annotation can not be recognized and focus selection has the (unsatisfied) REQUIRES_SCREEN or type of unrecognized device is video.", -"Used when user requests a device with certain provider but no device can satisfies provider requirement.", -"Used when user requests a device with certain provider but more than one device can satisfies the provider requirement.", -"Used when requested device does not allow remote cloud casting.", -"Used when there is no safe screen device. For ex: requesting screen-requiring content from Auto surface.", -"Used when user requests a device with certain playback but no device can satisfies playback requirement.", -"Used when user requests a device with certain playback but more than one device can satisfies playback requirement.", -"Used when user requests a device to be in a particular room but none of the devices in that room satisfies all requirements.", -"Used when user requests a room but no device is found in that room.", -"Used when user requests a room and there are multiple devices in that room are eligible to selected.", -"Used when all qualified devices are not in the same structure as the primary device.", -"", -"When all qualified devices are offline.", -"When user tries to target a device which is in a different structure and is not allowed. For example, if user doesn't own the structure. See go/on-multi-user-access-transitivity for more details.", -"When all devices do not qualify for stadia support.", -"Used when any critical media state is missing so the selection decision cannot be made. 
See go/partial-states-in-targeting-library for more details.", -"Error about when no qualified device satisfies the device capability requirement.", -"All remote hybrid devices fail the propertries requirement specific (lock/unlock, etc) for hybrid devices (such as Tangor)." -], -"type": "string" -}, -"processorInfo": { -"description": "The log for each stage of Lumos, showing the number of eliminated devices from each processor.", -"items": { -"$ref": "AssistantLogsLumosProcessorInfo" -}, -"type": "array" -}, -"qualifiedDevices": { -"$ref": "AssistantLogsAmbiguousTargetDeviceLog", -"description": "We will apply several filters and dialogs to select a target device if media_focus_selector fail to select one. This field should log the devices left after each filter or dialog. It also log the detailed info of the final target device." -}, -"singleTargetDevice": { -"$ref": "AssistantLogsDeviceInfoLog", -"deprecated": true, -"description": "////////////////////////////////////////////////////////////////////////// Unambiguous Results: the library successfully selected the final target device(s) and no further disambiguation is needed. Deprecated, please use target_device." -}, -"targetDevice": { -"$ref": "AssistantLogsTargetDeviceLog" -} -}, -"type": "object" -}, -"AssistantLogsDeviceTargetingTestCode": { -"description": "Test code is used to track special events happening in Device Targeting Library. Next Id: 2", -"id": "AssistantLogsDeviceTargetingTestCode", -"properties": { -"type": { -"enum": [ -"UNKNOWN", -"IGNORE_NESTED_DEVICE_MENTION_WITH_ID", -"INCOMPLETE_LOCAL_AUTO_SETTINGS_FOUND", -"FINAL_RESULT_RESOLVED_BY_NEARBY_DEVICE" -], -"enumDescriptions": [ -"", -"Used to track the case when the nested DeviceMention with id is ignored inside a RoomMention. 
See: b/184750168 and go/mixed-room-and-device-mention", -"Used to track if there is incomplete or bad local android auto settings passed to Lumos.", -"Used to track if the final result is resolved by using nearby device." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsInputErrorLog": { -"id": "AssistantLogsInputErrorLog", -"properties": { -"errorCode": { -"format": "int32", -"type": "integer" -}, -"errorType": { -"enum": [ -"ERROR_UNKNOWN", -"ERROR_DEVICE_PROPERTIES", -"ERROR_HOME_GRAPH", -"ERROR_CAPABILITIES_ACROSS_DEVICES", -"ERROR_SURFACE_IDENTITIES_ACROSS_DEVICES" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsLowConfidenceTargetDeviceLog": { -"description": "Represents the case where there is a target device with low confidence so that the library didn't directly target it. Instead, the library returns the low confidence target device and the fallback device for the client to decide to either trigger a dialog to disambiguate or select one of them based on extra business logic. Next ID: 3", -"id": "AssistantLogsLowConfidenceTargetDeviceLog", -"properties": { -"fallbackDeviceLog": { -"$ref": "AssistantLogsDeviceInfoLog", -"description": "The fallback device." -}, -"lowConfTargetDeviceLog": { -"$ref": "AssistantLogsDeviceInfoLog", -"description": "The selected low confidence focus device." 
-} -}, -"type": "object" -}, -"AssistantLogsLumosProcessorInfo": { -"id": "AssistantLogsLumosProcessorInfo", -"properties": { -"devicesAfterRun": { -"description": "Number of candidate devices after this stage is run.", -"format": "int32", -"type": "integer" -}, -"devicesBeforeRun": { -"description": "Number of candidate devices before this stage is run.", -"format": "int32", -"type": "integer" -}, -"processorName": { -"description": "Name of the processor for this stage.", -"enum": [ -"UNKNOWN_LUMOS_PROCESSOR", -"CAPABILITIES_FILTER", -"DEVICE_ANNOTATION_FILTER", -"DEVICE_CONNECTIVITY_FILTER", -"LOCAL_DEVICE_INCLUSIVENESS_FILTER", -"LOCATION_FILTER", -"MEDIA_PLAYBACK_FILTER", -"SAFETY_FILTER", -"TRAITS_FILTER", -"DEVICE_TYPE_FILTER", -"APP_FILTER", -"HYBRID_DEVICE_PROPERTIES_FILTER", -"NEARBY_DEVICE_FILTER", -"DEVICE_ATTRIBUTES_FILTER", -"LAST_USED_DEVICE_FILTER", -"DEFAULT_MEDIA_OUTPUT_PROMOTER", -"DEVICE_GROUP_PROMOTER", -"LOCAL_DEVICE_PROMOTER", -"LOCATION_PROMOTER", -"MEDIA_FOCUS_PROMOTER", -"MEDIA_PLAYBACK_PROMOTER", -"SAME_NAME_DEVICE_PROMOTER", -"PHONE_TARGETING_PROMOTER", -"TRAITS_PROMOTER", -"DEVICE_TYPE_PROMOTER", -"FEATURE_EXTRACTOR", -"LABELER" -], -"enumDescriptions": [ -"", -"filters", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"promoters", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"feature extractor", -"labeler" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsMediaCapabilities": { -"id": "AssistantLogsMediaCapabilities", -"properties": { -"canReceiveRemoteAction": { -"type": "boolean" -}, -"hasScreen": { -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantLogsMediaDeviceSelectionDecisionSummary": { -"description": "A summary of the reasons that we chose a certain target device.", -"id": "AssistantLogsMediaDeviceSelectionDecisionSummary", -"properties": { -"deviceSelectionPreferenceUsed": { -"enum": [ -"UNKNOWN_PRIORITY", -"SINGLE_QUALIFIED_SESSION_PREFERRED", -"FOCUS_SESSION_PREFERRED", 
-"FOCUS_DEVICE_SESSION_PREFERRED", -"LOCAL_DEVICE_SESSION_PREFERRED", -"PLAYING_DEVICE_STATE_PREFERRED", -"BUFFERING_DEVICE_STATE_PREFERRED", -"PAUSED_DEVICE_STATE_PREFERRED", -"STOPPED_DEVICE_STATE_PREFERRED" -], -"enumDescriptions": [ -"", -"If there is only one media session, it is selected by default.", -"A media session with focus was preferred over other media sessions.", -"A media session on a device with focus was preferred over other media sessions.", -"A media session on a local device was chosen.", -"", -"A device in the BUFFERING state was preferred over other devices.", -"A device in the PAUSED state was preferred over other devices.", -"A device in the STOPPED state was preferred over other devices." -], -"type": "string" -}, -"deviceSelectionReason": { -"enum": [ -"UNKNOWN_REASON", -"SYNDICATION_DEVICE_TARGETED", -"AUTO_DEVICE_TARGETED", -"QUERY_DEVICE_ANNOTATION_TARGETED", -"SINGLE_QUALIFIED_DEVICE_TARGETED", -"CAST_GROUP_TARGETED", -"MEDIA_SESSION_TARGETED", -"FOCUS_DEVICE_TARGETED", -"DEFAULT_DEVICE_TARGETED", -"LOCAL_DEVICE_TARGETED", -"DEVICE_IN_SAME_ROOM_TARGETED", -"AMBIGUOUS_DEVICES_TARGETED", -"LOCAL_GROUP_RETARGETED", -"FOCUS_OF_CLOSE_DEVICE_TARGETED", -"DEFAULT_OF_CLOSE_DEVICE_TARGETED", -"SINGLE_QUALIFIED_CLOSE_DEVICE_TARGETED", -"DEVICE_IN_CLOSE_ROOM_TARGETED", -"TETHERED_DEVICE_TARGETED" -], -"enumDescriptions": [ -"", -"For syndication devices (3P TV), we sometimes prefer syndication devices over any other remote device, since the query comes from a linked remote.", -"Auto surfaces can never cast to remote devices for safety reasons, so we force the local auto device if auto is the surface.", -"A device explicitly mentioned in the query was targeted.", -"Only one device was qualified.", -"We target a cast group only if all the qualified devices are in the same media session. 
This happens before applying any of the below filters.", -"Media sessions are targeted when session targeting is enabled, supported, a session exists with more than one device, and a session outranks all other sessions. The rankings are the keys below.", -"A device with media focus was targeted.", -"A default device was targeted.", -"The local device was targeted.", -"A device in the same room was targeted.", -"An ambiguous device result including multiple devices was targeted.", -"When local device is playing as a group, we retarget to the group if local device doesn't have focus. See go/group-targeting for the details.", -"==== Sandstone related DeviceSelectionReason ==== Entries below refer to other devices that are close by. Details at: go/sandstone-lumos. The media focus device of an equally close device was targeted.", -"The default target of an equally close device was targeted.", -"Only one device out of the ones that are equally close was qualified.", -"A device in the same room as an equally close device was targeted.", -"Sometimes wearable device (e.g., smartwatch) is tethered to a phone. This value indicates the tethered phone was targeted in these scenarios. This is related but slightly different from other remote targeting cases due to the strong binding/association between local (i.e., watch) and tethered device (i.e., phone). 
go/rohan-media-tethered-design" -], -"type": "string" -}, -"miscSelectionSignal": { -"items": { -"enum": [ -"NONE", -"BETTER_MATCH_DEVICE_WITH_HUMAN_FRIENDLY_NAME_FOUND", -"LOCAL_DEVICE_IMPLICITLY_MENTIONED", -"USED_LOOSE_PLAYBACK_STATE_REQUIREMENTS", -"QUERY_3P_DEVICE_ANNOTATION_IGNORED_REQUIREMENTS" -], -"enumDescriptions": [ -"", -"A better match device was found and targeted.", -"A local device was implicitly mentioned in the query, like \"on you\".", -"There were no devices which satisfy the playback states requirements, so the playback states requirements were removed and we tried again.", -"A 3P device annotated from the query was targeted. We ignore requirements sometimes for 3p devices." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsMediaFocusInfoLog": { -"description": "The information related to Media Focus. TODO(b/138952173) Deprecate MediaFocusState in logs/proto/majel_gws/media_action_triggering_info.proto and assistant/verticals/media/proto/target_device_info.proto", -"id": "AssistantLogsMediaFocusInfoLog", -"properties": { -"currentFocusDurationSec": { -"description": "How long the device is in focus so far", -"format": "uint32", -"type": "integer" -}, -"dialogTriggered": { -"description": "TODO(b/134944092) Log MediaFocusDialogTrigger Enum in focus_status.", -"type": "boolean" -}, -"focusDevice": { -"$ref": "AssistantLogsDeviceInfoLog", -"description": "LINT.ThenChange(//depot/google3/logs/proto/majel_gws/media_action_triggering_info.proto) The focus device." -}, -"mediaFocusState": { -"description": "The media focus state at the time of the request.", -"enum": [ -"NO_FOCUS", -"RECENT_FOCUS", -"STALE_FOCUS", -"HARD_FOCUS", -"SOFT_FOCUS" -], -"enumDescriptions": [ -"There is no focus device available or the focus expired.", -"Media Focus V1. 
The focus is valid and it is within the media_focus_dialog_threshold.", -"The focus is valid however it is over the media_focus_dialog_threshold and might require confirmation from user (dialog) before proceeding to use focus device.", -"Media Focus V2 The focus is valid and there is a playing session on focus device.", -"The focus is valid however there is no playing session on focus device and might require confirmation from user (dialog) before proceeding to use focus device." -], -"type": "string" -}, -"sourceDeviceId": { -"description": "The source device of media focus.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsMediaFocusesLog": { -"description": "Following are the MDA compatible loggings for media focus, default settings and nearby devices.", -"id": "AssistantLogsMediaFocusesLog", -"properties": { -"dialogTriggered": { -"type": "boolean" -}, -"localMediaFocus": { -"$ref": "AssistantLogsMediaFocusInfoLog" -}, -"mediaFocuses": { -"deprecated": true, -"description": "Deprecated, use nearby_media_focuses instead.", -"items": { -"$ref": "AssistantLogsMediaFocusInfoLog" -}, -"type": "array" -}, -"nearbyMediaFocuses": { -"description": "MediaFouces found on nearby devices.", -"items": { -"$ref": "AssistantLogsMediaFocusInfoLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsMediaStreamLog": { -"description": "Media stream is composed of a media session and one or more devices which are hosting (playing) the session. Usually, a session is only hosted by one devcie. However, with cast group or stream transfer/expansion, a session could be hosted by multiple devices, which are playing the same session simultaneously.", -"id": "AssistantLogsMediaStreamLog", -"properties": { -"deviceIndex": { -"description": "The device index in this message is consistent with the device index in DeviceInfoLog. 
This field refers to the devices that hosting the session.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"session": { -"$ref": "AssistantLogsDeviceMediaSessionLog" -} -}, -"type": "object" -}, -"AssistantLogsNearbyDevicesLog": { -"id": "AssistantLogsNearbyDevicesLog", -"properties": { -"deviceArbitrationCreationTimestampMs": { -"description": "The timestamp that DeviceArbitration is created in milliseconds.", -"format": "int64", -"type": "string" -}, -"deviceTargetingInputCreationTimestampMs": { -"description": "The timestamp that DeviceTargetingInput is built in milliseconds.", -"format": "int64", -"type": "string" -}, -"eliminatedByFurtherDistance": { -"format": "int32", -"type": "integer" -}, -"eliminatedByLocalClosest": { -"format": "int32", -"type": "integer" -}, -"eliminatedByUnknownDifferentRoom": { -"format": "int32", -"type": "integer" -}, -"eliminatedByUnregisteredDevice": { -"format": "int32", -"type": "integer" -}, -"localDevice": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"nearbyDevices": { -"items": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"type": "array" -}, -"numClosestDevices": { -"format": "int32", -"type": "integer" -}, -"numEquallyCloseDevices": { -"format": "int32", -"type": "integer" -}, -"numFurtherDevices": { -"format": "int32", -"type": "integer" -}, -"numHearingDevices": { -"format": "int32", -"type": "integer" -}, -"numUnknownDistanceDevices": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantLogsProviderAnnotationLog": { -"description": "Provider annotation annotated from the query. These fields contain the detailed information for the provider. (e.g. 
for Youtube, package_names contains \"com.google.android.youtube\", localized_names contains \"youtube\", and lang contains \"en\" from \"en-US\" which depends on user's setting.)", -"id": "AssistantLogsProviderAnnotationLog", -"properties": { -"lang": { -"type": "string" -}, -"localizedNames": { -"items": { -"type": "string" -}, -"type": "array" -}, -"packageNames": { -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsQueryAnnotationLog": { -"description": "Log about the query requirements", -"id": "AssistantLogsQueryAnnotationLog", -"properties": { -"deviceAnnotation": { -"$ref": "AssistantLogsDeviceAnnotationLog", -"deprecated": true, -"description": "Deprecated, please use room_annotations." -}, -"deviceAnnotations": { -"description": "Log the device annotations mentioned in the query.", -"items": { -"$ref": "AssistantLogsDeviceAnnotationLog" -}, -"type": "array" -}, -"providerAnnotation": { -"$ref": "AssistantLogsProviderAnnotationLog", -"description": "Log the provider annotations annotated from the query." -}, -"roomAnnotation": { -"$ref": "AssistantLogsRoomAnnotationLog", -"deprecated": true, -"description": "TODO(b/171250187) Deprecates the optional RoomAnnotationLog and DeviceAnnotationLog. Deprecated, please use device_annotations." -}, -"roomAnnotations": { -"description": "Log the room annotations mentioned in the query.", -"items": { -"$ref": "AssistantLogsRoomAnnotationLog" -}, -"type": "array" -}, -"structureAnnotations": { -"description": "Log the structure annotations mentioned in the query.", -"items": { -"$ref": "AssistantLogsStructureAnnotationLog" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantLogsReminderLog": { -"description": "Annotate a single reminder instance.", -"id": "AssistantLogsReminderLog", -"properties": { -"createdSecondsAgo": { -"description": "The reminder is created N seconds ago. This helps tracking how the user issues follow-up actions after reminder is created. 
For example, whether the user likes to issues another [show reminders] query right after reminder is created?", -"format": "int64", -"type": "string" -}, -"retrievedRankingClass": { -"description": "If the reminder is retrieved by a ranking class (see go/opa-reminders-ranker), this will be populated with the class info. Refer to assistant.productivity.ReminderRankingClass.RankingType. Since that proto is in proto2 format, we can only wire by int type.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantLogsRoomAnnotationLog": { -"description": "Room annotation mentioned in query.", -"id": "AssistantLogsRoomAnnotationLog", -"properties": { -"rawTextFromQuery": { -"description": "The raw text mentioning a room from the query, such as \"my living room\".", -"type": "string" -}, -"roomCount": { -"description": "The number of rooms annotated, if there are multiple structures. They are guaranteed to have the same text_from_query and name due to exact matching.", -"format": "int32", -"type": "integer" -}, -"roomId": { -"description": "The identification of the room.", -"items": { -"type": "string" -}, -"type": "array" -}, -"type": { -"description": "The annotation type mentioned in the query.", -"enum": [ -"NO_ROOM_ANNOTATION", -"ROOM_ID_ANNOTATION", -"ROOM_TYPE_ANNOTATION", -"ROOM_TEXT_ANNOTATION" -], -"enumDescriptions": [ -"No room is mentioned in the query.", -"Annotation specifying a room set up by users.", -"Annotation specifying a general room type, such as bedroom.", -"Annotation with text. It could be random text, such as \"my room\", \"all rooms\"." -], -"type": "string" -}, -"userDefinedName": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsSettingsDeviceIdLog": { -"description": "The logging-version of DeviceId proto, which identifies a device. 
This mirrors cs/google3/assistant/api/core_types/device_type.proto?q=symbol:DeviceId Next ID: 9", -"id": "AssistantLogsSettingsDeviceIdLog", -"properties": { -"agsaClientInstanceId": { -"description": "The client_instance_id on devices with GSA. See 'client_instance_field' in go/androidids.", -"type": "string" -}, -"alloDeviceId": { -"description": "Allo Id. Corresponds to the GBotRequest.Sender.sender. This ID type should be deprecated by now, yet we've observed it serving prod traffic in GB.", -"type": "string" -}, -"canonicalDeviceId": { -"description": "A unique device ID for Assistant devices as proposed by go/ocelot-team.", -"type": "string" -}, -"castDeviceId": { -"description": "If set, indicates that the device is a cast device, and contains the UUID of the cast device. Corresponds to the device_id field of the CastDevice proto.", -"type": "string" -}, -"clientInstanceId": { -"description": "DUSI (go/dusi) is used as the identifier here. This identifier is unique to the user and device. This will help identify which device or application the user's request originated from. This is not to be confused with the client_instance_id that android devices provide. This is currently used by surfaces that use the assistant-legacy-nexus and assistant-legacy-clockwork pipelines. DUSI is created and set in S3. This field is only filled for GAIA requests.", -"type": "string" -}, -"deviceConfigId": { -"description": "The unique DeviceConfig to the specific third party device. It is also used by Android Auto Embedded first party device. See go/opa-ids.", -"type": "string" -}, -"homeGraphDeviceId": { -"description": "The unique device ID for HomeGraph devices. This is the HomeGraph ID, created when the device is registered into HomeGraph. It is immutable for the same device unless it is completely deleted and recreated. See go/home-graph for details. 
}", -"type": "string" -}, -"libassistantDeviceId": { -"description": "The unique ID for libassistant based devices.", -"type": "string" -}, -"opaIosDeviceId": { -"description": "The unique device ID for the Assistant App on iOS. See go/opa-ios-design for details.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsStructureAnnotationLog": { -"description": "Structure annotation mentioned in query.", -"id": "AssistantLogsStructureAnnotationLog", -"properties": { -"rawTextFromQuery": { -"description": "The raw text mentioning a structure from the query, such as \"my house\".", -"type": "string" -}, -"structureId": { -"description": "The identification of the structure.", -"items": { -"type": "string" -}, -"type": "array" -}, -"type": { -"description": "The annotation type mentioned in the query.", -"enum": [ -"NO_STRUCTURE_ANNOTATION", -"STRUCTURE_ID_ANNOTATION", -"STRUCTURE_TEXT_ANNOTATION" -], -"enumDescriptions": [ -"No structure is mentioned in the query.", -"Annotation specifying a structure set up by users.", -"Annotation with text. It could be random text, such as \"this house\"." -], -"type": "string" -}, -"userDefinedName": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantLogsTargetDeviceLog": { -"description": "Represents the case where the library successfully selects the target device. It could be one or multiple devices. 
Next ID: 4", -"id": "AssistantLogsTargetDeviceLog", -"properties": { -"devices": { -"items": { -"$ref": "AssistantLogsDeviceInfoLog" -}, -"type": "array" -}, -"lowConfidenceReason": { -"enum": [ -"UNKNOWN_REASON", -"LOCAL_FALLBACK", -"MANUAL_DEFINED_REASON", -"SINGLE_NEARBY_DEVICE", -"PERSONAL_RESPONSE_BIT_OPTOUT_ON_LOCKED_PHONE", -"FURTHER_LOCAL_DEVICE", -"RESULT_FROM_DEFAULT_MEDIA_OUTPUT_PROMOTER" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"Lumos result from a FURTHER hearing device will be marked as low confidence.", -"Lumos result with the last stage as DEFAULT_MEDIA_OUTPUT_PROMOTER will be marked as low confidence." -], -"type": "string" -}, -"resultConfidenceLevel": { -"enum": [ -"UNKNOWN", -"LOW_CONFIDENCE", -"HIGH_CONFIDENCE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantPfrDeviceRdMetadata": { -"id": "AssistantPfrDeviceRdMetadata", -"properties": { -"deviceName": { -"type": "string" -}, -"deviceTypes": { -"items": { -"type": "string" -}, -"type": "array" -}, -"effectiveArgSpanLength": { -"format": "float", -"type": "number" -}, -"hasAmbiguousResolutions": { -"description": "True if there are more than one possible resolutions to the RD.", -"type": "boolean" -}, -"hasResolvedDeviceId": { -"description": "True if there's at least one device id that gets resolved. Note this is different from is_grounded = true: it is possible that is_grounded = false (num_grounded_args =0) but there is resolved device ids. E.g.: \"turn on the blue light\" Power_on(device_object = [d1, d2]:RD(category=DeviceObject(name='blue light')) where there are \"blue light 1\" and \"blue light 2\" hence two resolved device ids. 
But since the quantifier is single, GB can't resolve the ambiguity and would set num_grounded_args = 0 to indicate such unresolved ambiguity.", -"type": "boolean" -}, -"roomName": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantPfrSmartHomeIntentMetadata": { -"description": "Used by the Smarthome Business Rules twiddler to determine whether to replace the top-scoring Smarthome intent with another.", -"id": "AssistantPfrSmartHomeIntentMetadata", -"properties": { -"deviceRdMetadata": { -"description": "Each DeviceRdMetadata represents one device RD (device slot) in the intent. Note that each device RD could have multiple device groundings inside of it.", -"items": { -"$ref": "AssistantPfrDeviceRdMetadata" -}, -"type": "array" -}, -"intentName": { -"type": "string" -}, -"isExactMatch": { -"description": "When num_constraints == num_constraints_satisfied, indicating all slot matchings are exact match.", -"type": "boolean" -}, -"isGrounded": { -"description": "When num_grounded_args > 0, indicating there is at least one top-level argument is grounded.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantPfrTiebreakingMetadata": { -"description": "Features used by the PrefulfillmentRanker's scorer exclusively to break ties.", -"id": "AssistantPfrTiebreakingMetadata", -"properties": { -"fingerprint": { -"format": "uint64", -"type": "string" -}, -"sortedNameString": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantPrefulfillmentRankerPrefulfillmentSignals": { -"description": "Signals to be used by the Prefulfillment Ranker. Derived from the ParsingSignals and GroundingSignals carried by the FunctionCall. 
LINT.IfChange Next ID: 80", -"id": "AssistantPrefulfillmentRankerPrefulfillmentSignals", -"properties": { -"calibratedParsingScore": { -"description": "A parsing score that is independently calibrated by each parser/IG.", -"format": "double", -"type": "number" -}, -"deepMediaDominant": { -"description": "Whether the intent is dominant according to NSP deep-media.", -"type": "boolean" -}, -"dominant": { -"description": "Indicates interpretation dominance predicted by KScorer", -"type": "boolean" -}, -"effectiveArgSpanLength": { -"description": "The total effective length of the spans for the arguments used to construct the parse. May include vertical specific adjustments. Eg: For the query [delete my 7 p.m. alarm called chicken] and intent Delete_alarm(alarm_object=RD(category=AlarmObject( label=\"chicken\", trigger_time_datetime=<< 7 PM >>))), the effective argument span is \"7 p.m.\" + \"chicken\" (total length of 13).", -"format": "float", -"type": "number" -}, -"fulfillableDominantMedia": { -"description": "Whether this is a fulfillable, dominant Media intent.", -"type": "boolean" -}, -"generatedByLegacyAquaDomain": { -"description": "Whether or not the intent was generated by a legacy Aqua domain that PFR should care about.", -"type": "boolean" -}, -"hasAnswerGroup": { -"description": "Whether the interpretation has a Search answer group object, signifying it came from Search resolution.", -"type": "boolean" -}, -"hasIntentUpdate": { -"type": "boolean" -}, -"inQueryMaxEffectiveArgSpanLength": { -"description": "This is a cross-intent feature which is calculated by iterating all intent candidates. This feature should be populated in post-IG stage (before GB).", -"format": "float", -"type": "number" -}, -"intentName": { -"description": "intent_name is used by PFR ensemble model. See go/pfr_ha_launch_doc", -"type": "string" -}, -"intentNameAuisScore": { -"description": "QUS intent-based ranking signals. 
Assistant User Interaction Score which is aggregated using intent name.", -"format": "double", -"type": "number" -}, -"intentNameAuisScoreExp": { -"description": "Assistant User Interaction Score which is aggregated using intent name from exp laelaps.", -"format": "double", -"type": "number" -}, -"intentType": { -"description": "intent_type differentiates between intents that share the top level intent name. For eg: for TV_FALLBACK_SEARCH_INTENT, the top level intent name must be \"Find_media\" and the media_object argument within it must be of type \"Media_unspecified\".", -"enum": [ -"UNKNOWN_INTENT_TYPE", -"PLAY_MEDIA_MUSIC", -"PLAY_MEDIA_PODCAST", -"FIND_MEDIA_TV_FALLBACK", -"PLAY_MEDIA_VIDEO", -"ALARMS_AND_TIMERS", -"HEALTH_AND_FITNESS", -"PLAY_MEDIA_RADIO", -"PLAY_TVM", -"CALL", -"MESSAGE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"isAquaMediaIntent": { -"description": "Whether the intent aqua intent from media domain enabled in QBT", -"type": "boolean" -}, -"isCommunicationOpaRawTargetIntent": { -"description": "NSP-specific signals, used to determine if NSP intents should be selected or not.", -"type": "boolean" -}, -"isDummyIntent": { -"type": "boolean" -}, -"isFullyGrounded": { -"description": "Whether the intent is fully grounded.", -"type": "boolean" -}, -"isHighConfidencePodcastIntent": { -"description": "Used for PFR manual rule to prefer high confidence podcast intent (e.g. 
topical, genre) over generic podcast intents.", -"type": "boolean" -}, -"isIntentFromOrbit": { -"description": "Whether the intent is from ORBIT.", -"type": "boolean" -}, -"isMediaControlIntent": { -"description": "Whether the intent is a media control intent.", -"type": "boolean" -}, -"isMediaIntent": { -"description": "Whether the intent is a media intent.", -"type": "boolean" -}, -"isNspDescopedIntent": { -"type": "boolean" -}, -"isNspEnabledIntent": { -"type": "boolean" -}, -"isNspIntent": { -"description": "Whether this interpretation was genearted by NSP.", -"type": "boolean" -}, -"isNspTargetIntent": { -"type": "boolean" -}, -"isPlayGenericMusic": { -"description": "Whether the intent is a PlayGenericMusic-type intent.", -"type": "boolean" -}, -"isPodcastGenericIntent": { -"description": "Used for PFR manual rule to prefer high confidence podcast intent (e.g. topical, genre) over generic podcast intents.", -"type": "boolean" -}, -"isPodcastIntent": { -"description": "Whether the intent is a podcast intent.", -"type": "boolean" -}, -"isRadioIntent": { -"description": "Whether the intent is a PlayMedia radio intent.", -"type": "boolean" -}, -"isSageDisabledIntent": { -"description": "Whether the intent is an intent marked disabled by the Sage IG.", -"type": "boolean" -}, -"isSageInNageIntent": { -"description": "Whether the intent is produced by the Sage IntentGenerator invoked by the NSP intent-generator (thus, Sage-in-NSP-invoking_Sage, or Sage-in-Nage).", -"type": "boolean" -}, -"isSageIntent": { -"description": "Whether this intent was generated by Sage.", -"type": "boolean" -}, -"isScoreBasedIntent": { -"description": "Whether this intent is a score-based intent, relying on PFR for scoring and pruning to the top intent.", -"type": "boolean" -}, -"isTvmIntent": { -"description": "Whether the intent is a media object tvm intent.", -"type": "boolean" -}, -"isValidSmarthomeIntent": { -"description": "Whether the intent is a Smarthome-domain intent with valid 
device arguments.", -"type": "boolean" -}, -"isVideoIntent": { -"description": "Whether the intent is a media object video intent.", -"type": "boolean" -}, -"kScore": { -"description": "Used for PFR manaul rule to prefer high kscore radio intent. The k-score comes from twiddled_ranking_scores in query interpretations.", -"format": "float", -"type": "number" -}, -"kscorerRank": { -"description": "The rank order of the interpretation as determined by kscorer. The kscorer-determined dominant interpretation, if any, gets a rank of 0. The remaining N interpretations get a rank of 1 through N.", -"format": "int32", -"type": "integer" -}, -"maxHgrScoreAcrossBindingSets": { -"description": "The maximum score assigned by the Horizontal Grounding Ranker (HGR) across all of the intent's binding sets.", -"format": "double", -"type": "number" -}, -"nspIntentParseScore": { -"description": "Parse score generated by NSP. If NSP intent has been pruned due to dededuplication, It will have highest parse score for deduplicated intent.", -"format": "double", -"type": "number" -}, -"nspRank": { -"description": "Rank of the intent as reported by NSP.", -"format": "int32", -"type": "integer" -}, -"numConstraints": { -"description": "Signals as proposed in go/improved-grounding-signals. Sum of the number of constraints used by the Grounding Box to ground each variable.", -"format": "double", -"type": "number" -}, -"numConstraintsSatisfied": { -"description": "Sum of the number of constraints satisfied for each variable. Depending on the match score for a constraint, this number can be fractional and is in the range [0, num_constraints]. Populated by the Grounding Box.", -"format": "double", -"type": "number" -}, -"numGroundableArgs": { -"description": "Grounding Signals. 
Number of groundable arguments the intent has, populated by the Grounding Box.", -"format": "double", -"type": "number" -}, -"numGroundedArgs": { -"description": "Number of grounded arguments the intent has, populated by the Grounding Box.", -"format": "double", -"type": "number" -}, -"parsingScoreMse8BucketId": { -"description": "A ID corresponding to which bucket a given parsing score belongs in.", -"format": "int32", -"type": "integer" -}, -"phase": { -"description": "Query understanding phase the intent was produced in.", -"enum": [ -"QU_PHASE_UNSPECIFIED", -"QU_PHASE_REQUEST", -"QU_PHASE_QREWRITE", -"QU_PHASE_QBT", -"QU_PHASE_PROBE_QUERY", -"QU_PHASE_MULTI_ACCOUNT", -"QU_PHASE_CQBT", -"QU_PHASE_QBT_RESOLUTION", -"QU_PHASE_HIGH_PRECISION", -"QU_PHASE_COMBINED_RBT_RESOLUTION", -"QU_PHASE_ANALYZER_INPUT", -"QU_PHASE_NAGE", -"QU_PHASE_SUPERCAT" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"Invalid value to use as a default.", -"Phase in which the QueryUnderstandingRequest for a non-identity Candidate is generated. Produces QueryCandidateInfo.", -"Phase in which QUS calls QRewrite. Produces QRewrite's reply.", -"Phase in which QBT interpretations are ranked. Produces ranked QBT interpretations.", -"Phase in which QUS does a probe query for incomplete search results and produces ranked RBT interpretations on the basis of these. Only runs if understand_mode is PRE_RESULT_AND_PROBE_QUERY.", -"Phase in which QUS does cross-account understanding. Produces AnalyzerResponse whose interpretations contain personal data from multiple accounts.", -"Deprecated, but see go/qus-contextual-ig for original purpose.", -"Phase in which QUS calls resolution service to perform slow intent resolution. Produces resolved intents.", -"Phase in which QUS calls the fast HighPrecision workflow. 
Only runs if understand_mode is PRE_RESULT_AND_HIGH_PRECISION.", -"Phase in which QUS combines the RBT result and Slow Resolution result. Only runs if understand_mode is PRE_RESULT_AND_PROBE_QUERY_AND_RESOLUTION.", -"Phase in which QUS returns the AnalyzerInput as a separate Phase. It copies the AnalyzerInput from QU_PHASE_QREWRITE and returns it as a QueryUnderstandingStreamingResult for the purpose of logging in ACE. See go/analyzerinput-new-qus-phase.", -"Phase in which QUS runs NSP in the QUS.", -"Phase in which QUS calls a stripped-down version of QRewrite and then calls the Supercat Orchestrator to generate Supercat QIs." -], -"type": "string" -}, -"platinumSource": { -"description": "Whether the intent comes from the Sage IntentGenerator's \"platinum\" source, signifying high-confidence in quality.", -"type": "boolean" -}, -"pq2tVsAssistantIbstCosine": { -"description": "Cosine similarity between predicted query-to-term model and assistant intent-type-based salient terms. This is intended to be only used for ACE ranking and only populated for assistant traffic.", -"format": "double", -"type": "number" -}, -"pq2tVsIbstCosine": { -"description": "Cosine similarity between predicted query-to-term model and intent-type-based salient terms. This is intended to be used as a backoff to pq2t_vs_qibst_cosine if it is missing.", -"format": "double", -"type": "number" -}, -"predictedIntentConfidence": { -"description": "Intent confidence predicted by the AssistantVerticalClassifier QRewrite servlet.", -"format": "float", -"type": "number" -}, -"searchDispatch": { -"description": "The determination made by the SearchDispatchingConfig as to whether and how this interpretation should be dispatched to Search.", -"enum": [ -"UNKNOWN", -"NONE", -"BRIDGE_API", -"FULFILL_INTENT", -"EMIT_ONLY", -"COUNTERFACTUAL_LOG_ONLY", -"CAPACITY_ACCOUNTING" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"These rules are no-ops wrt. dispatching. 
The only effect they produce is the logging of the associated |counterfactual_events|.", -"These rules only consume capacity constraints, but are not dispatched. This way the rules can be used for the suppression for intents lower in the list." -], -"type": "string" -}, -"smarthomeIntentMetadata": { -"$ref": "AssistantPfrSmartHomeIntentMetadata", -"description": "SmartHome intent metadata used for the SmartHome business-rules twiddler." -}, -"subIntentType": { -"enum": [ -"SUB_INTENT_TYPE_UNKNOWN", -"SUB_INTENT_TYPE_TV_FALLBACK_SEARCH_INTENT", -"SUB_INTENT_TYPE_PODCAST", -"SUB_INTENT_TYPE_MUSIC_INITIATION_INTENT", -"SUB_INTENT_TYPE_PLAY_RADIO_INTENT", -"SUB_INTENT_TYPE_PLAY_TVM_INTENT", -"SUB_INTENT_TYPE_PLAY_VIDEO_INTENT", -"SUB_INTENT_TYPE_BROWSE_VIDEO_INTENT" -], -"enumDescriptions": [ -"", -"Find_media", -"Play_media, Play_media, Play_media, Listen_to_media, Media_entity_bare, Play_and_shuffle_media", -"Play_media", -"Play_media", -"Play_media", -"Play media", -"Find_media, Media_entity_bare" -], -"type": "string" -}, -"tiebreakingMetadata": { -"$ref": "AssistantPfrTiebreakingMetadata" -}, -"usesGroundingBox": { -"description": "Whether the interpretation should run through grounding box or not.", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantProductivityListItem": { -"description": "A message to represent an item in a list. Just a basic string for now, but extensible for the future.", -"id": "AssistantProductivityListItem", -"properties": { -"name": { -"description": "[REQUIRED] The name of the list item.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantRemindersAttachment": { -"description": "Proto descrbing an attachment to an Assistant Reminder. If the attachment has different behavior on different surfaces (e.g., deeplinks), there will be multiple attachments attach to the Reminder. 
Each of them will specify the surface type and the corresponding deeplink.", -"id": "AssistantRemindersAttachment", -"properties": { -"id": { -"description": "REQUIRED. An unique identifier for the attachment. We have a plan to index this field, so it's marked as REQUIRED. Chat with opa-reminders-eng@ if you have a use case without an attachment ID.", -"type": "string" -}, -"link": { -"$ref": "AssistantRemindersAttachmentLink" -}, -"surfaceType": { -"description": "REQUIRED. Surface types this attachment should be shown.", -"items": { -"enum": [ -"UNSPECIFIED", -"ANDROID_PHONE" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantRemindersAttachmentLink": { -"id": "AssistantRemindersAttachmentLink", -"properties": { -"linkUrl": { -"description": "REQUIRED. The link to surface to frontends (e.g., Hubpage, notifications.) This could also be a surface-specific deeplink (be sure to set `surface_type` accordingly.)", -"type": "string" -}, -"notificationText": { -"$ref": "AssistantRemindersNlgTemplateKey", -"deprecated": true, -"description": "REQUIRED. The text for the notification link button. Note: We cannot take nlp_generation.TemplateData yet due to cyclic dependency. The plan is to cut dependency from TemplateData to quality.actions.Reminder. DEPRECATED. No longer used as part of the notification flow." -} -}, -"type": "object" -}, -"AssistantRemindersMemoryPayload": { -"description": "Since this is stored in BE, any update on this proto needs LGTM by ARIS storage owner", -"id": "AssistantRemindersMemoryPayload", -"properties": { -"hasReferencedEntityAtCreation": { -"description": "Whether the reminder created has a referenced_entity attached to it or not(go/hub-memory-payload). Since we plan to set this in Assistant reminder creation path flow, in case later the referenced_entity is removed from the reminder, then this bit might still remain true. 
Also in case referenced_entity is later added to reminder(for example when referenced_entity is attached by Server), then also this bit might remain false. This bit will be used to *guess* if the user has a memory-enabled AGSA, thus we'll surface the \"open memory\" button on hubpage. This check is not perfect, as the user might have other phones with older AGSA, so this is just a *best guess*. This field won't be stored in Memory backend, and will not be populated back when retrieving reminders.", -"type": "boolean" -}, -"recordId": { -"description": "Id of record that is associated with Reminder. This will be set for all Assistant reminders created after the first launch of the Reminder Memory integration, see go/reminders-memory for more details. Also, this might apply to all other types of reminders.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantRemindersNlgTemplateKey": { -"description": "Equivalent to nlp_generation.TemplateKey. We cannot use nlp_generation.TemplateKey message directly becasue that proto is defined in a relatively large proto and has other dependencies, which will increase the size unnecessary and might hit many limitations (e.g., 5MiB limitation for Spanner type environment.).", -"id": "AssistantRemindersNlgTemplateKey", -"properties": { -"messageSet": { -"description": "REQUIRED.", -"type": "string" -}, -"templateName": { -"description": "REQUIRED.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantTeleportTeleportNicknameSignals": { -"id": "AssistantTeleportTeleportNicknameSignals", -"properties": { -"hasLocationInterpretation": { -"description": "Whether the nickname could also refer to a location. 
For example, \"walmart\", \"starbucks\".", -"type": "boolean" -}, -"installInfo": { -"description": "Indicates whether the user has the app installed.", -"enum": [ -"INSTALLATION_INFORMATION_UNAVAILABLE", -"IS_INSTALLED_APP", -"IS_NOT_INSTALLED_APP" -], -"enumDescriptions": [ -"The annotator was not aware of installed apps, or the resource is not an app.", -"The resource is an app, and the annotator had the information that it is installed.", -"The resource is an app, and the annotator had the information that it is not installed." -], -"type": "string" -}, -"isGeneric": { -"description": "True when the name is generic, i.e when it could refer to multiple packages from different developrs. For example, \"mail\" is considered a generic name (since it can refer to \"gmail\", \"yahoo mail\" etc.) but \"facebook\" although could refer to both \"facebook\" and \"facebook lite\" is not considered generic (both packages are from the same third party).", -"type": "boolean" -}, -"nicknameTier": { -"description": "The tier of the nickname.", -"enum": [ -"UNKNOWN", -"UNRESTRICTED", -"INTENT_REQUIRED", -"APP_PHRASE_REQUIRED" -], -"enumDescriptions": [ -"Default value for unknown tier.", -"Nicknames that always refer to the respective application or url. For example, \"facebook\" always refers to the Facebook app / homepage.", -"Nicknames that refer to the app/url when the intent is to execute an action on the app/url. For example, \"most wanted\" refers to the \"Need for Speed Most Wanted\" app, only if the user asks, for example, to open the app - \"open most wanted\".", -"Nicknames that refer to the app/url when the app/url restriction is explicit. For example, \"banking\" is a nickname for the \"Bank of America Mobile Banking\" app, only if user explicitly states they are referring to the app - \"banking app\", \"open banking application\"." 
-], -"type": "string" -}, -"source": { -"enum": [ -"DEFAULT", -"GELLER", -"DEVICE_CAPABILITIES" -], -"enumDescriptions": [ -"", -"", -"These annotations are generated on the fly (server-side), and are not matched by SE Muppet using the offline-generated corpus." -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsCommonContactMatchSignal": { -"description": "Neural contact match signals.", -"id": "AssistantVerticalsCommonContactMatchSignal", -"properties": { -"matchScore": { -"description": "Neural contact match similarity score.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoActionProjectConfig": { -"description": "Metadata for Actions-on-Google configuration.", -"id": "AssistantVerticalsHomeautomationProtoActionProjectConfig", -"properties": { -"contextId": { -"description": "Actions-on-Google action context ID. See go/sdm-hospitality-design.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoAgentDeviceId": { -"description": "An agent + device pair that uniquely identifies a device.", -"id": "AssistantVerticalsHomeautomationProtoAgentDeviceId", -"properties": { -"agentId": { -"description": "The agent's ID. Generally it is the agent's Google pantheon project id.", -"type": "string" -}, -"deviceId": { -"description": "Device ID defined by the agent.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoAgentInformation": { -"description": "AgentInformation represents the details needed to support both 1P and 3P partnerships for Home Automation. Next ID: 7", -"id": "AssistantVerticalsHomeautomationProtoAgentInformation", -"properties": { -"authType": { -"enum": [ -"OPEN_AUTH_DEFAULT", -"NEST_AUTH_PROXY" -], -"enumDescriptions": [ -"Device uses 3p path with token authentication. 
This is the default value used when a different authentication type is not specified.", -"Device uses 1p Nest path (eg: Olive) with gaia credential authentication." -], -"type": "string" -}, -"deviceSource": { -"enum": [ -"UNKNOWN", -"CLOUD_SYNC", -"ASSISTANT_SETTING_OOBE", -"LOCAL_SYNC", -"CHIP_SYNC" -], -"enumDescriptions": [ -"", -"Device is created through HA2 SyncDevices flow, are HA2 cloud agent devices. ultravox is a cloud agent as well as surface device and it's using Sync flow.", -"Device created through Assistant Setting at OOBE time, they're things that are mastered within google internally.", -"Device is created through the local device platform to HGS sync flow. These devices are not necessarily mastered in a 3P cloud - they are locally detected by an assistant device and then synced to HGS directly. See go/aogh-dd.", -"Device created through local CHIP provision and sync up. Chip synced devices can be the same devices as the devices synced from cloud, and the sync and de-dup logic can be very complicated." -], -"type": "string" -}, -"executionPath": { -"enum": [ -"HARPOON_DEFAULT", -"STUBBY" -], -"enumDescriptions": [ -"Execute through Harpoon call. This is the default value used when the execution_path is not specified.", -"Execute through stubby RPC call." -], -"type": "string" -}, -"id": { -"description": "Unique Agent ID which maps to a specific Agent. Not using Agent Name here as it may change over time.", -"type": "string" -}, -"key": { -"description": "Agent's foreign key that uniquely identifies a user's device.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoAttribute": { -"description": "Protos representing device or structure attributes. See go/hgs-attributes-protos. 
Only protos approved and formalized by assistant/HG team should be added here.", -"id": "AssistantVerticalsHomeautomationProtoAttribute", -"properties": { -"structureBasedRoutine": { -"$ref": "AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutine" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoAttributes": { -"description": "LINT.IfChange(proto_attributes)", -"id": "AssistantVerticalsHomeautomationProtoAttributes", -"properties": { -"attributeProtos": { -"items": { -"$ref": "AssistantVerticalsHomeautomationProtoAttribute" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoCommonEventTrigger": { -"description": "LINT.IfChange(proto_event_trigger) Next id: 5", -"id": "AssistantVerticalsHomeautomationProtoCommonEventTrigger", -"properties": { -"enabled": { -"type": "boolean" -}, -"eventTriggerPayload": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"description": "Detailed settings for the event trigger; unset if not applicable.", -"type": "object" -}, -"eventTriggerType": { -"description": "Different event type may have different settings. For example: * SCHEDULED will have event_trigger_payload of cs/symbol:assistant.verticals.voice_shortcut.proto.Schedule * LOCATION will have event_trigger_payload of cs/symbol:assistant.verticals.voice_shortcut.proto.LocationTriggerEvent", -"enum": [ -"UNKNOWN", -"SCHEDULED", -"LOCATION", -"DEVICE_CONTROL", -"DEVICE_SENSES" -], -"enumDescriptions": [ -"", -"", -"", -"For Routine triggered by Home Intelligence Service (IQS) based on DEVICE_STATE_CHANGE. NOTE: This trigger is currently handled by IQS team go/iqs-hhr.", -"For Routine triggered by Home Intelligence Service (IQS) based on DEVICE_SENSOR_SIGNAL. NOTE: This trigger is currently handled by IQS team go/iqs-hhr." -], -"type": "string" -}, -"triggerSource": { -"description": "Unique identifier for the EventTrigger, e.g. 
SCHEDULED_ROUTINES. See the enum values of cs/symbol:WorkflowTriggerInput.TriggerSource", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutine": { -"description": "Routines team stores the core Structure Based Routine data as the payload. We will add specific metadata on a per-need basis. LINT.IfChange(proto_structure_based_routine) Next id: 12 These two forms of payload are equivalent data in different formats and both will be stored in Home Graph. 1. The internal format will fan out to the DynamicEntity Footprints for read in Settings flow and Execution. 2. The UI format will be stripped out upon replication to DynamicEntity Footprints due to its redundancy and the Footprints data size limit, i.e. DE Footprints will only contain the internal format.", -"id": "AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutine", -"properties": { -"enabled": { -"description": "Whether this Routine is enabled or not. If false, then this Routine can't be triggered by Voice.", -"type": "boolean" -}, -"googlePreconfigWorkflowId": { -"description": "The unique identifier for a class of workflows. For example: * \"sbr_001\" => Away * \"sbr_002\" => Home * \"category_template\" => CUSTOM", -"type": "string" -}, -"language": { -"type": "string" -}, -"payload": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"deprecated": true, -"description": "Internal format payload primarily for Routines team use.", -"type": "object" -}, -"securityLevel": { -"description": "The security level of the Structure Based Routine as determined by the most security-sensitive task.", -"enum": [ -"UNKNOWN", -"ALLOW_UNVERIFIED", -"ALLOW_VERIFIED" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"shared": { -"type": "boolean" -}, -"storagePayload": { -"additionalProperties": { -"description": "Properties of the object. 
Contains field @type with type URL.", -"type": "any" -}, -"type": "object" -}, -"structureId": { -"type": "string" -}, -"triggers": { -"description": "Voice or event triggers.", -"items": { -"$ref": "AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutineTrigger" -}, -"type": "array" -}, -"type": { -"enum": [ -"UNDEFINED", -"CURATED", -"ALARM", -"CUSTOM" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"uiPayload": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"description": "UI format payload primarily for external team use.", -"type": "object" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutineTrigger": { -"description": "Next id: 3", -"id": "AssistantVerticalsHomeautomationProtoCommonStructureBasedRoutineTrigger", -"properties": { -"eventTrigger": { -"$ref": "AssistantVerticalsHomeautomationProtoCommonEventTrigger" -}, -"voiceTrigger": { -"$ref": "AssistantVerticalsHomeautomationProtoCommonVoiceTrigger" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoCommonVoiceTrigger": { -"description": "LINT.IfChange(proto_voice_trigger) Next id: 2", -"id": "AssistantVerticalsHomeautomationProtoCommonVoiceTrigger", -"properties": { -"query": { -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoConciergeFeatures": { -"description": "The Concierge features a structure is eligible for. 
See {@link home.graph.common.ConciergeFeatures}.", -"id": "AssistantVerticalsHomeautomationProtoConciergeFeatures", -"properties": { -"conciergeProductFeatures": { -"items": { -"enum": [ -"UNKNOWN_PRODUCT_FEATURE", -"E911" -], -"enumDescriptions": [ -"LINT.IfChange", -"LINT.ThenChange(//depot/google3/home/graph/proto/common/enums.proto)" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoDeviceTargetingOutputQueryInfo": { -"description": "Next ID: 5", -"id": "AssistantVerticalsHomeautomationProtoDeviceTargetingOutputQueryInfo", -"properties": { -"annotatedSpanDevice": { -"description": "The query span for device mention.", -"type": "string" -}, -"annotatedSpanRoom": { -"description": "The query span for room mention.", -"type": "string" -}, -"annotatedSpanStructure": { -"description": "The query span for structure mention.", -"type": "string" -}, -"processedMentionedSpan": { -"description": "This field is from query_info.processed_mentioned_span in DTO.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoHomeAutomationDevice": { -"description": "Next ID: 9", -"id": "AssistantVerticalsHomeautomationProtoHomeAutomationDevice", -"properties": { -"deviceSelectionLog": { -"$ref": "AssistantLogsDeviceSelectionLog", -"description": "the next 3 fields are for Lumos output (DTO) that needs to be propagated to the Fulfilment through the HomeAutomationDevice proto." -}, -"dtoError": { -"$ref": "AssistantDeviceTargetingDeviceTargetingError" -}, -"dtoQueryInfo": { -"$ref": "AssistantVerticalsHomeautomationProtoDeviceTargetingOutputQueryInfo", -"description": "This field is from query_info in DTO." -}, -"homeautomationMetadata": { -"$ref": "AssistantVerticalsHomeautomationProtoHomeAutomation_MetaData", -"description": "Device meta data." -}, -"list": { -"description": "list of HomeAutomationDeviceItem. 
After migration completes, we will mark the above 4 field as deprecated and only use this field.", -"items": { -"$ref": "AssistantVerticalsHomeautomationProtoHomeAutomationDeviceItem" -}, -"type": "array" -}, -"matchedItemKey": { -"description": "Corresponding to casse matched_item CustomTypeItem key.", -"type": "string" -}, -"matchedItemRawvalue": { -"description": "Corresponding to casse Argument raw_value.", -"type": "string" -}, -"matchedItemValue": { -"description": "Corresponding to casse matched_item CustomTypeItem value.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoHomeAutomationDeviceItem": { -"id": "AssistantVerticalsHomeautomationProtoHomeAutomationDeviceItem", -"properties": { -"homeautomationMetadata": { -"$ref": "AssistantVerticalsHomeautomationProtoHomeAutomation_MetaData", -"description": "Device meta data." -}, -"matchedItemKey": { -"description": "Corresponding to casse matched_item CustomTypeItem key.", -"type": "string" -}, -"matchedItemRawvalue": { -"description": "Corresponding to casse Argument raw_value.", -"type": "string" -}, -"matchedItemValue": { -"description": "Corresponding to casse matched_item CustomTypeItem value.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoHomeAutomation_MetaData": { -"description": "Next ID: 45", -"id": "AssistantVerticalsHomeautomationProtoHomeAutomation_MetaData", -"properties": { -"actionProjectConfigs": { -"description": "Custom actions that this item supports.", -"items": { -"$ref": "AssistantVerticalsHomeautomationProtoActionProjectConfig" -}, -"type": "array" -}, -"agentInformation": { -"$ref": "AssistantVerticalsHomeautomationProtoAgentInformation", -"description": "Agent details." 
-}, -"assistantDeviceId": { -"description": "Device ID that matches the ID passed from the device to discourse_context when a user issues a query to an Assistant-enabled device that is registered with Cast (via CCS (see go/castservers)), or some other service.", -"type": "string" -}, -"attributes": { -"additionalProperties": { -"description": "Properties of the object.", -"type": "any" -}, -"description": "Attributes data as provided from SYNC. This gets used in mutation and execution and in some potential cases, in biasing.", -"type": "object" -}, -"creatorGaiaId": { -"description": "See Device.creator_gaia_ids in //home/graph/proto/service/types.proto. If empty, the GAIA ID from the request EUC is assumed to be the creator. We only need at most one creator_gaia_id.", -"format": "int64", -"type": "string" -}, -"derivedType": { -"description": "Any types that are not the given item type, but derived later. For example, if an item has type action.devices.types.OUTLET but is named \"floor lamp\" we can derive that it also has type action.devices.types.LIGHT. Also considered along with |type| when triggering type-based actions.", -"items": { -"type": "string" -}, -"type": "array" -}, -"deviceModelId": { -"description": "See note in home_graph.proto; loaded into DE now to avoid having to double-read assistant settings records as per go/smarthome-removing-assistant-settings", -"type": "string" -}, -"gcmExecutionAddress": { -"description": "GCM address for cloud execution across google cloud messaging rather than 3p cloud.", -"type": "string" -}, -"groupIds": { -"description": "List of parent group IDs, if the device is added to one or multiple device groups (see go/home-groups). 
Will be consumed by Smart Home APIs and (in the future) Assistant CTF to populate the group member list of device groups.", -"items": { -"type": "string" -}, -"type": "array" -}, -"hashValue": { -"description": "The hash value from go/de-consistency-check", -"type": "string" -}, -"lanscanOptedIn": { -"description": "Whether local home platform should discover new devices via LAN for the structure.", -"type": "boolean" -}, -"matterUniqueId": { -"$ref": "AssistantVerticalsHomeautomationProtoMatterUniqueId", -"description": "Matter Unique ID. Contains VID/PID information" -}, -"modelName": { -"description": "Model name from HomeGraph, populated from model_manifest.model_name. See b/200087451.", -"type": "string" -}, -"notificationEnabledByUser": { -"description": "Indicates whether notifications have been enabled by a user and will be announced for this device. This is set by the user within the Google app settings, and Google will announce the device notification only if both notification_supported_by_agent and notification_enabled_by_user are true.", -"type": "boolean" -}, -"notificationSupportedByAgent": { -"description": "Indicates whether the device is capable of sending notifications. This field will be set by the agent (partner) on an incoming SYNC. If a device is not capable of generating notifications, the partner should set this flag to false. If a partner is not capable of calling ReportStateAndNotification to send notifications to Google, the partner should set this flag to false. If there is a user setting in the partner app to enable notifications and it is turned off, the partner should set this flag to false.", -"type": "boolean" -}, -"opaqueCustomData": { -"description": "Store custom data for agent calls here. This will likely be short-lived -- we will replace this with calls to HGS. 
(Note: This may end up not temporary if we only need it for a couple partners -- more efficient to have it on a few users than require HGS reads for all users.", -"type": "string" -}, -"operationalNodeId": { -"description": "Operational CHIP Node ID that combines the fabric ID and node id in format of . (Hex format without 0x prefix, for example, 0F001234FA67AA39.1234ABCD1111DDDD).", -"type": "string" -}, -"otherDeviceIds": { -"description": "Other agent id + foreign id pairs associated with the device. This can be used to represent a group of devices (e.g. Sonos' bonded zone) as a single device, or a device that comes in through different sync flows (e.g. Newman with a Nest camera).", -"items": { -"$ref": "AssistantVerticalsHomeautomationProtoAgentDeviceId" -}, -"type": "array" -}, -"otherDeviceSources": { -"description": "Additional device sources. This can be the result of the device being merged with other devices with a different source.", -"items": { -"enum": [ -"UNKNOWN", -"CLOUD_SYNC", -"ASSISTANT_SETTING_OOBE", -"LOCAL_SYNC", -"CHIP_SYNC" -], -"enumDescriptions": [ -"", -"Device is created through HA2 SyncDevices flow, are HA2 cloud agent devices. ultravox is a cloud agent as well as surface device and it's using Sync flow.", -"Device created through Assistant Setting at OOBE time, they're things that are mastered within google internally.", -"Device is created through the local device platform to HGS sync flow. These devices are not necessarily mastered in a 3P cloud - they are locally detected by an assistant device and then synced to HGS directly. See go/aogh-dd.", -"Device created through local CHIP provision and sync up. Chip synced devices can be the same devices as the devices synced from cloud, and the sync and de-dup logic can be very complicated." -], -"type": "string" -}, -"type": "array" -}, -"parentNode": { -"description": "LINT.IfChange(home_graph_single_parent) At the moment, we just have a single string. 
In future this will expand with additional metadata from client or cloud execution data store. In today's 'tree' HomeGraph each object has a single parent. In the future this may have a mesh for complex cases -- zones, doors, etc -- so we make this a repeated element today. LINT.ThenChange(//depot/google3/assistant/assistant_server/settings/user_defined_actions/footprints/footprint_accessor.cc:home_graph_single_parent)", -"items": { -"type": "string" -}, -"type": "array" -}, -"parentType": { -"description": "The type of the parent. Currently only set for devices, to distinguish between structure and room parents. Items currently have only one parent, and entries after the first parent_type are ignored.", -"items": { -"enum": [ -"UNKNOWN_ITEM_TYPE", -"DEVICE", -"ROOM", -"PLACE", -"GROUP", -"SCENE", -"STRUCTURE" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"personalizedNicknames": { -"description": "User-given nicknames for an entity (e.g. \"My house\"). These nicknames are unique to the gaia user. Nickname in DeviceInfo is per-entity level nickname, while personalized_nicknames is per-user per-entity.", -"items": { -"type": "string" -}, -"type": "array" -}, -"physicalLocation": { -"$ref": "AssistantVerticalsHomeautomationProtoPhysicalLocation", -"description": "Stores the location for the STRUCTURE type." -}, -"plural": { -"description": "We use this to determine if the synonyms matched in the aqua interpretation is plural. Then we will return disambiguate dialog or execute commands with all the targets.", -"items": { -"type": "string" -}, -"type": "array" -}, -"primaryName": { -"description": "Which of the values was the original, user-provided name -- or our disambiguated, cleaned-up version of it. 
This is what we use in TTS when we need to identify an object that wasn't just spoken uniquely by the user -- in disambiguation dialogue, or in response to a collective interrogative (e.g. \"what lights are on in the kitchen?\")", -"type": "string" -}, -"reportStateStatus": { -"description": "Whether device report state is out of sync with Query response.", -"enum": [ -"RSS_UNSPECIFIED", -"RSS_IN_SYNC", -"RSS_OUT_OF_SYNC" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"roleInformation": { -"$ref": "AssistantVerticalsHomeautomationProtoRoleInformation", -"description": "User's role information for this device. This will be used in Home Automation server to decide if user has authority to fulfill its request." -}, -"routableViaGcm": { -"description": "Only present for a target device. Indicates this target device is reachable by a local (AoGH) path via an AoGH device.", -"type": "boolean" -}, -"saftDocument": { -"$ref": "NlpSaftDocument", -"description": "SAFT Document with linguistic annotations for the primary device name." -}, -"smartDeviceManagementData": { -"$ref": "AssistantVerticalsHomeautomationProtoSmartDeviceManagementData", -"description": "Data needed for SDM (fleet management). See go/enterprise-id-in-assistant." -}, -"smartHomeFeatures": { -"$ref": "AssistantVerticalsHomeautomationProtoSmartHomeFeatures", -"description": "SmartHome feature flags that may be enabled per-item." -}, -"supportedStructureFeatures": { -"$ref": "AssistantVerticalsHomeautomationProtoSupportedStructureFeatures", -"description": "The features that are available for a structure. Will only be populated if the item_type == STRUCTURE." -}, -"supportedTraitsByAgent": { -"additionalProperties": { -"$ref": "AssistantVerticalsHomeautomationProtoHomeAutomation_MetaDataSupportedTraits" -}, -"description": "Map from agent ID to supported traits. Some devices (e.g. Newman) have multiple agents, with each agent being associated with a specific set of traits. 
This could alternatively have been formatted as map as {trait, agent} pairs instead of the {agent, list of trait} pairs, but we retain this format to be consistent with HomeGraph's representation. In practice, a trait should only be paired with a single agent (i.e. we should not have two agents with the same trait in their value list). This field is optional and should only be provided if the item has multiple agents.", -"type": "object" -}, -"supportsDirectResponse": { -"description": "This device supports direct response -- if the device itself is issuing the query (which means it's also an assistant surface) we can return its payload directly rather than via cloud.", -"type": "boolean" -}, -"targetDeviceSignalStrengths": { -"additionalProperties": { -"format": "int64", -"type": "string" -}, -"description": "Only present for an AoGH device. HGS Device ID of a target device and the signal strength (RSSI in dB, higher is better) between that target device and the AoGH device. If this map is empty, there are no target devices reachable by this AoGH device.", -"type": "object" -}, -"tdssUpdateTimestamp": { -"description": "The timestamp at which the TDSS map was last updated. This information is used to help determine which hub would be preferred if multiple hubs report the same reach-ability for a device.", -"format": "google-datetime", -"type": "string" -}, -"traitRoutingHints": { -"description": "For SHED devices, some traits can only be executed on 3P cloud, e.g. \"action.devices.traits.MediaInitiation\", \"action.devices.traits.Channel\" go/shed-per-trait-routing", -"items": { -"$ref": "HomeGraphCommonTraitRoutingHints" -}, -"type": "array" -}, -"traitRoutingTable": { -"additionalProperties": { -"$ref": "HomeGraphCommonRoutingTable" -}, -"description": "Map from traits to routing table. 
Metadata includes preferred execution path per trait and, when Matter is preferred, which endpoint should be used for the trait.", -"type": "object" -}, -"traitToAttributeProtos": { -"additionalProperties": { -"$ref": "AssistantVerticalsHomeautomationProtoAttributes" -}, -"description": "Map of trait to a proto representing the attribute. This is different from the attributes field above which is represented as a struct. The attributes here are represented as protos and will require specific support per trait.", -"type": "object" -}, -"type": { -"description": "The item type, such as \"action.devices.types.VACUUM\" - to be used in triggering type-based actions, e.g. \"start vacuuming\": go/smarthome-type-based-actions.", -"type": "string" -}, -"userDefinedDeviceType": { -"description": "The priority order of speech targeting: 1. user_defined_device_type 2. derived_device_type 3. device_type", -"type": "string" -}, -"voiceMatchRequired": { -"description": "Set to which level of voice match is needed. Enum based on string input from the partner in json sync. Values accepted: \"none\" (but in this case partners won't set it), \"owner\" [requires matching one of the creator gaia IDs], or \"member\" [any recognized voice 'enrolled' on the device]. This may expand; only \"owner\" is in use for first partner, Tile.", -"enum": [ -"ANY", -"OWNER", -"MEMBER" -], -"enumDescriptions": [ -"Also default, and existing behavior == \"none\" in text", -"Must match an actual creator_gaia_id on device", -"Any member of the surface / structure interacting" -], -"type": "string" -}, -"willReportState": { -"description": "This device will report state; we can query realtime state from local HGS rather than slow QUERY intent to the 3p cloud.", -"type": "boolean" -}, -"zoneNameSaftDocument": { -"$ref": "NlpSaftDocument", -"description": "SAFT Document with linguistic annotations for the zone name, if applicable." 
-} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoHomeAutomation_MetaDataSupportedTraits": { -"id": "AssistantVerticalsHomeautomationProtoHomeAutomation_MetaDataSupportedTraits", -"properties": { -"traits": { -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoMatterUniqueId": { -"description": "Matter unique Id. These values are provided by the device.", -"id": "AssistantVerticalsHomeautomationProtoMatterUniqueId", -"properties": { -"productId": { -"format": "int32", -"type": "integer" -}, -"uniqueId": { -"type": "string" -}, -"vendorId": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoPhysicalLocation": { -"id": "AssistantVerticalsHomeautomationProtoPhysicalLocation", -"properties": { -"address": { -"type": "string" -}, -"geoLocation": { -"$ref": "GoogleTypeLatLng" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoRoleInformation": { -"description": "Represents the user\u2019s role such as assistant only or manager for a device. Design doc: https://docs.google.com/document/d/1c1hnauEbBfDkywO3GZkI8ejHP765l2tLspmPgckEe2Y/", -"id": "AssistantVerticalsHomeautomationProtoRoleInformation", -"properties": { -"iamCheckRequired": { -"description": "When true, role_type will be ignored, Nest IAM RPC will called to check authority.", -"type": "boolean" -}, -"roleType": { -"enum": [ -"UNDEFINED", -"ASSISTANT_ONLY", -"CAST_LEGACY_LINKED", -"MANAGER" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoSmartDeviceManagementData": { -"id": "AssistantVerticalsHomeautomationProtoSmartDeviceManagementData", -"properties": { -"enterpriseId": { -"description": "The enterprise that owns the structure. E.g. Disney, Dream Hotel, etc. This is used for log/analytics purpose. 
For privacy reasons, we log at enterprise level instead of structure level.", -"type": "string" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoSmartHomeFeatures": { -"description": "SmartHome feature flags that may be enabled per-item. LINT.IfChange", -"id": "AssistantVerticalsHomeautomationProtoSmartHomeFeatures", -"properties": { -"circadianLightingEnabled": { -"description": "Flag indicating whether the background Circadian Lighting effect is enabled for a particular light (go/circadian-lighting-e2e).", -"type": "boolean" -}, -"energySavingsEnabled": { -"description": "Flag indicating whether automatic Energy Savings are enabled for this item.", -"type": "boolean" -}, -"gentleWakeupEnabled": { -"description": "Flag indicating whether Gentle Wake Up is enabled for this item (go/sleep-wake-design).", -"type": "boolean" -}, -"homeAwayOverMatterEnabled": { -"description": "Flag indicating whether the user has enabled / disabled sending Home/Away status updates to the device through the Google custom IntelligenceEvents Matter cluster. (go/google-clusters-design)", -"type": "boolean" -} -}, -"type": "object" -}, -"AssistantVerticalsHomeautomationProtoSupportedStructureFeatures": { -"description": "The features a structure supports.", -"id": "AssistantVerticalsHomeautomationProtoSupportedStructureFeatures", -"properties": { -"conciergeFeatures": { -"$ref": "AssistantVerticalsHomeautomationProtoConciergeFeatures" -} -}, -"type": "object" -}, -"AttentionalEntitiesMentionProperties": { -"description": "Represents the properties of a mention. Next ID: 13", -"id": "AttentionalEntitiesMentionProperties", -"properties": { -"deviceId": { -"$ref": "AssistantApiCoreTypesDeviceId", -"description": "The unique device on which the mention occurred. For example, if the user has two Google Home devices, this indicates which of the two was used." -}, -"eventId": { -"$ref": "EventIdMessage", -"description": "ID of the event that resulted in this entity mention. 
For user and system turn AEs, this is taken from the ConversationSnapshotId of the snapshot containing this mention. For client AEs, this is empty. This can be used to join back this particular mention to the specific \"turn\" in which this mention took place." -}, -"factoidScore": { -"description": "If this mention corresponds to a WebAnswer, then this defines the score associated with that answer.", -"format": "float", -"type": "number" -}, -"listEntryInfo": { -"$ref": "AttentionalEntitiesMentionPropertiesListEntryInfo", -"description": "If present, this entity was mentioned as part of a larger list." -}, -"recency": { -"description": "Estimates the recency of the mention. This is internally computed at runtime on a turn-by-turn basis.", -"enum": [ -"RECENCY_UNSPECIFIED", -"MOST_RECENT_TURN" -], -"enumDescriptions": [ -"", -"Corresponds to a mention from the most recent user-driven interaction. For example, this label would be applied to both mentions of \"Barack Obama\" in the following interaction: USER: How tall is Obama? GOOGLE: Barack Obama is six feet one inch tall. NOTE: This is currently only applicable to mentions that have a source of User or System." -], -"type": "string" -}, -"role": { -"$ref": "AttentionalEntitiesSemanticRoleId", -"description": "The semantic role that the entity was used in." -}, -"salience": { -"description": "How salient this mention is. This field will only be set if the mention is derived from a SearchAnswerValue. See go/webresultsdata-as-aes for more details.", -"enum": [ -"UNKNOWN_SALIENCE", -"PRIMARY", -"METADATA" -], -"enumDescriptions": [ -"Any mention that is not derived from a SearchAnswerValue will not have a Salience.", -"This mention corresponds to the primary entity in the response. For example, if the query is \"who was the 44th President of the US\", then the entity for \"Barack Obama\" would be marked as PRIMARY. Note that there may more than one PRIMARY entity in a response if a list of values is displayed (e.g. 
\"Stephen King books\").", -"This mention corresponds to metadata associated with the answer. For the US President query mentioned above, something like the inauguration date could be considered as METADATA. If it is difficult to identify a PRIMARY entity, then all entities may be marked as METADATA." -], -"type": "string" -}, -"source": { -"$ref": "AttentionalEntitiesMentionPropertiesSource", -"description": "Contains metadata about the source of the mention." -}, -"spatialProperties": { -"$ref": "AttentionalEntitiesSpatialProperties", -"description": "If present, properties of visual mentions (e.g., how they are displayed to the user, visibility, etc.)." -}, -"surfaceForm": { -"$ref": "AttentionalEntitiesSurfaceForm", -"description": "Details about how this mention was presented." -}, -"timestamp": { -"description": "Unix timestamp noting (approximately) when this mention occurred. We do not guarantee that the time will correspond precisely to when the user uttered/heard a response. If mentions within a single turn have *different* timestamps, they should accurately reflect the order in which the mentions occurred. If that order is unknown, they should all have the same timestamp.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"AttentionalEntitiesMentionPropertiesListEntryInfo": { -"description": "Contains information about how an entity was presented as part of a list.", -"id": "AttentionalEntitiesMentionPropertiesListEntryInfo", -"properties": { -"index": { -"description": "The index of the entity presented to the user. NOTE: Indexing starts from 0.", -"format": "int64", -"type": "string" -}, -"lexicalGroundingsId": { -"description": "A string which uniquely identifies the list item this entity represents in the list. For example, consider the \"OrderPizza\" intent with the \"size\" slot: U: I want to order a pizza A: Sure. What size do you want: large, medium, or small? 
U: Gigantic The lexical_groundings_id can be \"large\" to identify the large item in the list. This lexical_groundings_id together with the semantic role fields (i.e., role.intent_id & role.role_id) can be used to match the nlp_semantic_parsing::LexicalGroundings::ValueTermType to utilize lexical grounding for i18n of static list selection items. Note that this field only needs to be populated when developers expect to provide lexical groundings for the list item this entity represents. Effectively, this field will be populated when this entity is published by ListPresentationFrame and the ::quality::dialog_manager::IntentStageSignals::FieldCandidate.lexical_groundings_id field is populated. See go/lpf-i18nv2 & go/taskstate-ae-sync for more details. ", -"type": "string" -} -}, -"type": "object" -}, -"AttentionalEntitiesMentionPropertiesSource": { -"description": "The agent or system from which the mention was derived. Each mention corresponds to a single source.", -"id": "AttentionalEntitiesMentionPropertiesSource", -"properties": { -"client": { -"$ref": "AttentionalEntitiesMentionPropertiesSourceClient" -}, -"system": { -"$ref": "AttentionalEntitiesMentionPropertiesSourceSystem" -}, -"user": { -"$ref": "AttentionalEntitiesMentionPropertiesSourceUser" -} -}, -"type": "object" -}, -"AttentionalEntitiesMentionPropertiesSourceClient": { -"description": "The client provided this entity. Currently, this exclusively corresponds to an entity that was circulated by the client. See go/on-device-aes for more details.", -"id": "AttentionalEntitiesMentionPropertiesSourceClient", -"properties": {}, -"type": "object" -}, -"AttentionalEntitiesMentionPropertiesSourceSystem": { -"description": "The Assistant mentioned this entity. This corresponds to entities annotated during fulfillment. 
More specifically, these entities are typically provided by developers either via a Monastery frame or an InteractionBuilder.", -"id": "AttentionalEntitiesMentionPropertiesSourceSystem", -"properties": {}, -"type": "object" -}, -"AttentionalEntitiesMentionPropertiesSourceUser": { -"description": "The user mentioned this entity. It was extracted from a previous winning intent (IntentQuery or IntentUpdate). Such entities are computed at runtime from the interpretation history without any developer intervention.", -"id": "AttentionalEntitiesMentionPropertiesSourceUser", -"properties": {}, -"type": "object" -}, -"AttentionalEntitiesSemanticRoleId": { -"description": "Uniquely identifies a semantic role. When this role corresponds to a slot in a registered user intent (see go/assistant-intent-catalog), then the SemanticRoleId maps precisely onto that slot in the intent catalog. However, not all semantic roles corresponds to such user intent slots.", -"id": "AttentionalEntitiesSemanticRoleId", -"properties": { -"intentId": { -"description": "Semantic roles will be defined locally, within the context of a single task/feature. The |intent_id| is a unique identifier for such a local cluster. In most cases, this should be exactly the same as the name of the intent used for TaskState (see go/assistant-intent-catalog). In cases where the intent isn't well-defined, this can be an arbitrary, feature-defined identifier.", -"type": "string" -}, -"roleId": { -"description": "Identifier for a semantic role, unique within the namespace of |intent_id|. When this role corresponds to a slot in the intent, the |role_id| should be equal to the name of that argument. For example, consider an entry in the intent catalog: core_intent { id { id: \"BookARide\" } slot { name: \"provider\" type { string_type { } } } slot { name: \"num_riders\" type { number_type { } } } } Then, the |role_id| would be \"provider\" or \"num_riders\" when referring to one of these slots. 
NOTE: when responding to the user, the Assistant may actually make use of other roles such as \"ETA\" or \"driver\" that are not part of the intent declaration. These should still be assigned consistent semantic roles. For example, a dialog with the Shopping feature: User: Where can I buy XYZ? Google: [Best Buy in Sunnyvale] has [XYZ] in stock. User: Great! Give me directions. In this case, both \"Best Buy\" and \"XYZ\" would be pushed to attentional entities. Best Buy, in this case, may not be an argument in the ShoppingItemStockInquiry intent, but should still have a consistent |role_id| such as \"possessing_business\".", -"type": "string" -} -}, -"type": "object" -}, -"AttentionalEntitiesSpatialProperties": { -"description": "Properties of visual mentions (e.g., how they are displayed to the user, visibility, etc.).", -"id": "AttentionalEntitiesSpatialProperties", -"properties": { -"visibility": { -"enum": [ -"UNKNOWN_VISIBILITY", -"VISIBLE", -"HIDDEN" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"AttentionalEntitiesSurfaceForm": { -"description": "How the entity was presented in this mention at a surface level. For example, \"President Barack Obama\" or \"Barack Obama\" or \"he\" might all be reasonable surface forms for the MID /m/02mjmr.", -"id": "AttentionalEntitiesSurfaceForm", -"properties": { -"text": { -"type": "string" -} -}, -"type": "object" -}, -"BiasingPerDocData": { -"description": "This data is expected to appear in approximately 2 out of every 1,000 documents with an average of 2 fields per document. 
Rough order of size is in the hundreds of kilobytes per Mustang shard.", -"id": "BiasingPerDocData", -"properties": { -"biasingfield": { -"items": { -"$ref": "BiasingPerDocDataBiasingField" -}, -"type": "array" -} -}, -"type": "object" -}, -"BiasingPerDocData2": { -"description": "A replacement for BiasingPerDocData that is more efficient wrt size in the index.", -"id": "BiasingPerDocData2", -"properties": { -"biasingField": { -"items": { -"$ref": "BiasingPerDocData2BiasingField" -}, -"type": "array" -} -}, -"type": "object" -}, -"BiasingPerDocData2BiasingField": { -"id": "BiasingPerDocData2BiasingField", -"properties": { -"compressedName": { -"description": "A fingerprint of the actual name of the field.", -"format": "uint32", -"type": "integer" -}, -"value": { -"description": "The value, under various representations to get maximum compression. Exactly one of them is guaranteed to be filled. value as a double.", -"format": "double", -"type": "number" -}, -"valueFloat": { -"description": "a floating value, represented as an integer by converting using floating_value * 1000. Useable for all floating values that need 3 digits of precision, and are small enough.", -"format": "int32", -"type": "integer" -}, -"valueInt": { -"description": "value as an int32. 
When the value is encode-able as an integer.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"BiasingPerDocDataBiasingField": { -"description": "Metadata fields on which we can bias (sort) search results independently from the normal ranking using a ScoreAdjuster", -"id": "BiasingPerDocDataBiasingField", -"properties": { -"Name": { -"description": "Fingerprint of the attribute name (no need to keep long field names)", -"format": "uint64", -"type": "string" -}, -"Value": { -"description": "Biasing value translated into a double for uniform comparison", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"BlobstoreBlobRef": { -"description": "A BlobRef is used to refer to a blob in BlobStore. Clients may only manipulate blobs through BlobRefs. BlobRefs should not be sent in the clear outside of Google (for example, encoded in URLs, stored in a client cookie, or referred to in Javascript); for efficiency, the IDs expose internal details of the blobstore (such as machine IPs or cluster names). If clients need to store BlobRefs outside of Google, they must encrypt the BlobRef securely or use an alternative insecure identifier with an id->BlobRef mapping inside our network. 
", -"id": "BlobstoreBlobRef", -"properties": { -"BlobID": { -"format": "byte", -"type": "string" -}, -"Options": { -"format": "int64", -"type": "string" -}, -"RefID": { -"format": "byte", -"type": "string" -}, -"ShardBin": { -"format": "int32", -"type": "integer" -}, -"Size": { -"description": "Size of the complete blob, in bytes.", -"format": "int64", -"type": "string" -}, -"SourceV2BlobID": { -"description": "The ID of the V2 blob this blob has", -"type": "string" -}, -"V2ReadBlobToken": { -"description": "Deprecated.", -"type": "string" -} -}, -"type": "object" -}, -"BlogPerDocData": { -"description": "Additional data for Blog/Posts", -"id": "BlogPerDocData", -"properties": { -"blogurlFp": { -"description": "used for blogurl crowding.", -"format": "uint64", -"type": "string" -}, -"clientSpamminess": { -"description": "This score captures how spammy the client is that the micropost was created with. The higher the score the worse.", -"format": "int32", -"type": "integer" -}, -"convTree": { -"$ref": "BlogsearchConversationTree", -"description": "For the threaded conversation view. Only populated in docs with provider type SYNTHETIC_CONVERSATION_DOC." -}, -"copycatScore": { -"format": "int32", -"type": "integer" -}, -"docQualityScore": { -"format": "int32", -"type": "integer" -}, -"isSyntacticReshare": { -"description": "A syntactic reshare is a document that is * created from an original and shared with friends and * we detect this resharing property by syntactically parsing the doc. . For example, a retweet is an example of a syntactic_reshare because we can detect that it's a reshare by grepping for \"RT @\".", -"type": "boolean" -}, -"microblogQualityExptData": { -"$ref": "Proto2BridgeMessageSet", -"description": "Experimental data for quality experiments. This will NOT be populated in prod, but we will use this for experiments." -}, -"numMentions": { -"description": "For replies/reshares. 
num_mentions = number of times the pattern @foo appears in the document.", -"format": "int32", -"type": "integer" -}, -"outlinks": { -"items": { -"$ref": "BlogPerDocDataOutlinks" -}, -"type": "array" -}, -"postContentFingerprint": { -"description": "The fingerprint for the body text of the microblog post. It is copied from MicroBlogPost.post_content_fingerprint.", -"format": "int32", -"type": "integer" -}, -"qualityScore": { -"format": "int32", -"type": "integer" -}, -"spamScore": { -"description": "Blog scoring signals.", -"format": "int32", -"type": "integer" -}, -"universalWhitelisted": { -"type": "boolean" -}, -"userQualityScore": { -"description": "User and doc quality scores for updates (aka microposts).", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"BlogPerDocDataOutlinks": { -"description": "Resolved url and site spamscore for outlinks in updates (aka microposts).", -"id": "BlogPerDocDataOutlinks", -"properties": { -"aggregationFp": { -"description": "Representative id for an equivalence class of URLs. E.g. 
http://youtube.com/watch?v=12 and http://youtube.com/watch?v=12&feature=related have the same aggregation id since they're effectively the same webpage", -"format": "uint64", -"type": "string" -}, -"resolvedUrl": { -"type": "string" -}, -"siteSpamScore": { -"format": "int32", -"type": "integer" -}, -"title": { -"type": "string" -} -}, -"type": "object" -}, -"BlogsearchConversationNode": { -"id": "BlogsearchConversationNode", -"properties": { -"authorName": { -"description": "The username of the author of the microblog post represented by this node.", -"type": "string" -}, -"children": { -"description": "A list of docids of child nodes.", -"items": { -"type": "string" -}, -"type": "array" -}, -"date": { -"description": "The creation date of the doc.", -"format": "int64", -"type": "string" -}, -"docid": { -"description": "Docid of the microblog post represented by this node.", -"type": "string" -}, -"parent": { -"description": "The docid of the parent node. The root of the tree will leave this empty.", -"type": "string" -} -}, -"type": "object" -}, -"BlogsearchConversationTree": { -"id": "BlogsearchConversationTree", -"properties": { -"convId": { -"description": "The id of this conversation.", -"type": "string" -}, -"nodes": { -"description": "The nodes in this conversation. No particular order is assumed.", -"items": { -"$ref": "BlogsearchConversationNode" -}, -"type": "array" -} -}, -"type": "object" -}, -"BlueGingerClientVisibleProtoBlueGingerSupportedServices": { -"description": "Information to indicate BG availability for businesses. 
This message is filled from Topic Server and stored in the GSR in Superroot.", -"id": "BlueGingerClientVisibleProtoBlueGingerSupportedServices", -"properties": { -"modules": { -"description": "List of supported modules for a business.", -"items": { -"$ref": "BlueGingerClientVisibleProtoBlueGingerSupportedServicesBlueGingerModule" -}, -"type": "array" -} -}, -"type": "object" -}, -"BlueGingerClientVisibleProtoBlueGingerSupportedServicesBlueGingerModule": { -"id": "BlueGingerClientVisibleProtoBlueGingerSupportedServicesBlueGingerModule", -"properties": { -"name": { -"description": "Module name, e.g. hairdresser_reservation. from quality/views/extraction/kcube/bg/modules/modules.bzl.", -"type": "string" -}, -"services": { -"description": "Services of this module that are supported by the business, e.g. haircuts.", -"items": { -"type": "string" -}, -"type": "array" -}, -"useCase": { -"enum": [ -"UNKNOWN_USE_CASE", -"OPENING_HOURS", -"ON_DEMAND_OPENING_HOURS", -"GEO_DATA_EXTRACTION", -"OPERATING_MODE_EXTRACTION", -"RESTAURANT_RESERVATION", -"MASSAGE_RESERVATION", -"HAIRDRESSER_RESERVATION", -"NAIL_SALON_RESERVATION", -"RUNNING_LATE", -"FOOD_ORDERING", -"LOCAL_INVENTORY_CHECK", -"ON_DEMAND_LOCAL_INVENTORY", -"WAITLIST", -"CHECK_WAIT", -"CHEFBOT", -"ADS_CALL_CENTER_AUTHENTICATION", -"PLAYSTORE", -"TAKING_RESTAURANT_RESERVATIONS", -"CALL_CENTER_DEMO", -"ASSISTED_CALL_DEMO", -"BUSINESS_OPT_IN", -"CALLJOY_PILOT", -"ASSISTANT_REMINDERS_DEMO", -"HAPPY_BIRTHDAY", -"ASSISTED_IVR", -"DUPLEX_FOR_BUSINESS_PILOT", -"SAY_THE_SAME_THING", -"COVID_FAQ", -"VANCOUVER", -"MEENAPLEX", -"REMOVED_USE_CASE_6", -"SEMI_DELEGATED_CALLING", -"HARDWARE_SETUP", -"DUMDA_BOT", -"SMART_REPLY", -"DUPLEX_ZERO", -"SPAM_FILTER", -"TEXT", -"IVR_CRAWLING", -"VOICEMAIL", -"INBOUND_SMB", -"CCAI_DEMO", -"DIALOGFLOW_DELEGATION", -"AD_LEAD_VERIFICATION", -"GET_HUMAN", -"CHECK_INSURANCE_ACCEPTANCE", -"FREE_TEXT", -"SMART_NOTES" -], -"enumDeprecated": [ -false, -false, -true, -false, -true, -false, -true, -false, 
-false, -true, -true, -true, -true, -false, -false, -true, -false, -true, -true, -false, -false, -false, -true, -true, -true, -false, -false, -true, -true, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"// Data extraction:", -"Opening hours extraction, triggered internally without an end-user in the loop. The trigger might be a user-edit, but the user is not being updated about the extraction results.", -"On-demand opening hours extraction, i.e. requests are issued by an end-user who is also notified about the results of the task.", -"", -"", -"", -"", -"See go/bg-hdr-2019.", -"", -"See go/duplex-running-late.", -"See go/duplex-fo-pilot.", -"Local Inventory Checks (go/duplex-dove).", -"On-Demand Local Inventory (go/duplex-li-on-demand).", -"", -"Check wait: go/duplex-check-wait-time", -"Chefbot (go/duplexify-cooking-goals).", -"", -"Play Store user authentication. See http://doc/16tApXKX1cc23AIuhIhPCWVkm1qkcajx2pHMkxW6kMAs", -"Taking restaurant reservations on behalf of a restaurant. i.e. 
getting calls from clients who want to make a reservation, and trying to match their request with the restaurant's schedule.", -"", -"", -"", -"", -"", -"", -"", -"Duplex for Business Pilot: go/duplex-voice-pilot", -"Say The Same Thing game bot.", -"Bot for answering FAQ about COVID-19 (go/bg-covid-faq).", -"", -"go/meenaplex-dd", -"Was: .", -"UC ID for testing ideas around a partial call delegation bot.", -"UC ID for hardware setup prototype", -"Experimental use case for a form of Meena+Duplex.", -"Personal Call Assist.", -"Duplex Zero Bot.", -"Spam filter bot.", -"Text extraction bot.", -"IVR crawling bot.", -"Voicemail bot.", -"Duplex for Business - Inbound SMB bot.", -"Duplex for Business - Demo for the CCAI integration.", -"Duplex for Business - Dialogflow delegation module for CCAI integration.", -"Duplex for Business - ad lead verification module for GLS integration.", -"Get Human bot.", -"Check health insurance acceptance.", -"Wrapper use case for creating an errand from conversational free-text. The true use case for the errand will be provided by a call to the LBT backend's task routing logic, such that this use case should NOT actually ever reach the caller.", -"Personal Call Assist." -], -"type": "string" -} -}, -"type": "object" -}, -"BookCitationPerDocData": { -"description": "Per-doc data for the web page about the cited book Approximate size is on average ~10bytes", -"id": "BookCitationPerDocData", -"properties": { -"bookId": { -"description": "the book id for the main citation", -"format": "int64", -"type": "string" -}, -"discretizedCitationScore": { -"description": "the discretized citation score for the main book. 
we map the raw score 1.0-20.0 to 0 - 127", -"format": "int32", -"type": "integer" -}, -"previewable": { -"description": "Is there a preview or excerpt of the book on this document?", -"type": "boolean" -}, -"secondBookId": { -"description": "book id for the second citation if we can't separate the two top citations (they are too close).", -"format": "int64", -"type": "string" -}, -"secondDiscretizedCitationScore": { -"description": "the discretized score for the second citation", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"BusinessHours": { -"description": "We divide up a week into individual open intervals. If any are present then they must be arranged in strictly increasing order, with non-empty spaces between successive intervals, and all times between 0 and 604800, the number of seconds in a week.", -"id": "BusinessHours", -"properties": { -"dayopen": { -"format": "int32", -"type": "integer" -}, -"interval": { -"items": { -"$ref": "BusinessHoursInterval" -}, -"type": "array" -} -}, -"type": "object" -}, -"BusinessHoursInterval": { -"id": "BusinessHoursInterval", -"properties": { -"end": { -"description": "The interval ends at the start of this second", -"format": "int32", -"type": "integer" -}, -"start": { -"description": "Time in seconds since Midnight-Monday-Morn", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"ChatBotPlatformBotSendToken": { -"description": "Token to be exposed and stored by the bot", -"id": "ChatBotPlatformBotSendToken", -"properties": { -"expiryTimeMicros": { -"description": "Time since epoch (micros) that this will expire", -"format": "int64", -"type": "string" -}, -"sendToken": { -"description": "Encrypted InternalSendToken", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"ChatBotPlatformFireballId": { -"id": "ChatBotPlatformFireballId", -"properties": { -"id": { -"$ref": "GoogleInternalCommunicationsInstantmessagingV1Id", -"description": "When used as a user ID, it's 
the phone number of the sender. When used as a session ID: For group conversation, it is the group ID. For 1 to 1, it is the receiver or sender phone number. For 1 to bot, it is the receiver phone number or empty." -} -}, -"type": "object" -}, -"ClassifierPornClassifierData": { -"description": "Next ID: 6", -"id": "ClassifierPornClassifierData", -"properties": { -"classification": { -"items": { -"$ref": "ClassifierPornClassifierDataClassification" -}, -"type": "array" -}, -"imageBasedDetectionDone": { -"deprecated": true, -"description": "Records whether the image linker is run already. This is only used for Alexandria but NOT for Segindexer.", -"type": "boolean" -}, -"timestamp": { -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"ClassifierPornClassifierDataClassification": { -"id": "ClassifierPornClassifierDataClassification", -"properties": { -"label": { -"type": "string" -}, -"score": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"ClassifierPornDocumentData": { -"description": "Next ID: 3", -"id": "ClassifierPornDocumentData", -"properties": { -"classifierdata": { -"$ref": "ClassifierPornClassifierData" -}, -"sitedata": { -"$ref": "ClassifierPornSiteData" -} -}, -"type": "object" -}, -"ClassifierPornQueryClassifierOutput": { -"description": "Generic output for one vertical.", -"id": "ClassifierPornQueryClassifierOutput", -"properties": { -"csaiClassification": { -"description": "This field is only filled for the CSAI vertical.", -"enum": [ -"NOT_PROTECTED", -"PROTECTED", -"STRONGLY_PROTECTED" -], -"enumDescriptions": [ -"Contract: (STRONGLY_PROTECTED & PROTECTED) == PROTECTED", -"", -"" -], -"type": "string" -}, -"debug": { -"description": "Human-readable debug information about the classification. This field is only set if output_debug is set in the classification input.", -"type": "string" -}, -"isPositive": { -"description": "The bit that shows if this classifier outputs positive classification for the input query. 
Set by thresholding with a recommended threshold.", -"type": "boolean" -}, -"score": { -"description": "The score that the classifier assigned to the input query. This is filled by all verticals.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"ClassifierPornQueryMultiLabelClassifierOutput": { -"description": "Multi-label classification output. It contains the output for each vertical. The output for some verticals can be empty, in case that vertical is not supported by the classifier or if the set of verticals was restricted using MultiLabelClassifierInput.verticals.", -"id": "ClassifierPornQueryMultiLabelClassifierOutput", -"properties": { -"csai": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"fringe": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"medical": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"minor": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"offensive": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"porn": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"spoof": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"violence": { -"$ref": "ClassifierPornQueryClassifierOutput" -}, -"vulgar": { -"$ref": "ClassifierPornQueryClassifierOutput" -} -}, -"type": "object" -}, -"ClassifierPornQueryStats": { -"description": "QuerySats contains the information about the queries that users typed to search for this image.", -"id": "ClassifierPornQueryStats", -"properties": { -"queryTextPornScore": { -"description": "A query text porn score for the queries which have clicks to the image: query_text_porn_score := sum(clicks(query) * text_porn_score(query)) / sum(clicks(query))", -"format": "float", -"type": "number" -}, -"totalClicks": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"ClassifierPornReferrerCounts": { -"description": "ReferrerCounts stores how many referrers an images has and how many of them were classified as porn and as adult/softporn respectively. 
Note that a referrer is usually a landing page, but as of March 2011 this also includes referrers which an image can 'inherit' by propagating counts from near duplicate images.", -"id": "ClassifierPornReferrerCounts", -"properties": { -"adult": { -"format": "int32", -"type": "integer" -}, -"porn": { -"description": "Number of referrers which are classified as porn and as adult.", -"format": "int32", -"type": "integer" -}, -"total": { -"description": "Total number of referrers.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"ClassifierPornSiteData": { -"description": "Next ID: 53", -"id": "ClassifierPornSiteData", -"properties": { -"avgPedoPageScore": { -"description": "The average pedo page score for the site.", -"format": "float", -"type": "number" -}, -"finalPedoSiteScore": { -"format": "float", -"type": "number" -}, -"numberOfPages": { -"description": "The number of pages that were used to compute the scores and ratios.", -"format": "uint64", -"type": "string" -}, -"numberOfPedoPages": { -"description": "The number of pages with pedo restrict.", -"format": "uint64", -"type": "string" -}, -"site": { -"description": "Sitechunk used to compute the signals. The field is present only for data created after December 2022.", -"type": "string" -}, -"sitePornRatio": { -"description": "The ratio of porn/softporn of the site this page belongs to.", -"format": "float", -"type": "number" -}, -"siteSoftpornRatio": { -"format": "float", -"type": "number" -}, -"versionedscore": { -"items": { -"$ref": "ClassifierPornSiteDataVersionedScore" -}, -"type": "array" -}, -"violenceScore": { -"description": "The cubic mean aggregation of violence page scores in the site.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"ClassifierPornSiteDataVersionedScore": { -"description": "The site porn score of the site to which the page of interest belongs to. Multiple versions are kept across large changes for some time. 
The Version-4 score is the average Universal Page Probability of all the site's pages, and will come with populated verticals4_score and site_rule (if any rule fires) fields. When using this score it is recommended to subscribe to the following mailing list: g/safesearch-announce.", -"id": "ClassifierPornSiteDataVersionedScore", -"properties": { -"score": { -"format": "float", -"type": "number" -}, -"siteRule": { -"items": { -"enum": [ -"HIGH_PORN_FRACTION_RULE", -"DEPRECATED_USER_GENERATED_CONTENT_RULE", -"DEPRECATED_SITES_WITH_IGNORED_SCORES_LIST" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"version": { -"format": "int32", -"type": "integer" -}, -"verticals4Score": { -"description": "Please talk to safesearch@ before relying on any of these internal fields:", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceCloudSqlInstanceConfig": { -"description": "The identity to configure a CloudSQL instance provisioned via SLM Terraform.", -"id": "CloudAiPlatformTenantresourceCloudSqlInstanceConfig", -"properties": { -"cloudSqlInstanceConnectionName": { -"description": "Output only. The CloudSQL instance connection name.", -"type": "string" -}, -"cloudSqlInstanceName": { -"description": "Input/Output [Optional]. The CloudSQL instance name within SLM instance. If not set, a random UUIC will be generated as instance name.", -"type": "string" -}, -"kmsKeyReference": { -"description": "Input [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. Only set this field when provisioning new CloudSQL instances. For existing CloudSQL instances, this field will be ignored because CMEK re-encryption is not supported.", -"type": "string" -}, -"mdbRolesForCorpAccess": { -"description": "Input [Optional]. MDB roles for corp access to CloudSQL instance.", -"items": { -"type": "string" -}, -"type": "array" -}, -"slmInstanceName": { -"description": "Output only. 
The SLM instance's full resource name.", -"type": "string" -}, -"slmInstanceTemplate": { -"description": "Input [Required]. The SLM instance template to provision CloudSQL.", -"type": "string" -}, -"slmInstanceType": { -"description": "Input [Required]. The SLM instance type to provision CloudSQL.", -"type": "string" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceGcsBucketConfig": { -"description": "The identity to configure a GCS bucket.", -"id": "CloudAiPlatformTenantresourceGcsBucketConfig", -"properties": { -"admins": { -"items": { -"type": "string" -}, -"type": "array" -}, -"bucketName": { -"description": "Input/Output [Optional]. The name of a GCS bucket with max length of 63 chars. If not set, a random UUID will be generated as bucket name.", -"type": "string" -}, -"entityName": { -"description": "Input/Output [Optional]. Only needed for per-entity tenant GCP resources. During Deprovision API, the on-demand deletion will only cover the tenant GCP resources with the specified entity name.", -"type": "string" -}, -"kmsKeyReference": { -"description": "Input/Output [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. Only set this field when provisioning new GCS bucket. For existing GCS bucket, this field will be ignored because CMEK re-encryption is not supported.", -"type": "string" -}, -"ttlDays": { -"description": "Input/Output [Optional]. Only needed when the content in bucket need to be garbage collected within some amount of days.", -"format": "int32", -"type": "integer" -}, -"viewers": { -"description": "Input/Output [Required]. 
IAM roles (viewer/admin) put on the bucket.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceIamPolicyBinding": { -"description": "The dynamic IAM bindings to be granted after tenant projects are created.", -"id": "CloudAiPlatformTenantresourceIamPolicyBinding", -"properties": { -"members": { -"description": "Input/Output [Required]. The member service accounts with the roles above. Note: placeholders are same as the resource above.", -"items": { -"type": "string" -}, -"type": "array" -}, -"resource": { -"description": "Input/Output [Required]. The resource name that will be accessed by members, which also depends on resource_type. Note: placeholders are supported in resource names. For example, ${tpn} will be used when the tenant project number is not ready.", -"type": "string" -}, -"resourceType": { -"description": "Input/Output [Required]. Specifies the type of resource that will be accessed by members.", -"enum": [ -"RESOURCE_TYPE_UNSPECIFIED", -"PROJECT", -"SERVICE_ACCOUNT", -"GCS_BUCKET", -"SERVICE_CONSUMER", -"AR_REPO" -], -"enumDescriptions": [ -"", -"The value of resource field is the ID or number of a project. Format is ", -"The value of resource field is the resource name of a service account. Format is projects//serviceAccounts/", -"The value of resource field is the name of a GCS bucket (not its resource name). Format is .", -"The value of resource field is the resource name of a service consumer. Format is services//consumers/", -"The value of the resource field is the AR Image Uri which identifies an AR REPO. Allowed formats are: /// ///: ///@" -], -"type": "string" -}, -"role": { -"description": "Input/Output [Required]. The role for members below.", -"type": "string" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceInfraSpannerConfig": { -"description": "The configuration for a spanner database provisioning. 
Next ID: 8", -"id": "CloudAiPlatformTenantresourceInfraSpannerConfig", -"properties": { -"createDatabaseOptions": { -"$ref": "CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions", -"description": "Input [Optional]. The options to create a spanner database. Note: give the right options to ensure the right KMS key access audit logging and AxT logging in expected logging category." -}, -"kmsKeyReference": { -"description": "Input [Optional]. The KMS key name or the KMS grant name used for CMEK encryption. Only set this field when provisioning new Infra Spanner databases. For existing Infra Spanner databases, this field will be ignored because CMEK re-encryption is not supported. For example, projects//locations//keyRings//cryptoKeys/", -"type": "string" -}, -"sdlBundlePath": { -"description": "Input [Required]. The file path to the spanner SDL bundle.", -"type": "string" -}, -"spannerBorgServiceAccount": { -"description": "Input [Optional]. The spanner borg service account for delegating the kms key to. For example, spanner-infra-cmek-nonprod@system.gserviceaccount.com, for the nonprod universe.", -"type": "string" -}, -"spannerLocalNamePrefix": { -"type": "string" -}, -"spannerNamespace": { -"type": "string" -}, -"spannerUniverse": { -"description": "Input [Required]. Every database in Spanner can be identified by the following path name: /span//:", -"type": "string" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions": { -"description": "The options to create a spanner database. KMS key access audit logging and AxT logging will be associated with the given resource name, resource type and service name. Please ensure to give right options to enable correct audit logging and AxT logging.", -"id": "CloudAiPlatformTenantresourceInfraSpannerConfigCreateDatabaseOptions", -"properties": { -"cmekCloudResourceName": { -"description": "The cloud resource name for the CMEK encryption. 
For example, projects//locations/", -"type": "string" -}, -"cmekCloudResourceType": { -"description": "The cloud resource type for the CMEK encryption. For example, contentwarehouse.googleapis.com/Location", -"type": "string" -}, -"cmekServiceName": { -"description": "The service name for the CMEK encryption. For example, contentwarehouse.googleapis.com", -"type": "string" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceServiceAccountIdentity": { -"description": "The identity to configure a service account.", -"id": "CloudAiPlatformTenantresourceServiceAccountIdentity", -"properties": { -"serviceAccountEmail": { -"description": "Output only. The service account email that has been created.", -"type": "string" -}, -"tag": { -"description": "Input/Output [Optional]. The tag that configures the service account, as defined in google3/configs/production/cdpush/acl-zanzibar-cloud-prod/activation_grants/activation_grants.gcl. Note: The default P4 service account has the empty tag.", -"type": "string" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceTenantProjectConfig": { -"description": "The identity to configure a tenant project.", -"id": "CloudAiPlatformTenantresourceTenantProjectConfig", -"properties": { -"billingConfig": { -"$ref": "GoogleApiServiceconsumermanagementV1BillingConfig", -"description": "Input/Output [Required]. The billing account properties to create the tenant project." -}, -"folder": { -"description": "Input/Output [Required]. The folder that holds tenant projects and folder-level permissions will be automatically granted to all tenant projects under the folder. Note: the valid folder format is `folders/{folder_number}`.", -"type": "string" -}, -"policyBindings": { -"description": "Input/Output [Required]. The policy bindings that are applied to the tenant project during creation. 
At least one binding must have the role `roles/owner` with either `user` or `group` type.", -"items": { -"$ref": "GoogleApiServiceconsumermanagementV1PolicyBinding" -}, -"type": "array" -}, -"services": { -"description": "Input/Output [Required]. The API services that are enabled on the tenant project during creation.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceTenantProjectResource": { -"description": "The tenant project and tenant resources. Next ID: 10", -"id": "CloudAiPlatformTenantresourceTenantProjectResource", -"properties": { -"cloudSqlInstances": { -"description": "The CloudSQL instances that are provisioned under the tenant project.", -"items": { -"$ref": "CloudAiPlatformTenantresourceCloudSqlInstanceConfig" -}, -"type": "array" -}, -"gcsBuckets": { -"description": "The GCS buckets that are provisioned under the tenant project.", -"items": { -"$ref": "CloudAiPlatformTenantresourceGcsBucketConfig" -}, -"type": "array" -}, -"iamPolicyBindings": { -"description": "The dynamic IAM bindings that are granted under the tenant project. Note: this should only add new bindings to the project if they don't exist and the existing bindings won't be affected.", -"items": { -"$ref": "CloudAiPlatformTenantresourceIamPolicyBinding" -}, -"type": "array" -}, -"infraSpannerConfigs": { -"description": "The Infra Spanner databases that are provisioned under the tenant project. Note: this is an experimental feature.", -"items": { -"$ref": "CloudAiPlatformTenantresourceInfraSpannerConfig" -}, -"type": "array" -}, -"tag": { -"description": "Input/Output [Required]. The tag that uniquely identifies a tenant project within a tenancy unit. Note: for the same tenant project tag, all tenant manager operations should be idempotent.", -"type": "string" -}, -"tenantProjectConfig": { -"$ref": "CloudAiPlatformTenantresourceTenantProjectConfig", -"description": "The configurations of a tenant project." 
-}, -"tenantProjectId": { -"description": "Output only. The tenant project ID that has been created.", -"type": "string" -}, -"tenantProjectNumber": { -"description": "Output only. The tenant project number that has been created.", -"format": "int64", -"type": "string" -}, -"tenantServiceAccounts": { -"description": "The service account identities (or enabled API service's P4SA) that are expclicitly created under the tenant project (before JIT provisioning during enabled API services).", -"items": { -"$ref": "CloudAiPlatformTenantresourceTenantServiceAccountIdentity" -}, -"type": "array" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceTenantResource": { -"description": "A collection of tenant resources.", -"id": "CloudAiPlatformTenantresourceTenantResource", -"properties": { -"p4ServiceAccounts": { -"description": "A list of P4 service accounts (go/p4sa) to provision or deprovision.", -"items": { -"$ref": "CloudAiPlatformTenantresourceServiceAccountIdentity" -}, -"type": "array" -}, -"tenantProjectResources": { -"description": "A list of tenant projects and tenant resources to provision or deprovision.", -"items": { -"$ref": "CloudAiPlatformTenantresourceTenantProjectResource" -}, -"type": "array" -} -}, -"type": "object" -}, -"CloudAiPlatformTenantresourceTenantServiceAccountIdentity": { -"description": "The identity of service accounts that have been explicitly created under tenant projects.", -"id": "CloudAiPlatformTenantresourceTenantServiceAccountIdentity", -"properties": { -"serviceAccountEmail": { -"description": "Output only. The email address of the generated service account.", -"type": "string" -}, -"serviceName": { -"description": "Input/Output [Required]. The service that the service account belongs to. (e.g. 
cloudbuild.googleapis.com for GCB service accounts)", -"type": "string" -} -}, -"type": "object" -}, -"CommerceDatastoreDeepTag": { -"description": "The basic message that contains a single decision output of go/deeptagger.", -"id": "CommerceDatastoreDeepTag", -"properties": { -"confidence": { -"description": "The confidence of the tag, encoded to 14 bits (range [0, 16383]). Due to modeling details, a large number of tags become trustworthy with confidence greater than 0.001, so two bytes of precision are required.", -"format": "uint32", -"type": "integer" -}, -"tag": { -"description": "A Deep Tag enum in uint32 form.", -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"CommerceDatastoreImageDeepTags": { -"description": "Image-level deep tags: essentially equivalent to the proto above but containing tags that are computed at the image level. These image signals are maintained by the Visual Shopping team (visual-shopping@). *** If you do use the signals, please add an entry in go/ShoppingImageAttributeClients to be notified for model upgrade. *** We recommend our clients against using the raw confidence value directly. Instead, the clients should use the library, cs/ads/shopping/visual/deeptags/public/single_tag.h and cs/ads/shopping/visual/deeptags/public/single_scored_tag.h to specify an operating point in terms of precision or recall. See the following code example: http://google3/shopping/visual/explore_looks/looks_offline_pipeline.cc?l=268&rcl=304165166 *** `model_outputs` is a repeated field. Please check `version` to get the model you desire to use, instead of indexing the model_outputs directly e.g. model_outputs(0). We will remove the old versions in the future and this will lead to incorrect model. 
*** Models: As of Q2 2020, we have two models running within Shopping: model one only has the overlay tag, which we are deprecating, and model two has the tags specified in go/VisualShoppingImageAttributes.", -"id": "CommerceDatastoreImageDeepTags", -"properties": { -"modelOutputs": { -"description": "The set of outputs for a series of model versions. The size of this field should not extend beyond 4 at any time: two versions for slow-update track dependencies, and two versions for fast-update track dependencies.", -"items": { -"$ref": "CommerceDatastoreImageDeepTagsModelOutput" -}, -"type": "array" -} -}, -"type": "object" -}, -"CommerceDatastoreImageDeepTagsModelOutput": { -"id": "CommerceDatastoreImageDeepTagsModelOutput", -"properties": { -"backgroundType": { -"$ref": "CommerceDatastoreDeepTag" -}, -"collage": { -"$ref": "CommerceDatastoreDeepTag" -}, -"cropping": { -"$ref": "CommerceDatastoreDeepTag", -"description": "We are looking to deploy a model for the Ads team to identify images with bad cropping. The model will be for Ads only and we will not populate the cropping field in CDS." -}, -"modelType": { -"$ref": "CommerceDatastoreDeepTag" -}, -"nfs": { -"$ref": "CommerceDatastoreDeepTag", -"description": "Tag corresponds to the shopping non-family safe (nfs) image signal." -}, -"objectCount": { -"$ref": "CommerceDatastoreDeepTag" -}, -"overlay": { -"$ref": "CommerceDatastoreDeepTag", -"description": "Tag corresponding to unwanted text overlay (watermarks, logos, promotional elements, artifacts, etc)." 
-}, -"selfie": { -"$ref": "CommerceDatastoreDeepTag" -}, -"textOverlay": { -"deprecated": true, -"description": "Tag corresponding to the text overlay classifier (watermarks, logos, promotional elements, artifacts, etc).", -"items": { -"$ref": "CommerceDatastoreDeepTag" -}, -"type": "array" -}, -"version": { -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"CompositeDoc": { -"description": "Protocol record used for collecting together all information about a document. Please consult go/dj-explorer for two basic questions about `CompositeDoc`: - Where should I look up certain information (e.g: pagerank, language)? - What does each field in CompositeDoc mean and who should I contact if I have questions? To add a new field into CompositeDoc, or change existing field's size significantly, please file a ticket at go/dj-new-field, fill in necessary information and get approved by docjoin-access@ team. Next id: 194", -"id": "CompositeDoc", -"properties": { -"ContentChecksum96": { -"description": "Visible content checksum as computed by repository::parsehandler::checksum::Checksum96bitsParseHandler. The value is a Fprint96 in \"key format\" (i.e., by Fprint96::AsKey()).", -"format": "byte", -"type": "string" -}, -"accessRequirements": { -"$ref": "IndexingPrivacyAccessAccessRequirements", -"description": "Contains necessary information to enforce row level Docjoin access control." -}, -"additionalchecksums": { -"$ref": "CompositeDocAdditionalChecksums" -}, -"alternatename": { -"items": { -"$ref": "CompositeDocAlternateName" -}, -"type": "array" -}, -"anchorStats": { -"$ref": "IndexingDocjoinerAnchorStatistics", -"description": "Mark as non-personal since it's an aggregation of anchors. For more details of Search personal data, see go/dma52-search-cdoc-fields." -}, -"anchors": { -"$ref": "Anchors", -"description": "Mark as non-personal since no personal fields will be populated in `anchors.link_additional_info` and `anchors.additional_info`. 
For more details of Search personal data, see go/dma52-search-cdoc-fields." -}, -"badSslCertificate": { -"$ref": "IndexingBadSSLCertificate", -"description": "This field is present iff the page has a bad SSL certificate itself or in its redirect chain." -}, -"cseId": { -"items": { -"$ref": "QualityProseCSEUrlInfo" -}, -"type": "array" -}, -"csePagerankCutoff": { -"description": "URL should only be selected for CSE Index if it's pagerank is higher than cse_pagerank_cutoff.", -"format": "int32", -"type": "integer" -}, -"dataVersion": { -"$ref": "IndexingDocjoinerDataVersion", -"description": "Contains the tracking version of various data fields in CompositeDoc." -}, -"doc": { -"$ref": "GDocumentBase" -}, -"docAttachments": { -"$ref": "Proto2BridgeMessageSet", -"description": "A generic container to hold document annotations and signals. For a full list of extensions live today, see go/wde." -}, -"docImages": { -"description": "Info about \"selected\" images associated with the document for which we (already) have ImageData. For each image URL, some fixed number of documents are selected as web referrers for the image URL, and within those selected documents, we say the image is \"selected\". Within the remaining documents, we say the image is \"rejected\". Note that this distinction is slightly different from selected for indexing. Only images within doc_images where is_indexed_by_imagesearch is true will be selected for indexing. You can find the rejected images at composite_doc.doc_attachments().get(). 
You can find images that are selected, but for which we have no ImageData (yet) at composite_doc.image_indexing_info().selected_not_indexed_image_link()", -"items": { -"$ref": "ImageData" -}, -"type": "array" -}, -"docVideos": { -"description": "Info about videos embedded in the document.", -"items": { -"$ref": "ImageRepositoryVideoProperties" -}, -"type": "array" -}, -"docinfoPassthroughAttachments": { -"$ref": "Proto2BridgeMessageSet", -"description": "This message set is used for data pushed into the index using the signals framework that is never to be used in Mustang or TG Continuum scoring/snippeting code. Any protocol buffer stored in this message set is automatically returned in a docinfo response - it ends up in the \"info\" message set in the WWWSnippetResponse, so it can be used in post-doc twiddlers and for display in GWS with no code changes in Mustang or Teragoogle." -}, -"docjoinsOnSpannerCommitTimestampMicros": { -"description": "The commit timestamp of a CDoc update to Docjoins on Spanner.", -"format": "int64", -"type": "string" -}, -"embeddedContentInfo": { -"$ref": "IndexingEmbeddedContentEmbeddedContentInfo", -"description": "Data produced by the embedded-content system. This is a thin message, containing only embedded_links_info data for the embedder and JavaScript/CSS embedded links (the embedded-content bigtable also contains snapshots, compressed document trees and all embedded link types). Provided using the index signal API." 
-}, -"extradup": { -"items": { -"$ref": "CompositeDocExtraDup" -}, -"type": "array" -}, -"forwardingdup": { -"items": { -"$ref": "CompositeDocForwardingDup" -}, -"type": "array" -}, -"includedcontent": { -"items": { -"$ref": "CompositeDocIncludedContent" -}, -"type": "array" -}, -"indexingIntermediate": { -"description": "Serialized indexing intermediate data.", -"format": "byte", -"type": "string" -}, -"indexinginfo": { -"$ref": "CompositeDocIndexingInfo" -}, -"labelData": { -"$ref": "QualityLabelsGoogleLabelData", -"description": "This field associates a document to particular labels and assigns confidence values to them." -}, -"liveexperimentinfo": { -"$ref": "CompositeDocLiveExperimentInfo" -}, -"localinfo": { -"$ref": "LocalWWWInfo" -}, -"localizedAlternateName": { -"description": "Localized alternate names are similar to alternate names, except that it is associated with a language different from its canonical. This is the subset of webmaster-provided localized alternate names being in the dup cluster of this document. Used during serving for swapping in the URL based on regional and language preferences of the user.", -"items": { -"$ref": "IndexingConverterLocalizedAlternateName" -}, -"type": "array" -}, -"localizedvariations": { -"$ref": "CompositeDocLocalizedVariations" -}, -"partialUpdateInfo": { -"$ref": "CompositeDocPartialUpdateInfo", -"description": "Only present in partial cdocs." -}, -"perDocData": { -"$ref": "PerDocData" -}, -"porninfo": { -"$ref": "ClassifierPornDocumentData", -"description": "Porn related data used for image and web search porn classification as well as for diagnostics purposes." -}, -"properties": { -"$ref": "DocProperties" -}, -"ptoken": { -"$ref": "PtokenPToken", -"description": "Contains information necessary to perform policy decision on the usage of the data assosiated with this cdoc." 
-}, -"qualitysignals": { -"$ref": "CompositeDocQualitySignals" -}, -"registrationinfo": { -"$ref": "RegistrationInfo", -"description": "Information about the most recent creation and expiration of this domain. It's extracted from domainedge signal." -}, -"richcontentData": { -"$ref": "IndexingConverterRichContentData", -"description": "If present, indicates that some content was inserted, deleted, or replaced in the document's content (in CompositeDoc::doc::Content::Representation), and stores information about what was inserted, deleted, or replaced." -}, -"richsnippet": { -"$ref": "RichsnippetsPageMap", -"description": "rich snippet extracted from the content of a document." -}, -"robotsinfolist": { -"$ref": "CompositeDocRobotsInfoList" -}, -"scaledIndyRank": { -"description": "to copy to per-doc", -"format": "int32", -"type": "integer" -}, -"sitemap": { -"$ref": "Sitemap", -"description": "Sitelinks: a collection of interesting links a user might be interested in, given they are interested in this document. WARNING: this is different from the crawler Sitemaps (see SitemapsSignals in the attachments)." 
-}, -"storageRowTimestampMicros": { -"description": "Row timestamp in CDoc storage.", -"format": "int64", -"type": "string" -}, -"subindexid": { -"items": { -"enum": [ -"LTG_CANDIDATE", -"NOSUBINDEX", -"BASE", -"CSEINDEX_EXTENDED", -"DAILY", -"TIMBIT_PROTECTED", -"LANDFILL1", -"LANDFILL2", -"LANDFILL3", -"LANDFILL_BLOGSEARCH", -"LANDFILL_SOCIAL", -"INSTANT", -"UNIFIED_LANDFILL", -"BLOGSEARCH_DYNAMIC_ASSIMILATED", -"BLOGSEARCH_EXTENDED", -"MOFFETT", -"UNSELECTED_DOCUMENTS", -"AQUARIUS", -"WEBSEARCH_FRESH", -"WEBSEARCH1", -"WEBSEARCH2", -"WEBSEARCH3", -"UNIFIED_ZEPPELIN_HIGH_QUALITY", -"ASIANREGIONAL", -"EMEAREGIONAL", -"CSEINDEX", -"BASEREGIONAL", -"BLACKHOLE", -"XBASE", -"FRESHBASE", -"XASIANREGIONAL", -"XEMEAREGIONAL", -"XBASEREGIONAL", -"BLIMPIE", -"BLIMPIEPP", -"GOODYEAR", -"GOODYEARPP", -"QUASAR", -"ZEPPELIN1", -"ZEPPELIN2", -"ZEPPELIN3", -"ZEPPELIN_STAGING", -"PULSAR", -"TIMBIT", -"LANDFILL_CSE", -"UNIFIED_ZEPPELIN" -], -"enumDescriptions": [ -"ltg-candidate is a label for docs whose selection decision is deferred to ltg by Alexandria. It does not correspond to any serving corpus.", -"", -"", -"Custom Search Engine extended corpus to catch all CSE urls not indexed in unified zeppelin or higher tiers.", -"", -"timbit_protected is a label for keeping docs from dropping out of base, i.e., all timbit protected docs belong to base/basextended.", -"Generic corpora used in Oscar.", -"", -"", -"Blogsearch special purpose corpus for Oscar.", -"Social special purpose corpus for Oscar.", -"", -"One big landfill index for Union.", -"Alexandria version of Blogsearch Assimilated.", -"Blogsearch corpus for all docs not indexed in unified zeppelin or higher.", -"Moffett corpus for all docs indexed in base and unified zeppelin.", -"Unselected documents corpus used for index-selection eval tools.", -"Aquarius corpus which is used to hold all app-only documents. 
go/aquarius", -"Corpus corresponding to serving corpus websearch_fresh.", -"Corpus corresponding to serving corpus websearch_1.", -"Corpus corresponding to serving corpus websearch_2.", -"Corpus corresponding to serving corpus websearch_3.", -"Top quality documents in unified zeppelin", -"Deprecated corpora.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"syntacticDate": { -"$ref": "QualityTimebasedSyntacticDate" -}, -"url": { -"description": "WARNING!!! \"url\" field in CompositeDoc is optional, and is usually missing: e.g., Docjoin CompositeDoc's don't have CompositeDoc::url. has_url() checking is often useful. So don't rely on CompositeDoc::url unless you're sure otherwise. Usually you want to use CompositeDoc::doc::url instead.", -"type": "string" -}, -"urldate": { -"description": "Date in the url extracted by quality/snippets/urldate/date-in-url.cc This is given as midnight GMT on the date in question.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"CompositeDocAdditionalChecksums": { -"description": "Additional checksums of the document.", -"id": "CompositeDocAdditionalChecksums", -"properties": { -"NoTransientChecksum96": { -"description": "Same as ContentChecksum96 but without transient boilerplate.", -"format": "byte", -"type": "string" -}, -"SimHash": { -"deprecated": true, -"description": "Deprecated. Use simhash_v2 and simhash_v2_significance instead.", -"format": "uint64", -"type": "string" -}, -"SimHashIsTrusted": { -"deprecated": true, -"type": "boolean" -}, -"simhashV2": { -"description": "Simhash-v2 is generated by SimHashParseHandler, designed as a complete replacement of simhash-v1 (a.k.a. the original simhash above) from ApproxDupsParseHandler. Simhash-v2 uses a revised algorithm so that it is expected to work better in most cases than simhash-v1. 
They coexist in current transition period, then simhash-v1 will be retired.", -"format": "uint64", -"type": "string" -}, -"simhashV2Significance": { -"description": "Simhash-v2-significance is used to describe the confidence about the corresponding simhash-v2 value. It is defined as the average absolute difference from zero of all internal state components when finalizing a simhash-v2 value in HashMultiSetDotCauchy. We used to compare the significance against some pre-defined threshold (default: 20) to get a boolean value \"trusted_simhash_v2\". However, it is possible that this field is missing while \"simhash_v2\" is present, in such case (1) Use \"SimHashIsTrusted\" instead if it is present, AND/OR (2) Assume \"simhash_v2\" is trusted if its value is non-zero.", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"CompositeDocAlternateName": { -"description": "Alternate names are some urls that we would like to associate with documents in addition to canonicals. Sometimes we may want to serve these alternatenames instead of canonicals. Alternames in CompositeDoc should come from WebMirror pipeline.", -"id": "CompositeDocAlternateName", -"properties": { -"Url": { -"type": "string" -}, -"UrlEncoding": { -"description": "See webutil/urlencoding", -"format": "int32", -"type": "integer" -}, -"ecnFp": { -"description": "Fp96 of webmirror equivalence class as of last time this was exported.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"CompositeDocExtraDup": { -"description": "The top non-forwarding dups of the canonical url.", -"id": "CompositeDocExtraDup", -"properties": { -"ecnFp": { -"description": "Fp96 of webmirror equivalence class as of last time this was exported.", -"format": "byte", -"type": "string" -}, -"url": { -"description": "The url of the non-forwarding dup.", -"type": "string" -} -}, -"type": "object" -}, -"CompositeDocForwardingDup": { -"description": "The top forwarding dups of the canonical url. 
(note: it may actually include some dups that are NOT used for forwarding data but for making \"info:\" complete)", -"id": "CompositeDocForwardingDup", -"properties": { -"ecn": { -"description": "The name of the url's webmirror equivalence class.", -"format": "byte", -"type": "string" -}, -"ecnFp": { -"format": "byte", -"type": "string" -}, -"purposes": { -"description": "The purpose(s) of the forwarding dup indicating if it is used for forwarding signal/anchors generally, or only for forwarding some specific signal (e.g. navboost), or for some other purposes (e.g., not for forwarding any data but for making \"info:\" complete). See indexing/dups/public/dups.h for more details.", -"format": "int32", -"type": "integer" -}, -"rawPagerank": { -"description": "Raw pagerank of the url.", -"format": "int32", -"type": "integer" -}, -"repid": { -"description": "The webmirror repid of the forwarding dup.", -"format": "byte", -"type": "string" -}, -"url": { -"description": "The url of the forwarding dup.", -"type": "string" -}, -"urlencoding": { -"description": "The encoding of the url (see webutil/urlencoding for details).", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"CompositeDocIncludedContent": { -"id": "CompositeDocIncludedContent", -"properties": { -"SourceTypeBitfield": { -"description": "Indicate how this content came to be included. Legal values are constructed by bitwise-OR-ing values from the included_content::SourceType enum. Default SourceTypeBitfield = included_content::INCLUDED_FRAME", -"format": "int64", -"type": "string" -}, -"includedDoc": { -"$ref": "GDocumentBase" -}, -"linkUrl": { -"type": "string" -}, -"perDocData": { -"$ref": "PerDocData" -}, -"properties": { -"$ref": "DocProperties" -} -}, -"type": "object" -}, -"CompositeDocIndexingInfo": { -"description": "Contains information *mostly* used within indexing (e.g. not used for building the production serving shards). 
Most of this data is generated only in Alexandria, however there are exceptions.", -"id": "CompositeDocIndexingInfo", -"properties": { -"cdocBuildInfo": { -"$ref": "IndexingDocjoinerCDocBuildInfo", -"description": "To hold extra info for building a final cdoc from raw cdoc and goldmine annotations." -}, -"contentProtected": { -"description": "Whether current page is under content protection, i.e. a page has been crawled as an error page, but we preserve its last known good content and keep its crawl_status as converter.CrawlStatus::CONTENT.", -"type": "boolean" -}, -"convertToRobotedReason": { -"description": "If set, indicates that the crawl status was converted to ROBOTED for the reason specified by the enum value in converter.RobotedReasons.ConvertToRobotedReasons. See indexing/converter/proto/converter.proto for details. If unset, then the document was not converted to roboted, and if the document crawl status is ROBOTED, then the document is disallowed (at least to Google) in robots.txt.", -"format": "int32", -"type": "integer" -}, -"crawlStatus": { -"description": "One of the enum values in converter.CrawlStatus.State (see indexing/converter/proto/converter.proto for details). Default is converter.CrawlStatus::CONTENT. The document is roboted if the value is converter.CrawlStatus::ROBOTED.", -"format": "int32", -"type": "integer" -}, -"demotionTags": { -"items": { -"enum": [ -"DEMOTION_TYPE_NONE", -"DEMOTION_TYPE_BADURLS_DEMOTE" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"errorType": { -"description": "One of the enum values in converter.ErrorPageType (see indexing/converter/proto/error-page-detector-enum.proto for detail). 
Default is converter::ERROR_PAGE_NONE.", -"format": "int32", -"type": "integer" -}, -"freshdocsCorpora": { -"items": { -"enum": [ -"WEB", -"REALTIME", -"CSE", -"CSE_PREMIUM", -"BUSTER_IMAGES", -"NEWS", -"VIDEO", -"YOUTUBE", -"WEB_INSTANT", -"WEB_DAILY", -"CACHE_COLON" -], -"enumDescriptions": [ -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"A table with just the bodysource attachment" -], -"type": "string" -}, -"type": "array" -}, -"hostid": { -"description": "The host id of the document. Used chiefly to determine whether the document is part of a parked domain.", -"format": "uint64", -"type": "string" -}, -"ieIdentifier": { -"description": "A short descriptive string to help identify the IE application or setup where this CDoc is generated. For example: websearch_m3 This field is for debuggability purposes.", -"type": "string" -}, -"imageIndexingInfo": { -"$ref": "ImageSearchImageIndexingInfo", -"description": "Indexing info about images (i.e. image links missing image data, etc)." -}, -"indexingTs": { -"description": "The timestamp (the time since the Epoch, in microseconds) when the docjoin is exported from indexing. The main purpose of this field is to identify different versions of the same document.", -"format": "int64", -"type": "string" -}, -"noLongerCanonicalTimestamp": { -"description": "If set, the timestamp in microseconds when the URL stopped being canonical. This should never be set for exported canonical documents. This field is used by dups during canonical flip, and by webmain when doc selection switched between desktop and mobile. 
Union respects this timestamp to prevent old doc being deleted until the new doc is picked up", -"format": "int64", -"type": "string" -}, -"normalizedClickScore": { -"description": "This score is calculated by re-mapping the back onto the partition's score distribution, such that the score represents the score of the equivalently ranked organically-selected document.", -"format": "float", -"type": "number" -}, -"primaryVertical": { -"description": "Vertical membership of the document. - `primary_vertical` is the vertical that initiated indexing of this document (or empty if the vertical was websearch). - `verticals` is the full list of verticals that contained this document (excluding websearch) at indexing time. `primary_vertical` may or may not be an element of `verticals` because of vertical membership skew between the ingestion time and indexing time. See go/one-indexing-for-web for more background.", -"type": "string" -}, -"rawNavboost": { -"description": "The raw navboost count for the canonical url without aggregating the navboost from dup urls. This field is used when building forwarding map.", -"format": "int32", -"type": "integer" -}, -"rowTimestamp": { -"description": "The timestamp (the time since the Epoch, in microseconds) to represent doc version, which is used in the downstream processing after Raffia. If it's not set, indexing_ts will be used as row_timestamp. The timestamp is generally set by reprocessing to set slightly newer indexing_ts such that the system can respect the reprocessed version to overwrite old data in storage.", -"format": "int64", -"type": "string" -}, -"selectionTierRank": { -"description": "Selection tier rank is a language normalized score ranging from 0-1 over the serving tier (Base, Zeppelins, Landfills) for this document.", -"format": "float", -"type": "number" -}, -"tracingId": { -"description": "The tracing ids is to label the version of url for url status tracking. This repeated field will carry at most 10 tracing id. 
See more details in go/rich-tracing-design There will be less than 2% base+uz cdocs carrying this field. The major sources of tracing ids include: * Indexing API pushed urls * Index Metrics sampling urls The tracing ids will be written into cdocs by Webmain Ramifier. The consumer of the tracing ids is Union serving notification collector see more at go/serving-notification-from-union", -"items": { -"type": "string" -}, -"type": "array" -}, -"urlChangerate": { -"$ref": "CrawlerChangerateUrlChangerate", -"description": "Changerate information for this doc (see crawler/changerate/changerate.proto for details)." -}, -"urlHistory": { -"$ref": "CrawlerChangerateUrlHistory", -"description": "Url change history for this doc (see crawler/changerate/changerate.proto for details). Note if a doc has more than 20 changes, we only keep the last 20 changes here to avoid adding to much data in its docjoin." -}, -"urlPatternSignals": { -"$ref": "IndexingSignalAggregatorUrlPatternSignals", -"description": "UrlPatternSignals for this doc, used to compute document score in LTG (see indexing/signal_aggregator/proto/signal-aggregator.proto for details)." -}, -"verticals": { -"items": { -"type": "string" -}, -"type": "array" -}, -"videoIndexingInfo": { -"$ref": "ImageRepositoryVideoIndexingInfo", -"description": "Indexing info about videos." -} -}, -"type": "object" -}, -"CompositeDocLiveExperimentInfo": { -"description": "Contains information needed for end-to-end live experiments. For a cdoc generated by production pipeline, it includes experiment IDs that have selected current document. 
For a cdoc generated by experiment pipeline, it includes current experiment ID.", -"id": "CompositeDocLiveExperimentInfo", -"properties": { -"perLiveExperimentInfo": { -"description": "List of necessary information for each live experiments.", -"items": { -"$ref": "CompositeDocLiveExperimentInfoPerLiveExperimentInfo" -}, -"type": "array" -} -}, -"type": "object" -}, -"CompositeDocLiveExperimentInfoPerLiveExperimentInfo": { -"description": "Contains information for a live experiment.", -"id": "CompositeDocLiveExperimentInfoPerLiveExperimentInfo", -"properties": { -"experimentId": { -"description": "ID of a live experiment.", -"type": "string" -}, -"partialCdoc": { -"$ref": "CompositeDoc", -"description": "Partial CDoc for a live experiment." -} -}, -"type": "object" -}, -"CompositeDocLocalizedVariations": { -"id": "CompositeDocLocalizedVariations", -"properties": { -"dupsComputedAlternateNames": { -"description": "A subset of computed variations, only the members which are dups to the main url. Used during serving for swapping in the URL based on regional and language preferences of the user.", -"items": { -"$ref": "IndexingDupsComputedLocalizedAlternateNamesLocaleEntry" -}, -"type": "array" -}, -"webmasterAlternateNames": { -"description": "All localized alternate names provided by the webmaster (canonical and dups, indexed and not-indexed). 
Used on the ranking side for swapping out results based on the webmaster preference.", -"items": { -"$ref": "IndexingConverterLocalizedAlternateName" -}, -"type": "array" -} -}, -"type": "object" -}, -"CompositeDocPartialUpdateInfo": { -"description": "Contains information about the partial updates present in a partial CompositeDoc.", -"id": "CompositeDocPartialUpdateInfo", -"properties": { -"goldmineAnnotatorNames": { -"description": "List of goldmine annotator updates present in the enclosing partial cdoc.", -"items": { -"type": "string" -}, -"type": "array" -}, -"imagesSignalNames": { -"description": "List of images signal updates present in the enclosing partial cdoc. Images signal name for a images signal is the unique name for the signal according to SignalSpec.", -"items": { -"type": "string" -}, -"type": "array" -}, -"lastFullIndexingInfo": { -"description": "Contains last full indexing information for partial updates.", -"items": { -"$ref": "CompositeDocPartialUpdateInfoLastFullIndexingInfo" -}, -"type": "array" -}, -"shouldLookupDocjoinsTier": { -"description": "Which tier we should do cdoc lookup to merge partial cdocs. This uses the integer value of indexing.selection.CorpusId. NOT intended for other usage.", -"format": "int32", -"type": "integer" -}, -"shouldLookupDocjoinsVerticalCorpus": { -"description": "Which vertical docjoin corpus we should do cdoc lookup to merge partial cdocs. This uses the string value of table name in google3/indexing/docjoins/spanner/schema/global_tables_data.sdl. For example, Voce and ShortVideo. NOT intended for other usage.", -"type": "string" -}, -"signalNames": { -"description": "List of signal updates present in the enclosing partial cdoc. 
Signal name for a signal is unique name for the signal according to SignalSpec.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"CompositeDocPartialUpdateInfoLastFullIndexingInfo": { -"description": "Last full indexing information for the partial CDoc.", -"id": "CompositeDocPartialUpdateInfoLastFullIndexingInfo", -"properties": { -"corpus": { -"description": "The corpus of last full updates.", -"enum": [ -"RAFFIA_WEBSEARCH", -"RAFFIA_FASTPATH_DAILY", -"RAFFIA_FASTPATH_INSTANT", -"DOCJOINS" -], -"enumDescriptions": [ -"Base Raffia corpora", -"Fastpath Raffia corpora", -"", -"Docjoins repository" -], -"type": "string" -}, -"lastFullIndexingTsMicros": { -"description": "Last full update indexing timestamp in microseconds.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"CompositeDocQualitySignals": { -"description": "Note: This is a misleading name as of 2022/10/14. The field is still set and has meaningful data, but no longer holds quality signals. All the data are freshness-related and they're not particularly sensitive.", -"id": "CompositeDocQualitySignals", -"properties": { -"lastSignificantUpdate": { -"$ref": "QualityTimebasedLastSignificantUpdate", -"description": "Contains a date used for the \"Date Last Modified\" toolbelt restrict mode. Note: this date is a combined date and is different from the pure shingle-based signal stored in contentage.last_significant_update field." -}, -"pagetype": { -"$ref": "QualityTimebasedPageType" -} -}, -"type": "object" -}, -"CompositeDocRobotsInfoList": { -"description": "List of robots info parsed for the user-agents other than the default used to crawl this page.", -"id": "CompositeDocRobotsInfoList", -"properties": { -"newsRobotsInfo": { -"$ref": "IndexingConverterRobotsInfo" -} -}, -"type": "object" -}, -"CompressedQualitySignals": { -"description": "A message containing per doc signals that are compressed and included in Mustang and TeraGoogle. 
For TeraGoogle, this message is included in perdocdata which means it can be used in preliminary scoring. CAREFUL: For TeraGoogle, this data resides in very limited serving memory (Flash storage) for a huge number of documents. Next id: 44", -"id": "CompressedQualitySignals", -"properties": { -"anchorMismatchDemotion": { -"description": "anchor_mismatch_demotion: converted from QualityBoost.mismatched.boost.", -"format": "uint32", -"type": "integer" -}, -"authorityPromotion": { -"description": "authority promotion: converted from QualityBoost.authority.boost", -"format": "uint32", -"type": "integer" -}, -"babyPandaDemotion": { -"description": "baby_panda_demotion: converted from QualityBoost.rendered.boost.", -"format": "uint32", -"type": "integer" -}, -"babyPandaV2Demotion": { -"description": "New BabyPanda demotion, applied on top of Panda. This is meant to replace |baby_panda_demotion|.", -"format": "uint32", -"type": "integer" -}, -"crapsAbsoluteHostSignals": { -"description": "Impressions, unsquashed, host level, not to be used with compressed ratios. Not to be used in Pattern Data.", -"format": "uint32", -"type": "integer" -}, -"crapsNewHostSignals": { -"format": "uint64", -"type": "string" -}, -"crapsNewPatternSignals": { -"format": "uint64", -"type": "string" -}, -"crapsNewUrlSignals": { -"description": "For craps_[url|pattern]_signals, please avoid accessing these fields directly, even in minor ways like checking has_craps_*. Instead, please use methods from quality/navboost/craps/craps-lossy-compression.h or talk to craps-team@.", -"format": "uint64", -"type": "string" -}, -"crapsUnscaledIpPriorBadFraction": { -"format": "uint32", -"type": "integer" -}, -"exactMatchDomainDemotion": { -"description": "Page quality signals converted from fields in proto QualityBoost in quality/q2/proto/quality-boost.proto. To save indexing space, we convert the float values in [0, 1] to integers in range [0, 1023] (use 10 bits). 
exact_match_domain_demotion: converted from QualityBoost.emd.boost.", -"format": "uint32", -"type": "integer" -}, -"experimentalNsrTeamData": { -"$ref": "QualityNsrExperimentalNsrTeamData", -"deprecated": true, -"description": "This field is *not* propagated to shards, but it's populated at serving time by go/web-signal-joins (see b/207344056). See go/0DayLEs for details. This is only meant to be used during LEs, it should *not* be used for launches." -}, -"experimentalNsrTeamWsjData": { -"description": "This field is *not* propagated to shards, but it's populated at serving time by go/web-signal-joins (see b/207344056). See go/0DayLEs for details. This is only meant to be used during LEs, it should *not* be used for launches.", -"items": { -"$ref": "QualityNsrExperimentalNsrTeamWSJData" -}, -"type": "array" -}, -"experimentalQstarDeltaSignal": { -"description": "This field is *not* propagated to shards. It is meant to be populated at serving time using one of the versions present in the `experimental_nsr_team_wsj_data` field above (using the `ExperimentalNsrTeamDataOverridesParams` opti to populate it; see http://source/search?q=ExperimentalNsrTeamDataOverridesParams%20file:ascorer.proto). The purpose of this field is to be read by an experimental Q* component, in order to quickly run LEs with new delta components. See go/0DayLEs for details.", -"format": "float", -"type": "number" -}, -"experimentalQstarSignal": { -"description": "This field is *not* propagated to shards. It is meant to be populated at serving time using one of the versions present in the `experimental_nsr_team_wsj_data` field above (using the `ExperimentalNsrTeamDataOverridesParams` opti to populate it; see http://source/search?q=ExperimentalNsrTeamDataOverridesParams%20file:ascorer.proto). The purpose of this field is to be read by an experimental Q* component, in order to quickly run LEs with new components. 
See go/0DayLEs for details.", -"format": "float", -"type": "number" -}, -"experimentalQstarSiteSignal": { -"description": "This field is *not* propagated to shards. It is meant to be populated at serving time using one of the versions present in the `experimental_nsr_team_wsj_data` field above (using the `ExperimentalNsrTeamDataOverridesParams` opti to populate it; see http://source/search?q=ExperimentalNsrTeamDataOverridesParams%20file:ascorer.proto). The purpose of this field is to be read by an experimental Q* component, in order to quickly run LEs with new site components. See go/0DayLEs for details.", -"format": "float", -"type": "number" -}, -"experimentalWebHealthSignal": { -"description": "This field is *not* propagated to shards. It is meant to be populated at serving time using one of the versions present in the `experimental_nsr_team_wsj_data` field above (using the `ExperimentalNsrTeamDataOverridesParams` opti to populate it; see http://source/search?q=ExperimentalNsrTeamDataOverridesParams%20file:ascorer.proto). The purpose of this field is to be read by an experimental W* component, in order to quickly run LEs with new signals. See go/0DayLEs for details.", -"format": "float", -"type": "number" -}, -"lowQuality": { -"description": "S2V low quality score: converted from quality_nsr.NsrData, applied in Qstar. See quality_nsr::util::ConvertNsrDataToLowQuality.", -"format": "uint32", -"type": "integer" -}, -"navDemotion": { -"description": "nav_demotion: converted from QualityBoost.nav_demoted.boost.", -"format": "uint32", -"type": "integer" -}, -"nsrConfidence": { -"deprecated": true, -"description": "NSR confidence score: converted from quality_nsr.NsrData. This field is deprecated - use nsr_variance inside nsr_data_proto instead.", -"format": "uint32", -"type": "integer" -}, -"nsrOverrideBid": { -"deprecated": true, -"description": "NSR override bid, used in Q* for emergency overrides. 
This field is deprecated - used the equivalent field inside nsr_data_proto instead.", -"format": "float", -"type": "number" -}, -"nsrVersionedData": { -"deprecated": true, -"description": "Versioned NSR score to be used in continuous evaluation of the upcoming NSR version and assess quality impact on various slices. This field is deprecated - used the equivalent field inside nsr_data_proto instead.", -"items": { -"$ref": "NSRVersionedItem" -}, -"type": "array" -}, -"pairwiseqScoringData": { -"$ref": "PairwiseQScoringData", -"deprecated": true, -"description": "Deprecated and unused field." -}, -"pairwiseqVersionedData": { -"description": "Versioned PairwiseQ score to be used in continuous evaluation of the upcoming PairwiseQ versions and assess quality impact on various slices.", -"items": { -"$ref": "PairwiseQVersionedItem" -}, -"type": "array" -}, -"pandaDemotion": { -"description": "This is the encoding of Panda fields in the proto SiteQualityFeatures in quality/q2/proto/site_quality_features.proto. The encoding/decoding is performed using functions from quality_coati::coati_util.", -"format": "uint32", -"type": "integer" -}, -"pqData": { -"description": "Encoded page-level PQ signals.", -"format": "uint32", -"type": "integer" -}, -"pqDataProto": { -"$ref": "QualityNsrPQData", -"description": "Stripped page-level signals, not present in the encoded field 'pq_data'." -}, -"productReviewPDemotePage": { -"format": "uint32", -"type": "integer" -}, -"productReviewPDemoteSite": { -"description": "Product review demotion/promotion confidences. (Times 1000 and floored)", -"format": "uint32", -"type": "integer" -}, -"productReviewPPromotePage": { -"deprecated": true, -"format": "uint32", -"type": "integer" -}, -"productReviewPPromoteSite": { -"deprecated": true, -"format": "uint32", -"type": "integer" -}, -"productReviewPReviewPage": { -"description": "Fields product_review_p_review_page and product_review_p_uhq_page are for promoting/demoting HQ/LQ review pages in NGS. 
See go/pr-boosts for details. The possibility of a page being a review page.", -"format": "uint32", -"type": "integer" -}, -"productReviewPUhqPage": { -"description": "The possibility of a page being a high quality review page.", -"format": "uint32", -"type": "integer" -}, -"scamness": { -"description": "Scam model score. Used as one of the web page quality qstar signals. Value range from 0 to 1023.", -"format": "uint32", -"type": "integer" -}, -"serpDemotion": { -"description": "serp demotion: applied in Qstar.", -"format": "uint32", -"type": "integer" -}, -"siteAuthority": { -"description": "site_authority: converted from quality_nsr.SiteAuthority, applied in Qstar.", -"format": "uint32", -"type": "integer" -}, -"topicEmbeddingsVersionedData": { -"description": "Versioned TopicEmbeddings data to be populated later into superroot / used directly in scorers.", -"items": { -"$ref": "QualityAuthorityTopicEmbeddingsVersionedItem" -}, -"type": "array" -}, -"ugcDiscussionEffortScore": { -"description": "UGC page quality signals. (Times 1000 and floored)", -"format": "uint32", -"type": "integer" -}, -"unauthoritativeScore": { -"description": "Unauthoritative score. Used as one of the web page quality qstar signals.", -"format": "uint32", -"type": "integer" -}, -"vlqNsr": { -"deprecated": true, -"description": "NSR for low-quality videos, converted from quality_nsr.NsrData.vlq_nsr. 
This field is deprecated - used the equivalent field inside nsr_data_proto instead.", -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"ConceptsConceptId": { -"id": "ConceptsConceptId", -"properties": { -"conceptId": { -"type": "string" -}, -"id": { -"deprecated": true, -"description": "uint64 bring lots of issues, and string a better practice.", -"format": "uint64", -"type": "string" -} -}, -"type": "object" -}, -"ContentAttributions": { -"description": "The following protobuf is used to store an attribution from one page to (usually) one other page, giving credit for the content. This information is used during ranking to promote the attributed page. This protobuf is copied from a quality_contra::SelectedAttribution. See //quality/contra/authorship/attribution and https://qwiki.corp.google.com/display/Q/ContentTrackingContentAttribution.", -"id": "ContentAttributions", -"properties": { -"freshdocsOutgoing": { -"description": "Selected outgoing attributions extracted on FreshDocs.", -"items": { -"$ref": "ContentAttributionsOutgoingAttribution" -}, -"type": "array" -}, -"offlineOutgoing": { -"description": "Selected outgoing attributions extracted via offline MR jobs.", -"items": { -"$ref": "ContentAttributionsOutgoingAttribution" -}, -"type": "array" -}, -"onlineOutgoing": { -"description": "Selected outgoing attributions extracted online on Alexandria.", -"items": { -"$ref": "ContentAttributionsOutgoingAttribution" -}, -"type": "array" -} -}, -"type": "object" -}, -"ContentAttributionsOutgoingAttribution": { -"description": "This is a copy of quality_contra::SelectedAttribution::SelectedInfo. 
The url is converted to docid and other fields are copied directly.", -"id": "ContentAttributionsOutgoingAttribution", -"properties": { -"bestEvidenceType": { -"enum": [ -"PAGE_LINK_TO", -"SITE_LINK_TO", -"SITE_OTHER_PAGE_LINK_TO", -"ORG_OTHER_SITE_LINK_TO", -"ORG_OTHER_PAGE_LINK_TO", -"SITE_SCORE_BASED", -"LOW_CONFIDENCE_LINK_TO", -"ANCHOR_ATTRIBUTION_TO", -"SITE_NAME_MENTION", -"SITE_NAME_MENTION_KEYWORDED", -"EVIDENCE_TYPE_END" -], -"enumDescriptions": [ -"Direct link to page in question.", -"Link to the site of the page in question: Example: \"\" or \"source: cnn.com\" for the page \"http://cnn.com/news/13\"", -"Link to some *other* page on the site of the page in question: Example: \"\" for the page \"http://cnn.com/news/13\"", -"Direct link to some *site* with the same OrgName. Example: \"\" for the page \"http://cnn.com/news/13\"", -"Link to some page on a site with the same OrgName. Example: \"\" for the page \"http://cnn.com/news/13\"", -"Fake link which is created based on site scores to propagate offline clustering data.", -"Link with low confidence score. Is used for online clustering only.", -"Anchor to page in question.", -"A mention of the site name in the text.", -"A mention of the site name with citation keyword in front of it.", -"" -], -"type": "string" -}, -"docid": { -"format": "uint64", -"type": "string" -}, -"properties": { -"format": "uint32", -"type": "integer" -}, -"usableForClustering": { -"type": "boolean" -} -}, -"type": "object" -}, -"ContentAwareCropsIndexing": { -"id": "ContentAwareCropsIndexing", -"properties": { -"mustangBytes": { -"description": "Compact representation for Mustang storage. See image/search/utils/packed_crops.h for details on the packing format.", -"format": "byte", -"type": "string" -}, -"mustangBytesVersion": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"CopleyLexicalMetadata": { -"description": "Contains lexical metadata for a given reference. 
For example, this proto will be used to store locale-specific Lexical mids for contact relationships (e.g. /g/11gv0vypg4 is the mid for mother in english and /g/11gmy_gv87 is for mother in french) as an extension to QRefAnnotation::other_metadata, when available.", -"id": "CopleyLexicalMetadata", -"properties": { -"canonicalLexicalMid": { -"description": "Mid for an entity that has lexical data (a LexiconEntry). See https://g3doc.corp.google.com/nlp/generation/g3doc/lexical_data.md for for more information about lexical data. This is the canonical mid for this entity (eg. it would be for \"mother\" in EN even if user referred to \"mom\").", -"type": "string" -} -}, -"type": "object" -}, -"CopleyPersonalReference": { -"description": "Represents a reference made by a user that refers to some personal entity.", -"id": "CopleyPersonalReference", -"properties": { -"personalReferenceType": { -"description": "The manner in which the entity was referenced (e.g. \"my hotel\", \"the airport\").", -"enum": [ -"PERSONAL_UNKNOWN_REFERENCE", -"PERSONAL_HOTEL_REFERENCE", -"PERSONAL_HOTEL_BOOKING_AGENT_REFERENCE", -"PERSONAL_RESTAURANT_REFERENCE", -"PERSONAL_RESTAURANT_BOOKING_AGENT_REFERENCE", -"PERSONAL_PARKING_REFERENCE", -"PERSONAL_FLIGHT_REFERENCE", -"PERSONAL_GENERIC_SOCIAL_EVENT_REFERENCE", -"PERSONAL_CONCERT_REFERENCE", -"PERSONAL_SPORTS_REFERENCE", -"PERSONAL_MOVIE_REFERENCE", -"PERSONAL_TOUR_REFERENCE", -"PERSONAL_HOME_REFERENCE", -"PERSONAL_WORK_REFERENCE", -"PERSONAL_MAPS_ALIAS_REFERENCE", -"PERSONAL_CONTACT_REFERENCE", -"PERSONAL_CONTACT_PERSON_REFERENCE", -"PERSONAL_CONTACT_LOCATION_REFERENCE", -"PERSONAL_FAMILY_MEMBER_REFERENCE", -"PERSONAL_CONTACT_BIRTHDAY_REFERENCE", -"PERSONAL_CONTACT_ADDRESS_REFERENCE", -"PERSONAL_RELATIONSHIP_REFERENCE", -"PERSONAL_RELATIONSHIP_PERSON_REFERENCE", -"PERSONAL_RELATIONSHIP_LOCATION_REFERENCE", -"PERSONAL_MEMORABLE_DATE_REFERENCE", -"PERSONAL_MEMORY_ANNIVERSARY_DATE_REFERENCE", -"PERSONAL_MEMORY_PAYDAY_DATE_REFERENCE", 
-"PERSONAL_MEMORY_WEDDING_DATE_REFERENCE", -"PERSONAL_MEMORY_BIRTHDAY_DATE_REFERENCE", -"PERSONAL_MEMORY_EXAM_DATE_REFERENCE", -"PERSONAL_MEMORY_MATHEXAM_DATE_REFERENCE", -"PERSONAL_MEMORY_OILCHANGE_DATE_REFERENCE", -"PERSONAL_MEMORY_GRADUATION_DATE_REFERENCE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"CopleyPersonalReferenceMetadata": { -"description": "General message used to store metadata about references to personal entities, even if those entities cannot be resolved.", -"id": "CopleyPersonalReferenceMetadata", -"properties": { -"referenceScore": { -"description": "The strength of the personal reference. For example \"my flight\" may receive a high reference_score, whereas \"the airport\" may receive a low score.", -"format": "float", -"type": "number" -}, -"references": { -"description": "A list of all references made. Empty if no personal references exist. Multiple references can be present when multiple references were made in a single query, or the type of reference was ambiguous.", -"items": { -"$ref": "CopleyPersonalReference" -}, -"type": "array" -}, -"subreferenceMetadata": { -"$ref": "CopleySubreferenceMetadata", -"description": "Subreference metadata for all compound references on this span." -} -}, -"type": "object" -}, -"CopleySourceTypeList": { -"id": "CopleySourceTypeList", -"properties": { -"sourceTypeMetadata": { -"items": { -"$ref": "CopleySourceTypeMetadata" -}, -"type": "array" -} -}, -"type": "object" -}, -"CopleySourceTypeMetadata": { -"description": "Contains the source and type information related to a personal entity, for example if it's an hotel or a restaurant (type) and if it comes from gmail, calendar, etc. (source). 
Next ID: 13", -"id": "CopleySourceTypeMetadata", -"properties": { -"contactAnnotationId": { -"description": "Annotation ID of a contact annotation, e.g. a relationship set via Assistant. This ID is generated by People Write Server. It is used to delete Contact Annotations via People API.", -"type": "string" -}, -"displayableName": { -"type": "string" -}, -"emailIdentifier": { -"description": "Only used if personal_data_provenance == PERSONAL_SOURCE_GMAIL. Used to create a link to the source email in the form: mail.google.com/mail/u/0/?extsrc=sync&client=h&plid={email_identifier}", -"type": "string" -}, -"eventId": { -"$ref": "EventIdMessage", -"description": "Populated for some footprints data sources; uniquely identifies the footprint that generated the personal data that this provenance is attached to." -}, -"localDiscoverySettingsMetadata": { -"$ref": "PersonalizationSettingsApiProtoLocalDiscoveryLocalDiscoverySettingsMetadata" -}, -"personalDataProvenance": { -"enum": [ -"PERSONAL_SOURCE_UNKNOWN", -"PERSONAL_SOURCE_GMAIL", -"PERSONAL_SOURCE_CALENDAR", -"PERSONAL_SOURCE_MAPS_ALIAS", -"PERSONAL_SOURCE_FOCUS", -"PERSONAL_SOURCE_FOCUS_CONSISTENT", -"PERSONAL_SOURCE_FOCUS_TOP_N_CONTACTS", -"PERSONAL_SOURCE_ASSISTANT_CONTACT_AFFINITY", -"PERSONAL_SOURCE_ASSISTANT_MEMORY", -"PERSONAL_SOURCE_PWS", -"PERSONAL_SOURCE_HOUSEHOLD", -"PERSONAL_SOURCE_HULK_PLACES", -"PERSONAL_SOURCE_FOCUS_OWNER", -"PERSONAL_SOURCE_WHITEPAGES", -"PERSONAL_SOURCE_ASSISTANT_DEVICES", -"PERSONAL_SOURCE_TEACH_LEARN", -"PERSONAL_SOURCE_GELLER_ANSWERS", -"PERSONAL_SOURCE_LAMS_SETTINGS", -"PERSONAL_SOURCE_GAIA", -"PERSONAL_SOURCE_XTALK", -"PERSONAL_SOURCE_MOVIE_DIALOG", -"PERSONAL_SOURCE_MEDIA_HABITUAL_CACHE", -"PERSONAL_SOURCE_PERSONAL_TOPICSERVER", -"PERSONAL_SOURCE_PHOTO_LABELS", -"PERSONAL_SOURCE_PEOPLE_API", -"PERSONAL_SOURCE_CONTEXT_API", -"PERSONAL_SOURCE_MUSIC_PREFERRED_PROVIDER", -"PERSONAL_SOURCE_STASH", -"PERSONAL_SOURCE_SMART_HOME_DEVICES", -"PERSONAL_SOURCE_DEVICE_STATES", 
-"PERSONAL_SOURCE_HANDBAG_PERSONALIZED_WEBREF_ENTITIES", -"PERSONAL_GRAPH_PEOPLE_SIGNAL_POST_PROCESSING", -"PERSONAL_SOURCE_PERSONALIZED_PRONUNCIATIONS", -"PERSONAL_SOURCE_DEVICE_INSTALLED_APPS", -"PERSONAL_SOURCE_CONTACT_AGGREGATED_DATA", -"PERSONAL_SOURCE_DYNAMIC_ENTITY_INDEX", -"PERSONAL_SOURCE_STADIA", -"PERSONAL_SOURCE_COMMUNAL_GROUP", -"PERSONAL_SOURCE_LOCATION_SHARING", -"PERSONAL_SOURCE_MAPS_SEARCH", -"PERSONAL_SOURCE_MEDIA_USER_CONTEXT_INFO", -"PERSONAL_SOURCE_MEDIA_USER_ENTITIES", -"PERSONAL_SOURCE_DEVICE_SIGNED_IN_ACCOUNTS", -"PERSONAL_SOURCE_ASSISTANT_USER_PROFILES" -], -"enumDescriptions": [ -"", -"", -"", -"", -"A subset of the user's Focus and device contacts from Starlight. As of 11/2020, this subset includes: starred contacts, relationships, and contacts with attributes (birthday, address, etc.) set via Assistant. Only requests Starlight's base model.", -"Same as PERSONAL_SOURCE_FOCUS, but additionally includes photos labels (face clusters, albums, locations) from Starlight. Requires Starlight's fresh model (go/starlight-freshness) for more consistent results. Use with caution: May fail to return any data if the fresh model is not loaded in time.", -"User's top n(~200) contacts returned by Starlight. Starlight uses various signals and sources to prepare a list of top n contacts for a given user.", -"User's contact affinities read from Footprint's assistant_contact_affinity corpus.", -"", -"", -"", -"", -"", -"", -"Reads from the ASSISTANT_DEVICE_SETTINGS", -"", -"", -"", -"", -"", -"", -"", -"", -"The user's photo labels from Starlight.", -"The current user's data from the people API. This includes the user's profile image.", -"", -"", -"", -"The user's SmartHome devices from AutomationService. This includes user's devices, rooms and structures.", -"The user's devices states from DeviceStatesService. 
For example, if the device is playing music and the name of the music if it is.", -"Personalized WebRef entity mids synced down to device (go/handbag).", -"For triples produced by postprocessing signals for people entities.", -"", -"The names of the user's installed apps on their Android phone.", -"", -"For triples directly served from Footprints Corpus DYNAMIC_ENTITY_INDEX. These are often smart home devices.", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"personalDataType": { -"enum": [ -"PERSONAL_UNKNOWN", -"PERSONAL_HOTEL", -"PERSONAL_RESTAURANT", -"PERSONAL_PARKING", -"PERSONAL_FLIGHT", -"PERSONAL_SOCIAL_EVENT", -"PERSONAL_MAPS_ALIAS", -"PERSONAL_CONTACT", -"PERSONAL_PROFILE", -"PERSONAL_BILL", -"PERSONAL_CAR_RENTAL", -"PERSONAL_GENERIC_EVENT", -"PERSONAL_TRAIN", -"PERSONAL_BUS", -"PERSONAL_TAXI", -"PERSONAL_FERRY", -"PERSONAL_PHONE_NUMBER", -"PERSONAL_DEVICE", -"PERSONAL_PREFERENCE", -"PERSONAL_DIETARY_RESTRICTION", -"PERSONAL_MEDIA_HABITUAL_CACHE", -"PERSONAL_NEWS_PREFERENCE", -"PERSONAL_FAVORITE", -"PERSONAL_GAMER_CONTACT" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"provenanceCategory": { -"items": { -"enum": [ -"PROVENANCE_CATEGORY_UNKNOWN", -"CORE_APPS_DATA" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"sensitivity": { -"$ref": "KnowledgeAnswersSensitivitySensitivity", -"description": "Sensitivity applying to this copley annotation." -} -}, -"type": "object" -}, -"CopleySubreferenceMetadata": { -"description": "Represents the most compound resolved entities and most nested unresolved references for a span. 
Useful for punting.", -"id": "CopleySubreferenceMetadata", -"properties": { -"mostCompoundResolvedEntities": { -"description": "Resolved entities are sorted from highest resolution score to lowest.", -"items": { -"$ref": "CopleySubreferenceResolution" -}, -"type": "array" -}, -"mostNestedUnresolvedReference": { -"$ref": "CopleySubreferenceReference", -"description": "This is a merged representation of the compound reference having the most_compound_resolved_entities as an argument." -} -}, -"type": "object" -}, -"CopleySubreferenceReference": { -"description": "Represents a reference that may be part of a larger compound reference. For example, \"my brother's birthday\" will have a subreference that may have references for \"my brother\".", -"id": "CopleySubreferenceReference", -"properties": { -"personalReferenceTypes": { -"description": "Type of reference. There may be multiple for a single reference (e.g. relationship and contact).", -"items": { -"enum": [ -"PERSONAL_UNKNOWN_REFERENCE", -"PERSONAL_HOTEL_REFERENCE", -"PERSONAL_HOTEL_BOOKING_AGENT_REFERENCE", -"PERSONAL_RESTAURANT_REFERENCE", -"PERSONAL_RESTAURANT_BOOKING_AGENT_REFERENCE", -"PERSONAL_PARKING_REFERENCE", -"PERSONAL_FLIGHT_REFERENCE", -"PERSONAL_GENERIC_SOCIAL_EVENT_REFERENCE", -"PERSONAL_CONCERT_REFERENCE", -"PERSONAL_SPORTS_REFERENCE", -"PERSONAL_MOVIE_REFERENCE", -"PERSONAL_TOUR_REFERENCE", -"PERSONAL_HOME_REFERENCE", -"PERSONAL_WORK_REFERENCE", -"PERSONAL_MAPS_ALIAS_REFERENCE", -"PERSONAL_CONTACT_REFERENCE", -"PERSONAL_CONTACT_PERSON_REFERENCE", -"PERSONAL_CONTACT_LOCATION_REFERENCE", -"PERSONAL_FAMILY_MEMBER_REFERENCE", -"PERSONAL_CONTACT_BIRTHDAY_REFERENCE", -"PERSONAL_CONTACT_ADDRESS_REFERENCE", -"PERSONAL_RELATIONSHIP_REFERENCE", -"PERSONAL_RELATIONSHIP_PERSON_REFERENCE", -"PERSONAL_RELATIONSHIP_LOCATION_REFERENCE", -"PERSONAL_MEMORABLE_DATE_REFERENCE", -"PERSONAL_MEMORY_ANNIVERSARY_DATE_REFERENCE", -"PERSONAL_MEMORY_PAYDAY_DATE_REFERENCE", -"PERSONAL_MEMORY_WEDDING_DATE_REFERENCE", 
-"PERSONAL_MEMORY_BIRTHDAY_DATE_REFERENCE", -"PERSONAL_MEMORY_EXAM_DATE_REFERENCE", -"PERSONAL_MEMORY_MATHEXAM_DATE_REFERENCE", -"PERSONAL_MEMORY_OILCHANGE_DATE_REFERENCE", -"PERSONAL_MEMORY_GRADUATION_DATE_REFERENCE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"referenceScore": { -"description": "Highest reference score for any references merged in this span.", -"format": "float", -"type": "number" -}, -"relationshipLexicalInfo": { -"$ref": "CopleyLexicalMetadata", -"description": "Only set for unresolved relationship references and can be used to get the canonical word for the relationship (e.g. \"mother\") in TTS." -} -}, -"type": "object" -}, -"CopleySubreferenceResolution": { -"description": "Represents a resolution that may be part of a larger compound reference. For example, \"my brother's birthday\" will have a subreference that may have resolutions for \"my brother\".", -"id": "CopleySubreferenceResolution", -"properties": { -"mid": { -"description": "Can be used with PKG Service for looking up metadata about this entity at fulfillment/GenX time.", -"type": "string" -}, -"name": { -"description": "Name of the entity represented by this resolution.", -"type": "string" -}, -"resolutionScore": { -"description": "A resolution score of 0 indicates that it did not resolve to a real entity.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"CorpusSelectionInfo": { -"id": "CorpusSelectionInfo", -"properties": { -"corpus": { -"enum": [ -"UNKNOWN", -"LENS" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"corpusScore": { -"description": "Corpus specific score for an image", -"format": "float", -"type": "number" -}, -"isSelectedForIndexing": { -"description": "Whether an image was selected for indexing.", -"type": "boolean" -}, 
-"referrerDocid": { -"description": "Set of referrers indexed with the image.", -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -}, -"referrerUrls": { -"description": "Set of referrer urls indexed with the image.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"CountryClickDistribution": { -"id": "CountryClickDistribution", -"properties": { -"confidence": { -"description": "To store confidence in the distribution in cases when total is not set.", -"format": "double", -"type": "number" -}, -"item": { -"items": { -"$ref": "CountryClickDistributionItem" -}, -"type": "array" -}, -"total": { -"description": "To store total clicks on this page/domain.", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"CountryClickDistributionItem": { -"id": "CountryClickDistributionItem", -"properties": { -"doubleValue": { -"format": "double", -"type": "number" -}, -"name": { -"type": "string" -}, -"value": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"CountryCountryAttachment": { -"description": "If you add new fields to this message, do not use any tag value less than the \"Next free tag\" below. The lower tag values might be missing in this file, but they were used in past for some field, so cannot be used again. Next free tag: 44", -"id": "CountryCountryAttachment", -"properties": { -"clickDistribution": { -"$ref": "CountryClickDistribution", -"description": "Store weighted click distribution for page level country-id classification." -}, -"countryidFromUgc": { -"description": "Is true if the country attachment was computed through the UGC pipeline.", -"type": "boolean" -}, -"debug": { -"description": "A non critical field to store debug info for a country attachment. Used in experiments and for debugging.", -"type": "string" -}, -"debugSourceUrl": { -"description": "Set to the signal source URLs when merging country signals in Alexandria during sitemoves. 
Essentially if sites A and B move to C, and we merge A and B's signal to C, in the countryattachment signal C will have URL A and B as source_url. Only used for debugging and it doesn't show up in docjoins.", -"items": { -"type": "string" -}, -"type": "array" -}, -"documentLocationSource": { -"description": "Specifies the origin of `geo_locations`. Right now, it can either come from deprecated Docloc system or the new Brainloc system when Docloc doesn't have sufficient evidence.", -"enum": [ -"UNSPECIFIED", -"DOCLOC", -"BRAINLOC", -"LOGLOC" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"existNextLevel": { -"type": "boolean" -}, -"fromLanguageFallback": { -"description": "Booleans to keep track of where the country-id of the page came from. These are used for debugging and/or unittests, and cleared in production.", -"type": "boolean" -}, -"fromRestricts": { -"type": "boolean" -}, -"fromSgDomains": { -"type": "boolean" -}, -"fromTld": { -"type": "boolean" -}, -"fromUgc": { -"type": "boolean" -}, -"fromUrlPattern": { -"type": "boolean" -}, -"fromWmx": { -"type": "boolean" -}, -"geoLocations": { -"$ref": "CountryGeoLocations", -"description": "New MetroID: Now called GeoLocations since the locations could be sublocalities, cities or states. GeoLocations are always more fine grained than country. TODO (jayeshv): Once new MetroID/GeoLocations is launched everywhere, deleted old MetroID related fields." -}, -"global": { -"type": "boolean" -}, -"isValidForCountryRestrict": { -"description": "Set to true if the local_countries field can be used for country restricts as well.", -"type": "boolean" -}, -"localCountries": { -"description": "two-letter(lower-case) countrycode, e.g. us countries that is local to", -"items": { -"type": "string" -}, -"type": "array" -}, -"localCountryCodes": { -"description": "Fields that actually store the country id in docjoins. The format of this data is defined in //i18n/identifiers/stableinternalregionconverter.cc. 
Converter defined there can be used to convert it to RegionCode format.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"metroIdList": { -"description": "Metro locations: list of NavBoost feature V2 associated with a doc, along with the enclosing province. Metro locations with new tags.", -"items": { -"$ref": "CountryMetroNBFeature" -}, -"type": "array" -}, -"metroLocationId": { -"description": "Metro level data. metro_location_id stores geotokens for metro restricts.", -"items": { -"type": "string" -}, -"type": "array" -}, -"metroNavboost": { -"description": "Metro navboost: list of (NavBoost feature V2, navboost float) pairs.", -"items": { -"$ref": "CountryMetroNBFeature" -}, -"type": "array" -}, -"provinceGeotokenList": { -"items": { -"$ref": "CountryProvinceGeotoken" -}, -"type": "array" -}, -"relatedCountries": { -"description": "two-letter(lower-case) countrycode, e.g. us countries that is related to, but not local to", -"items": { -"type": "string" -}, -"type": "array" -}, -"relatedCountryCodes": { -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"restrictCountries": { -"description": "List of two-letter(lower-case) countrycodes(e.g. us) valid for restricts. Typically cloned out of local_countries if is_valid_for_country_restrict is set to true.", -"items": { -"type": "string" -}, -"type": "array" -}, -"salientCountries": { -"description": "[Experimental]: Top salient countries for a doc. 
If a country can not be found on this field it can be considered that this doc is not relevant to it.", -"items": { -"$ref": "CountrySalientCountry" -}, -"type": "array" -}, -"salientCountrySet": { -"$ref": "QualitySalientCountriesSalientCountrySet" -}, -"sitename": { -"description": "Domain name of keys in filtering metro reducer class, used only by the intermediate mapreduces to produce filtered data.", -"type": "string" -}, -"superGlobal": { -"description": "Super global pages get lesser demotion than global pages. A document can only be either global or super_global but not both.", -"type": "boolean" -}, -"urlPatternBasedCountry": { -"format": "int32", -"type": "integer" -}, -"urlPatternBasedLanguage": { -"description": "Language and country extracted using the URL pattern map.", -"format": "int32", -"type": "integer" -}, -"userVisibleCountryFromLogs": { -"description": "This is used to store the visible country id computed from logs data", -"type": "string" -}, -"userVisibleLocalCountry": { -"description": "This is the country id we show to users on the result page. This is kept different from country demotion country id because we dont want to expose our backoff and url based detection algorithm - also we want to be ultra conservative in showing this.", -"format": "int32", -"type": "integer" -}, -"weightAboveIdealForLocalness": { -"description": "If result is global, store weight above ideal, as a confidence signal. 
Used in query localness, cleared in production CountryAttachment.", -"format": "double", -"type": "number" -}, -"wmxCountry": { -"description": "Country specified for a web-site through webmaster console.", -"type": "string" -} -}, -"type": "object" -}, -"CountryGeoLocation": { -"description": "Stores one location and all meta-data associated with that location.", -"id": "CountryGeoLocation", -"properties": { -"clickRadius50Percent": { -"description": "The radius (in miles) around the assigned location that the document gets 50% of its clicks.", -"format": "uint32", -"type": "integer" -}, -"confidence": { -"description": "Confidence on the location. Ranges in [0.0, 1.0]. Cleared during index creation.", -"format": "float", -"type": "number" -}, -"confidencePercent": { -"description": "Confidence mapped to [0, 100]. Converted to integer for efficient storage. Populated during index creation.", -"format": "uint32", -"type": "integer" -}, -"internalId": { -"description": "Used for compressed docloc data. In compressed data, instead of location_info, only an integer ID for that LocationInfo is stored. A separate lookup table is used to get full LocationInfo from the internal ID.", -"format": "int32", -"type": "integer" -}, -"locationInfo": { -"$ref": "CountryLocationInfo" -}, -"propagatedFromASubpage": { -"description": "True if this location is assigned to one of the subpages, and not to the page itself. 
If the total number of locations assigned to all the subpages of a page is small (usually up to 5), then that page also gets assigned those locations, and this flag is set for those locations.", -"type": "boolean" -} -}, -"type": "object" -}, -"CountryGeoLocations": { -"description": "List of locations assigned to a document.", -"id": "CountryGeoLocations", -"properties": { -"geoLocation": { -"items": { -"$ref": "CountryGeoLocation" -}, -"type": "array" -}, -"isNonLocationSpecific": { -"description": "This will be set to true for documents which receive several clicks but are not assigned any location because the click distribution is flat. Typical examples are global sites like facebook.com, chains like walmart.com, informational sites like wikipedia.org etc. This flag is not propagated to deeper pages since this signal is meant to indicate that a website or a part of website is conclusively non-local, so propagating this information to deeper pages does not make sense. If this flag is set, then the only possible geo_location will be the ones which are propagated_from_a_subpage.", -"type": "boolean" -}, -"propagationDepthFromParent": { -"description": "Depth of the URL from it's nearest parent in GeoLocation data. Webpages inherhit locations from their parent pages. For example, if foo.com/a is assigned location L1, and foo.com/a/b is not assigned any location, then http://www.foo.com/a/b inherits location L1 from it's nearest parent foo.com/a in GeoLocation data. This attribute is the distance from the nearest parent which is present in GeoLocation data. In this particular case, it will be 1.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"CountryLocationInfo": { -"description": "This represents one location.", -"id": "CountryLocationInfo", -"properties": { -"center": { -"$ref": "GeostorePointProto", -"description": "The latitude and longitude of the conceptual center of the location. 
For cities, this would be the center of the downtown, or maybe the location of city hall. For states and countries it might be the capital city. But there are no guarantees and this may be any random point inside the location." -}, -"city": { -"type": "string" -}, -"country": { -"description": "Human readable name hierarchy. Only the relevant fields will be present. For example for city GeoLocations, sub_locality field will not be present. Cleared during index creation.", -"type": "string" -}, -"county": { -"type": "string" -}, -"enclosingStateFeatureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "Oyster feature ID of the enclosing state. Cleared during index creation." -}, -"featureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "Oyster feature ID of the location. Cleared during index creation." -}, -"state": { -"type": "string" -}, -"stateIdFprint": { -"description": "32 bit fingerprint of the feature id of the state of this location. For cities and sub-localities it will be the enclosing state. For state locations, it will be fingerprint of the feture-id of the location itself. 
Populated during index creation.", -"format": "uint32", -"type": "integer" -}, -"subLocality": { -"type": "string" -}, -"type": { -"description": "Type of the location (sub-locality, city, state etc).", -"enum": [ -"TYPE_ANY", -"TYPE_TRANSPORTATION", -"TYPE_ROUTE", -"TYPE_DEPRECATED_HIGHWAY_DO_NOT_USE", -"TYPE_HIGHWAY", -"TYPE_HIGHWAY_1", -"TYPE_HIGHWAY_2", -"TYPE_HIGHWAY_3", -"TYPE_HIGHWAY_4", -"TYPE_HIGHWAY_5", -"TYPE_HIGHWAY_6", -"TYPE_HIGHWAY_7", -"TYPE_HIGHWAY_8", -"TYPE_HIGHWAY_9", -"TYPE_BICYCLE_ROUTE", -"TYPE_TRAIL", -"TYPE_SEGMENT", -"TYPE_ROAD", -"TYPE_RAILWAY", -"TYPE_STANDARD_TRACK", -"TYPE_JR_TRACK", -"TYPE_NARROW_TRACK", -"TYPE_MONORAIL_TRACK", -"TYPE_SUBWAY_TRACK", -"TYPE_LIGHT_RAIL_TRACK", -"TYPE_BROAD_TRACK", -"TYPE_HIGH_SPEED_RAIL", -"TYPE_TROLLEY_TRACK", -"TYPE_FERRY", -"TYPE_FERRY_BOAT", -"TYPE_FERRY_TRAIN", -"TYPE_VIRTUAL_SEGMENT", -"TYPE_INTERSECTION", -"TYPE_TRANSIT", -"TYPE_TRANSIT_STATION", -"TYPE_BUS_STATION", -"TYPE_TRAMWAY_STATION", -"TYPE_TRAIN_STATION", -"TYPE_SUBWAY_STATION", -"TYPE_FERRY_TERMINAL", -"TYPE_AIRPORT", -"TYPE_AIRPORT_CIVIL", -"TYPE_AIRPORT_MILITARY", -"TYPE_AIRPORT_MIXED", -"TYPE_HELIPORT", -"TYPE_SEAPLANE_BASE", -"TYPE_AIRSTRIP", -"TYPE_CABLE_CAR_STATION", -"TYPE_GONDOLA_LIFT_STATION", -"TYPE_FUNICULAR_STATION", -"TYPE_SPECIAL_STATION", -"TYPE_HORSE_CARRIAGE_STATION", -"TYPE_MONORAIL_STATION", -"TYPE_SEAPORT", -"TYPE_TRANSIT_STOP", -"TYPE_TRANSIT_TRIP", -"TYPE_TRANSIT_DEPARTURE", -"TYPE_TRANSIT_LEG", -"TYPE_TRANSIT_LINE", -"TYPE_TRANSIT_AGENCY_DEPRECATED_VALUE", -"TYPE_TRANSIT_TRANSFER", -"TYPE_SEGMENT_PATH", -"TYPE_ROAD_SIGN", -"TYPE_INTERSECTION_GROUP", -"TYPE_PATHWAY", -"TYPE_RESTRICTION_GROUP", -"TYPE_TOLL_CLUSTER", -"TYPE_POLITICAL", -"TYPE_COUNTRY", -"TYPE_ADMINISTRATIVE_AREA", -"TYPE_ADMINISTRATIVE_AREA1", -"TYPE_US_STATE", -"TYPE_GB_COUNTRY", -"TYPE_JP_TODOUFUKEN", -"TYPE_ADMINISTRATIVE_AREA2", -"TYPE_GB_FORMER_POSTAL_COUNTY", -"TYPE_GB_TRADITIONAL_COUNTY", -"TYPE_ADMINISTRATIVE_AREA3", 
-"TYPE_ADMINISTRATIVE_AREA4", -"TYPE_ADMINISTRATIVE_AREA5", -"TYPE_ADMINISTRATIVE_AREA6", -"TYPE_ADMINISTRATIVE_AREA7", -"TYPE_ADMINISTRATIVE_AREA8", -"TYPE_ADMINISTRATIVE_AREA9", -"TYPE_COLLOQUIAL_AREA", -"TYPE_RESERVATION", -"TYPE_LOCALITY", -"TYPE_GB_POST_TOWN", -"TYPE_JP_GUN", -"TYPE_JP_SHIKUCHOUSON", -"TYPE_JP_SUB_SHIKUCHOUSON", -"TYPE_COLLOQUIAL_CITY", -"TYPE_SUBLOCALITY", -"TYPE_US_BOROUGH", -"TYPE_GB_DEPENDENT_LOCALITY", -"TYPE_JP_OOAZA", -"TYPE_JP_KOAZA", -"TYPE_JP_GAIKU", -"TYPE_GB_DOUBLE_DEPENDENT_LOCALITY", -"TYPE_JP_CHIBAN", -"TYPE_JP_EDABAN", -"TYPE_SUBLOCALITY1", -"TYPE_SUBLOCALITY2", -"TYPE_SUBLOCALITY3", -"TYPE_SUBLOCALITY4", -"TYPE_SUBLOCALITY5", -"TYPE_NEIGHBORHOOD", -"TYPE_CONSTITUENCY", -"TYPE_DESIGNATED_MARKET_AREA", -"TYPE_SCHOOL_DISTRICT", -"TYPE_LAND_PARCEL", -"TYPE_DISPUTED_AREA", -"TYPE_POLICE_JURISDICTION", -"TYPE_STATISTICAL_AREA", -"TYPE_CONSTITUENCY_FUTURE", -"TYPE_PARK", -"TYPE_GOLF_COURSE", -"TYPE_LOCAL_PARK", -"TYPE_NATIONAL_PARK", -"TYPE_US_NATIONAL_PARK", -"TYPE_US_NATIONAL_MONUMENT", -"TYPE_NATIONAL_FOREST", -"TYPE_PROVINCIAL_PARK", -"TYPE_PROVINCIAL_FOREST", -"TYPE_CAMPGROUNDS", -"TYPE_HIKING_AREA", -"TYPE_BUSINESS", -"TYPE_GOVERNMENT", -"TYPE_BORDER_CROSSING", -"TYPE_CITY_HALL", -"TYPE_COURTHOUSE", -"TYPE_EMBASSY", -"TYPE_LIBRARY", -"TYPE_SCHOOL", -"TYPE_UNIVERSITY", -"TYPE_EMERGENCY", -"TYPE_HOSPITAL", -"TYPE_PHARMACY", -"TYPE_POLICE", -"TYPE_FIRE", -"TYPE_DOCTOR", -"TYPE_DENTIST", -"TYPE_VETERINARIAN", -"TYPE_TRAVEL_SERVICE", -"TYPE_LODGING", -"TYPE_RESTAURANT", -"TYPE_GAS_STATION", -"TYPE_PARKING", -"TYPE_POST_OFFICE", -"TYPE_REST_AREA", -"TYPE_CASH_MACHINE", -"TYPE_CAR_RENTAL", -"TYPE_CAR_REPAIR", -"TYPE_SHOPPING", -"TYPE_GROCERY", -"TYPE_TOURIST_DESTINATION", -"TYPE_ECO_TOURIST_DESTINATION", -"TYPE_BIRD_WATCHING", -"TYPE_FISHING", -"TYPE_HUNTING", -"TYPE_NATURE_RESERVE", -"TYPE_TEMPLE", -"TYPE_CHURCH", -"TYPE_GURUDWARA", -"TYPE_HINDU_TEMPLE", -"TYPE_MOSQUE", -"TYPE_SYNAGOGUE", -"TYPE_STADIUM", -"TYPE_BAR", 
-"TYPE_MOVIE_RENTAL", -"TYPE_COFFEE", -"TYPE_GOLF", -"TYPE_BANK", -"TYPE_DOODLE", -"TYPE_GROUNDS", -"TYPE_AIRPORT_GROUNDS", -"TYPE_BUILDING_GROUNDS", -"TYPE_CEMETERY", -"TYPE_HOSPITAL_GROUNDS", -"TYPE_INDUSTRIAL", -"TYPE_MILITARY", -"TYPE_SHOPPING_CENTER", -"TYPE_SPORTS_COMPLEX", -"TYPE_UNIVERSITY_GROUNDS", -"TYPE_DEPRECATED_TARMAC", -"TYPE_ENCLOSED_TRAFFIC_AREA", -"TYPE_PARKING_LOT", -"TYPE_PARKING_GARAGE", -"TYPE_OFF_ROAD_AREA", -"TYPE_BORDER", -"TYPE_BUILDING", -"TYPE_GEOCODED_ADDRESS", -"TYPE_NATURAL_FEATURE", -"TYPE_TERRAIN", -"TYPE_SAND", -"TYPE_BEACH", -"TYPE_DUNE", -"TYPE_ROCKY", -"TYPE_ICE", -"TYPE_GLACIER", -"TYPE_BUILT_UP_AREA", -"TYPE_VEGETATION", -"TYPE_SHRUBBERY", -"TYPE_WOODS", -"TYPE_AGRICULTURAL", -"TYPE_GRASSLAND", -"TYPE_TUNDRA", -"TYPE_DESERT", -"TYPE_SALT_FLAT", -"TYPE_WATER", -"TYPE_OCEAN", -"TYPE_BAY", -"TYPE_BIGHT", -"TYPE_LAGOON", -"TYPE_SEA", -"TYPE_STRAIT", -"TYPE_INLET", -"TYPE_FJORD", -"TYPE_LAKE", -"TYPE_SEASONAL_LAKE", -"TYPE_RESERVOIR", -"TYPE_POND", -"TYPE_RIVER", -"TYPE_RAPIDS", -"TYPE_DISTRIBUTARY", -"TYPE_CONFLUENCE", -"TYPE_WATERFALL", -"TYPE_SPRING", -"TYPE_GEYSER", -"TYPE_HOT_SPRING", -"TYPE_SEASONAL_RIVER", -"TYPE_WADI", -"TYPE_ESTUARY", -"TYPE_WETLAND", -"TYPE_WATER_NAVIGATION", -"TYPE_FORD", -"TYPE_CANAL", -"TYPE_HARBOR", -"TYPE_CHANNEL", -"TYPE_REEF", -"TYPE_REEF_FLAT", -"TYPE_REEF_GROWTH", -"TYPE_REEF_EXTENT", -"TYPE_REEF_ROCK_SUBMERGED", -"TYPE_IRRIGATION", -"TYPE_DAM", -"TYPE_DRINKING_WATER", -"TYPE_CURRENT", -"TYPE_WATERING_HOLE", -"TYPE_TECTONIC", -"TYPE_WATERING_HOLE_DEPRECATED", -"TYPE_VOLCANO", -"TYPE_LAVA_FIELD", -"TYPE_FISSURE", -"TYPE_FAULT", -"TYPE_LAND_MASS", -"TYPE_CONTINENT", -"TYPE_ISLAND", -"TYPE_ATOLL", -"TYPE_OCEAN_ROCK_EXPOSED", -"TYPE_CAY", -"TYPE_PENINSULA", -"TYPE_ISTHMUS", -"TYPE_ELEVATED", -"TYPE_PEAK", -"TYPE_NUNATAK", -"TYPE_SPUR", -"TYPE_PASS", -"TYPE_PLATEAU", -"TYPE_RIDGE", -"TYPE_RAVINE", -"TYPE_CRATER", -"TYPE_KARST", -"TYPE_CLIFF", -"TYPE_VISTA", -"TYPE_DIGITAL_ELEVATION_MODEL", 
-"TYPE_UPLAND", -"TYPE_TERRACE", -"TYPE_SLOPE", -"TYPE_CONTOUR_LINE", -"TYPE_PAN", -"TYPE_UNSTABLE_HILLSIDE", -"TYPE_MOUNTAIN_RANGE", -"TYPE_UNDERSEA", -"TYPE_SUBMARINE_SEAMOUNT", -"TYPE_SUBMARINE_RIDGE", -"TYPE_SUBMARINE_GAP", -"TYPE_SUBMARINE_PLATEAU", -"TYPE_SUBMARINE_DEEP", -"TYPE_SUBMARINE_VALLEY", -"TYPE_SUBMARINE_BASIN", -"TYPE_SUBMARINE_SLOPE", -"TYPE_SUBMARINE_CLIFF", -"TYPE_SUBMARINE_PLAIN", -"TYPE_SUBMARINE_FRACTURE_ZONE", -"TYPE_CAVE", -"TYPE_ROCK", -"TYPE_ARCHIPELAGO", -"TYPE_POSTAL", -"TYPE_POSTAL_CODE", -"TYPE_POSTAL_CODE_PREFIX", -"TYPE_PREMISE", -"TYPE_SUB_PREMISE", -"TYPE_SUITE", -"TYPE_POST_TOWN", -"TYPE_POSTAL_ROUND", -"TYPE_META_FEATURE", -"TYPE_DATA_SOURCE", -"TYPE_LOCALE", -"TYPE_TIMEZONE", -"TYPE_BUSINESS_CHAIN", -"TYPE_PHONE_NUMBER_PREFIX", -"TYPE_PHONE_NUMBER_AREA_CODE", -"TYPE_BUSINESS_CORRIDOR", -"TYPE_ADDRESS_TEMPLATE", -"TYPE_TRANSIT_AGENCY", -"TYPE_FUTURE_GEOMETRY", -"TYPE_EVENT", -"TYPE_EARTHQUAKE", -"TYPE_HURRICANE", -"TYPE_WEATHER_CONDITION", -"TYPE_TRANSIENT", -"TYPE_ENTRANCE", -"TYPE_CARTOGRAPHIC", -"TYPE_HIGH_TENSION", -"TYPE_SKI_TRAIL", -"TYPE_SKI_LIFT", -"TYPE_SKI_BOUNDARY", -"TYPE_WATERSHED_BOUNDARY", -"TYPE_TARMAC", -"TYPE_WALL", -"TYPE_PICNIC_AREA", -"TYPE_PLAY_GROUND", -"TYPE_TRAIL_HEAD", -"TYPE_GOLF_TEEING_GROUND", -"TYPE_GOLF_PUTTING_GREEN", -"TYPE_GOLF_ROUGH", -"TYPE_GOLF_SAND_BUNKER", -"TYPE_GOLF_FAIRWAY", -"TYPE_GOLF_HOLE", -"TYPE_DEPRECATED_GOLF_SHOP", -"TYPE_CAMPING_SITE", -"TYPE_DESIGNATED_BARBECUE_PIT", -"TYPE_DESIGNATED_COOKING_AREA", -"TYPE_CAMPFIRE_PIT", -"TYPE_WATER_FOUNTAIN", -"TYPE_LITTER_RECEPTACLE", -"TYPE_LOCKER_AREA", -"TYPE_ANIMAL_ENCLOSURE", -"TYPE_CARTOGRAPHIC_LINE", -"TYPE_ESTABLISHMENT", -"TYPE_ESTABLISHMENT_GROUNDS", -"TYPE_ESTABLISHMENT_BUILDING", -"TYPE_ESTABLISHMENT_POI", -"TYPE_ESTABLISHMENT_SERVICE", -"TYPE_CELESTIAL", -"TYPE_ROAD_MONITOR", -"TYPE_PUBLIC_SPACES_AND_MONUMENTS", -"TYPE_STATUE", -"TYPE_TOWN_SQUARE", -"TYPE_LEVEL", -"TYPE_COMPOUND", -"TYPE_COMPOUND_GROUNDS", 
-"TYPE_COMPOUND_BUILDING", -"TYPE_COMPOUND_SECTION", -"TYPE_TERMINAL_POINT", -"TYPE_REGULATED_AREA", -"TYPE_LOGICAL_BORDER", -"TYPE_GCONCEPT_ONLY", -"TYPE_DO_NOT_USE_RESERVED_TO_CATCH_GENERATED_FILES", -"TYPE_UNKNOWN" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -true, -true, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false -], -"enumDescriptions": [ -"ABSTRACT", -"ABSTRACT", -"A route is any section of road (or rails, etc.) that has a name. This includes city streets as well as highways. Road segments can belong to multiple routes (e.g. El Camino, CA-82).", -"DEPRECATED", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"A designated bicycle route, whose segments may consist of any combination of bicycle paths, bicycle lanes, or city streets.", -"A designated trail, which may consist of paved walkways, dirt paths, fire road, streets or highways, etc.", -"ABSTRACT", -"", -"Railroads use several different incompatible track types.", -"", -"", -"", -"", -"", -"", -"", -"", -"Tracks for streetcars, cable-cars, etc. Ferries are services that are part of the road network but are not roads. 
They typically involve fares and scheduled departure times.", -"ABSTRACT", -"The vast majority of ferries are ferry boats.", -"Also called a \"car transport\", a ferry train is a rail service that carries passengers and their vehicles across undrivable terrain. The Channel Tunnel (\"Chunnel\") is the most famous example, but they are also common in the Alps where they connect neighboring valleys otherwise separated by impassable mountains.", -"Any plausible 1-dimensional path through a 2+ dimensional space, for the purposes of making graph-search-based routing possible. Such segments can be used to model paths through parking lots, squares, floors of buildings and other areas.", -"An intersection consists of a collection of segments that terminate at the same location. This is topological definition: it may not match what a typical user would think of as an \"intersection\". See TYPE_INTERSECTION_GROUP, below, for more information. Each segment terminating at an intersection has an \"endpoint type\" that specifies how that segment is terminated: stop sign, yield sign, three-way light, etc.", -"ABSTRACT", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"A transit line is a collection of transit legs, associated with some invariant properties of the trips that run over the legs. See also transitline.proto", -"TYPE_TRANSIT_AGENCY was moved to 0xC91. This deprecated enum value still exists for debugging purposes only.", -"DEPRECATED", -"ABSTRACT", -"Road sign features have names, point geometry, etc. They also have segment_path data (see below) which lists the segments that refer to the sign. 
See segment.proto for the reference from the segment to the road sign.", -"Our TYPE_INTERSECTION feature, above, models the point where one or more segments terminate. This is topological definition: it may not match what a typical user would think of as an \"intersection\". Consider the intersections where Hayes, Market, Larkin, and 9th Street meet near (37.77765, -122.41638) in San Francisco. Most people would probably consider this a single feature, even though we model it as four separate TYPE_INTERSECTION features. This TYPE_INTERSECTION_GROUP is used to model the user's concept of a complex intersection.", -"RESERVED", -"A restriction group describes a set of segment restrictions that belong together and have a name or an associated event. See also restriction_group.proto", -"DEPRECATED", -"ABSTRACT", -"", -"ABSTRACT", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"e.g. Silicon Valley", -"A reservation is a region collectively held or governed by indigenous people and officially recognized by the country\u2019s government at the federal or state level. A reservation may be fully contained within an administrative feature or partially contained within two or more. These regions are referred to by different categorical names depending on country and even by state, including but not limited to: \u201cIndian Reservations\u201d, \u201cIndian Reserves\u201d, \u201cLand Claim Settlement Lands\u201d, \u201cIndian Lands\u201d, \u201cTreaty Lands\u201d, \u201cIndigenous Territories\u201d, etc. A reservation is not a historic indigenous territory boundary or a region which has applied for land rights but has not yet received official recognition.", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"An entity widely considered to be a city, that may itself be made up of smaller political entities, some of which are cities/towns/villages themselves. 
For example, the colloquial view of Sydney, Australia actually comprises many smaller cities, but is regarded as a city itself. This type is not suitable for modeling official metro-/micropolitan or other statistical areas.", -"ABSTRACT", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"Designated Market Areas (or DMAs) are used by marketing and ratings companies (such as the Nielsen Media Research company) to describe geographical regions (such as the greater New York metropolitan area) that are covered by a set of television stations. (See http://www.schooldata.com/pdfs/DMA.pdf) In the United States, DMAs should have a DMA numeric ID name, tagged with the FLAG_DESIGNATED_MARKET_AREA_ID flag.", -"", -"", -"Eventually we'll have more data for disputed areas (e.g., who makes claims on the area, who has de facto control, etc.). For the moment, we just define a type so we can simply mark areas as disputed.", -"Boundaries representing the jurisdiction of a particular police station.", -"An area used for aggregating statistical data, eg, a census region. 
Note that TYPE_STATISTICAL_AREA has a third nibble so we can add an abstract parent above it later if need be at 0x2E1 (and rename TYPE_STATISTICAL_AREA as TYPE_STATISTICAL_AREA1).", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"A line representing the boundary between two features. See border.proto for details.", -"DEPRECATED", -"An association of a point with an address, with no other information.", -"ABSTRACT", -"Expanses of land that share common surface attributes. These areas would look more or less uniform from a high altitude.", -"", -"", -"", -"", -"", -"", -"Terrain that looks populated.", -"Terrain that is covered in vegetation.", -"", -"", -"", -"", -"", -"", -"A flat expanse of salt left by the evaporation of a body of salt water.", -"Features can be TYPE_WATER if we don't have enough information to properly type the body of water. 
TYPE_WATER is also used as the type for child features that compose a TYPE_RIVER feature.", -"One of the large salt-water bodies that covers most of the globe.", -"An ocean subdivision formed by a coastal indentation. Includes coves and gulfs.", -"An open body of water formed by a slight coastal indentation.", -"", -"An ocean subdivision more or less confined by land and islands.", -"A long narrow ocean subdivision. Includes sounds.", -"", -"", -"An inland body of standing water.", -"A lake that dries up part of the year.", -"An artificial body of water, possibly created by a dam, often used for irrigation or house use.", -"", -"An inland body of moving water, or parts associated with it in which there is little or no current (backwater).", -"", -"A branch which flows away from the main river. Includes deltas.", -"A place where two or more rivers join.", -"", -"A place where ground water flows naturally out of the ground.", -"", -"", -"A river that dries up part of the year.", -"A dry riverbed that occasionally receives flashfloods.", -"A place at the end of a river where fresh and salt water mix. Includes tidal creeks and limans.", -"Land that is usually flooded. Includes bogs, marshes, flats, moors, and swamps.", -"", -"A shallow place where water may be waded through.", -"A narrow passage used by boats. Normally artificial.", -"A deep place near a shore where ships commonly drop anchor.", -"A deep part in a body of water that is suitable for navigation. Includes narrows.", -"Rocks, coral, sandbars, or other features beneath the surface of the water that pose a hazard to passing ships. 
Includes shoals.", -"A relatively shallow zone of the back reef located closest to the shore, that may be exposed at low tide.", -"A small section of rocks, coral, sandbars, or other features beneath the surface of the water that forms part of a reef.", -"The full extent of the reef complex.", -"A submerged rock in the water.", -"Man-made (and sometimes natural) channels used to move water. This type was used for both dam structures and water that is hold back by dams. We should use TYPE_COMPOUND_BUILDING for dam structures and TYPE_RESERVOIR for water.", -"DEPRECATED", -"", -"Includes overfalls.", -"A natural depression filled with water where animals come to drink.", -"This type is incorrectly under TYPE_TECTONIC instead of TYPE_WATER. This was a mistake and is now fixed. See TYPE_WATERING_HOLE for the replacement.", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"", -"An exposed rock in the water.", -"A small, low-elevation, sandy island formed on the surface of coral reefs", -"A stretch of land projecting into water. Includes capes and spits.", -"A strip of land connecting two larger land masses, such as continents.", -"Features that are notable for being high (or low), or for having sudden changes in elevation. These features might have an \"elevation\" extension to specify the actual elevation. See ElevationProto for more information.", -"Elevations that have a distinctive peak.", -"A peak or ridge of a mountain that extends through a glacier.", -"A subsidiary peak of a mountain.", -"A route over an otherwise difficult to traverse feature. Includes saddle.", -"Elevations that are flat on top. Includes mesas and buttes.", -"A ridge is a geographical feature consisting of a chain of mountains or hills that form a continuous elevated crest with a single ridgeline for some distance.", -"Steep declines usually carved by erosion. 
Includes valleys, canyons, ditches, and gorges.", -"Depressions causes by impact, explosion, and sometimes sink-holes.", -"Topography formed on limestone and gypsum by dissolution with sinkholes, caves, etc.", -"A vertical or nearly vertical slope. Includes escarpments.", -"An elevated place that is notable for having a good view. Raster digital elevation data. This is not a type to be used by providers or consumed by clients.", -"RESERVED", -"Land along streams higher than the alluvial plain or stream terrace.", -"", -"Land not so steep as a cliff, but changing elevation. Includes slides.", -"All the points on the polygon are at the same elevation.", -"A near-level shallow, natural depression or basin, usually containing an intermittent lake, pond, or pool.", -"", -"A series of mountains or hills ranged in a line and connected by high ground. Mountain ranges usually consist of many smaller ridges. For example, the Himalayas, the Andes. the Alps, etc.", -"Features that are notable for being high (or low), or for having sudden changes in elevation. These features might have an \"elevation\" extension to specify the actual elevation. See ElevationProto for more information.", -"includes peaks, ranges, and spurs", -"", -"includes saddles", -"", -"", -"includes trenches and troughs", -"", -"", -"", -"", -"", -"Don't use 0xA7. Use 8 bits for additional types under TYPE_NATURAL_FEATURE, so we don't run out of space. The following are miscellaneous natural features that don't fit any of the categories above.", -"", -"A feature representing a group or chain of islands. ", -"ABSTRACT", -"This is the type for postal codes which are complete and independent enough that there should be a feature for them (e.g. US 5-digit ZIP codes). For even more detailed suffixes that further subdivide a postal code (such as the +4 component in US ZIP codes), store the information in a TYPE_POSTAL_CODE_SUFFIX address component. 
When a range or set of postal codes share the same geographical area, e.g. because a precise subdivision does not exist or this subdivision is unknown, this type is used for each individual postal code.", -"A prefix portion of a postal code which does not meet the requirements for TYPE_POSTAL_CODE, but which is useful to search for, for example UK outcodes.", -"DEPRECATED", -"DEPRECATED This is deprecated and we want to use TYPE_COMPOUND_SECTION instead.", -"DEPRECATED", -"The term \"post town\" is used for a locality-like-entity that is only used for postal addresses.", -"DEPRECATED", -"ABSTRACT", -"Every data source used in constructing a data repository has a corresponding feature that provides more information about that data source. The extra information is stored in the optional data_source field below.", -"A locale feature provides region specific conventions such as preferred language and formatting details for time, date, and currency values. Locales aren't necessary defined by physical geographic features, so they are classified as meta-features.", -"A timezone feature is used to specify the region covering an international timezone. When a point is covered by multiple timezone features, the most specific one can be used to compute the local time at this point. Most specific implies a much smaller region or the one that is closer to the center. A feature's timezone can be specified in the repeated related_timezone field.", -"A business chain feature is used to represent a chain, e.g. Starbucks, McDonald's, etc. Other features representing specific stores/franchises of this chain may refer to one such feature via RELATION_MEMBER_OF_CHAIN. This is not strictly reserved to commercial chains but can also be used to model organizations such as the Red Cross or the United Nations.", -"A phone number prefix feature is used to specify the region where phone numbers (typically fixed-line numbers) must begin with a certain prefix. 
Any phone number prefix down to any level of granularity could be represented by this type.", -"A phone number area code is a prefix which also coincides with the area code, or national destination code, of a particular region.", -"A Business Corridor is a dense cluster of semantically similar establishments. TYPE_BUSINESS_CORRIDOR features are distinguished from TYPE_COLLOQUIAL_AREA features because the corridors are not under the political hierarchy, are allowed to be nameless, and may not correspond to well-known real world locations. For more details, see go/geo-corridors-schema.", -"An address template feature provides region-specific conventions for structuring addresses. These features aren't necessarily defined by physical geographic features, so they are classified as meta-features.", -"A transit agency operates a number of lines, typically all in the same city, region or country. See also transitagency.proto", -"A feature whose geometry is planned to replace the geometry on another feature.", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"RESERVED", -"A portal of entry or exit to another feature. Examples: - Subway station entrance. - Parking lot entrance.", -"Cartographic features are used to capture real-world objects for which there is no current desire to model any specific attributes. 
These are only useful to make the map tiles look pretty.", -"DEPRECATED", -"Also see skitrail.proto", -"Also see skilift.proto", -"Also see skiboundary.proto", -"", -"Starting with TYPE_TARMAC, we use longer IDs, so that we can expand the number of feature types under TYPE_CARTOGRAPHIC.", -"Use TYPE_COMPOUND_GROUND and appropriate gcids for the next two.", -"DEPRECATED", -"DEPRECATED", -"", -"Sub-types within a golf course.", -"", -"", -"", -"", -"Use TYPE_ESTABLISHMENT_POI and gcid:golf_shop for golf shops instead.", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"Subtype within a zoo - a cage or fenced-off or otherwise delineated area containing animals.", -"A line for a cartographic detail. For example the international date line. Such features should have polyline geometry.", -"ABSTRACT This type is being replaced by TYPE_COMPOUND_GROUNDS. For further details, see go/compounds-v2", -"DEPRECATED This type has been replaced by TYPE_COMPOUND_BUILDING. For further details, see go/oyster-compounds", -"DEPRECATED", -"An establishment that has a physical location. Note that it *may* also have a service area (e.g. a restaurant that offers both dine-in and delivery). This type of business is also known as a \"hybrid\" Service Area Business. Establishment POIs can be referenced by TYPE_COMPOUND features using the RELATION_PRIMARILY_OCCUPIED_BY. This is the reciprocal relation of the RELATION_OCCUPIES.", -"NOTE(tcain): Using value 0xD441, since we could find ourselves with a need to differentiate service areas from online-only at this level in the future, but still benefit from being able to group those under a common parent, disjoint from TYPE_ESTABLISHMENT_POI.", -"The root of types of features that are in the sky, rather than on the earth. There will eventually be a hierarchy of types here.", -"Features responsible for monitoring traffic on roads (usually for speed). Includes cameras at particular points as well as monitors that cover larger spans. 
Features of this type should have a corresponding gcid that specifies the correct subtype (e.g. gcid:road_camera or gcid:speed_camera_zone). This type was originally named as TYPE_ROAD_CAMERA.", -"ABSTRACT", -"Note that this type does not distinguish the nature of the statue (religious, historical, memorial, tourist, ...).", -"Open space used for events, gathering, or as market-place.", -"A feature used to represent a logical level, e.g. floor.", -"ABSTRACT", -"e.g. campus, compound, parcel.", -"e.g. single family dwelling, office building.", -"e.g. suite, room, hallway, cubicle.", -"A terminal point represents a good location for a user to meet a taxi, ridesharing vehicle, or general driver.", -"An area controlled in some way by an authoritative source, such as a government-designated COVID containment zone or an area under government sanctions. Features of this type should have one or more gcids corresponding to their specific regulation, and client handling of these features may vary based on the type of regulation.", -"A grouping of TYPE_BORDER features (\"border segments\"), which together represent a border between two features of the same type.", -"A generic feature type for any geo-type that cannot be modeled under existing feature types. No new feature type should be created within feature proto after this type.", -"DEPRECATED", -"A feature of completely unknown type. This should only be used when absolutely necessary. One example in which this type is useful is in the Chinese importer, which must heuristically segment addresses into components - it often does not know what types to make those components. Please note that the Oyster address formatter does not currently support address components of TYPE_UNKNOWN well." -], -"type": "string" -} -}, -"type": "object" -}, -"CountryMetroNBFeature": { -"description": "A metro feature, keyed by NavBoost feature id V2. 
This can be a metro id, a boost, or extended in the future to add probabilities or weights.", -"id": "CountryMetroNBFeature", -"properties": { -"enclosingProvinceGeotoken": { -"description": "The enclosing_province_geotoken is a 32 bit fingerprint of the state encosing the (metro) id. MetroId's can span multiple states. Enclosing geotoken is filled in with the state name for disambiguation. ProvinceGeotoken field is different as it indicates an \"interest\". Format: 32 bit fingerprint(__state__country).", -"format": "uint32", -"type": "integer" -}, -"id": { -"description": "A 32 bit navboost v2 feature id encoding (country, language, metro). NavBoosterUtils class (google3/quality/navboost/nav_booster_utils.h) provides functions to decode this feature.", -"format": "uint32", -"type": "integer" -}, -"navboost": { -"description": "This is the multiplier to apply to the result for this locale & query. NOTE: This is for serving purposes only and should not be populated in the index.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"CountryProvinceGeotoken": { -"description": "A 32 bit fingerprint of a state level geotoken. The geotoken is in the following format: __state__country. These indicate a page is of interest to these states/regions of a country. The use of message is to enable easy additions of probabilities or weights per metro id in the future.", -"id": "CountryProvinceGeotoken", -"properties": { -"geotoken": { -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"CountrySalientCountry": { -"description": "Salient Countries is an estimated probability (salience) of a doc to be relevant to a country. On this message, countries are represented as int32 (the format of this data is defined in i18n/identifiers/stableinternalregionconverter.cc). 
Salience is a value in range [0.0 - 1.0] in which 1.0 represents a high likelihood to be relevant to the country", -"id": "CountrySalientCountry", -"properties": { -"compressedSalience": { -"format": "uint32", -"type": "integer" -}, -"countryCode": { -"format": "int32", -"type": "integer" -}, -"salience": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"CrawlerChangerateMultipleComponentDistribution": { -"description": "Proto contains parameters for a multiple component distributions, where each component has non-negative weight and the sum of component weights is 1.", -"id": "CrawlerChangerateMultipleComponentDistribution", -"properties": { -"components": { -"items": { -"$ref": "CrawlerChangerateSingleComponentDistribution" -}, -"type": "array" -} -}, -"type": "object" -}, -"CrawlerChangerateSingleComponentDistribution": { -"description": "Proto contains parameters of a single component distribution.", -"id": "CrawlerChangerateSingleComponentDistribution", -"properties": { -"logScaling": { -"description": "Scaling factor to ensure the approximated posterior to have the same scale as the product of prior and likelihood. This value is used to compute posterior weights. Uses log scale to provide a wider range. This field is for internal use only.", -"format": "float", -"type": "number" -}, -"type": { -"description": "The type indicates the type of the distribution.", -"enum": [ -"LOG_GAMMA", -"INV_GAMMA", -"GAMMA", -"LOG_NORMAL" -], -"enumDescriptions": [ -"Log-Gamma distribution, where it assumes the log of the values follow Gamma distribution. It uses extension gamma_params.", -"Inverse-Gamma distribution, where it assumes the inverse of values follow Gamma distribution. It uses extension gamma_params.", -"Gamma distribution. It uses extension gamma_params.", -"Log-Normal distribution. It uses extension log_normal_params." 
-], -"type": "string" -}, -"weight": { -"description": "The weight is only used in multiple component scenarios.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"CrawlerChangerateUrlChange": { -"description": "NEXT_TAG: 13", -"id": "CrawlerChangerateUrlChange", -"properties": { -"additionalChangesMerged": { -"description": "Duplicate UrlChanges crawled within a specified time range will be merged together. UrlChanges are considered duplicates if the simhash, simhash_is_trusted, simhash_v2, simhash_v2_is_trusted, and shingle_simhash are the same. additional_changes_merged indiciates the number of duplicate UrlChanges merged into this UrlChange.", -"format": "int32", -"type": "integer" -}, -"fractionalTileChange": { -"deprecated": true, -"description": "Deprecated fields. The fraction of tiles (0 to 1) that changed.", -"format": "double", -"type": "number" -}, -"interval": { -"description": "The length in seconds of the change.", -"format": "int32", -"type": "integer" -}, -"offDomainLinksChange": { -"deprecated": true, -"description": "Whether the content of the off-domain links changed.", -"type": "boolean" -}, -"offDomainLinksCount": { -"deprecated": true, -"description": "The new count of off-domain links, if they changed.", -"format": "int32", -"type": "integer" -}, -"onDomainLinksCount": { -"deprecated": true, -"description": "The new count of on-domain links, if the count changed.", -"format": "int32", -"type": "integer" -}, -"onDomainLinksCountChange": { -"deprecated": true, -"description": "Whether the number of on-domain links changed.", -"type": "boolean" -}, -"shingleSimhash": { -"$ref": "IndexingConverterShingleFingerprint", -"description": "The old simhash value obtained from shingles." -}, -"simhash": { -"description": "The simhash-v1 value. Use of simhash-v1 is deprecated, and newer UrlChange should only contain simhash-v2. During this transition period, UrlChange can contain either simhash or simhash_v2. 
It is possible that previous UrlChange only contain simhash-v1 and the next UrlChange only contain simhash-v2. In this case, we skip that interval in our changerate computation. [go/changerate-simhash-v2-migration]", -"format": "uint64", -"type": "string" -}, -"simhashIsTrusted": { -"description": "Whether the simhash-v1 should be trusted.", -"type": "boolean" -}, -"simhashV2": { -"description": "The simhash-v2 value.", -"format": "uint64", -"type": "string" -}, -"simhashV2IsTrusted": { -"description": "Whether the simhash-v2 value should be trusted.", -"type": "boolean" -} -}, -"type": "object" -}, -"CrawlerChangerateUrlChangerate": { -"description": "The next available field number is 22. To access the best estimate of change period, please use GetChangePeriod in predict-change-rate.h to select between this and other change period estimates below.", -"id": "CrawlerChangerateUrlChangerate", -"properties": { -"approximatedPosterior": { -"$ref": "CrawlerChangerateMultipleComponentDistribution", -"description": "The approximated posterior distribution." -}, -"averageChangeSignificance": { -"description": "The \"significance\" of the average change we saw of this document (from 0 to 1). Influenced by content changes. This can be used for prioritizing the crawl (higher significance first).", -"format": "double", -"type": "number" -}, -"changeperiod": { -"description": "//////////////////////////////////////////////////////////////////////////// The classic changerate estimation. //////////////////////////////////////////////////////////////////////////// The classic estimate of change period (in seconds). It is computed by inserted a \"fake\" change and no-change interval as a prior distribution. This field is generally not used and should NOT be accessed directly. 
See above for correct method for determining the change period estimate.", -"format": "int32", -"type": "integer" -}, -"confidence": { -"description": "The confidence (between 0 and 1) in the changeperiod guess.", -"format": "double", -"type": "number" -}, -"globalBasedChangePeriod": { -"description": "//////////////////////////////////////////////////////////////////////////// The changerate estimation based on the global prior. //////////////////////////////////////////////////////////////////////////// The global-based changeperiod. This is our estimate (in seconds) for the average time between changes. It is computed using the new prior method based on global_based_prior_period and the global_based_prior_strength specified below. This is used for computing pattern priors. Use pattern_based_change_period or changeperiod fields for all other purposes.", -"format": "int32", -"type": "integer" -}, -"globalBasedChangePeriodConfidence": { -"description": "The 'confidence' of the global-based changeperiod. This is the n-th root of the posterior evaluated at MAP point, where n is the number of history intervals. For now, it is hard to interpret the meaning of the absolute values of 'average' posterior cross different sets of data.", -"format": "double", -"type": "number" -}, -"globalBasedPriorPeriod": { -"description": "The 2 parameters below specify the prior employed in calculating the global_based_change_period. These values are precomputed through an offline analysis and specified via flags.", -"format": "double", -"type": "number" -}, -"globalBasedPriorStrength": { -"format": "double", -"type": "number" -}, -"lastChangeSignificance": { -"description": "The \"significance\" of the last change we saw of this document (from 0 to 1). Influenced by content changes, etc. 
This can be used for prioritizing the crawl (higher significance first).", -"format": "double", -"type": "number" -}, -"lastChanged": { -"description": "The last time (unix timestamp) we saw a changed copy of the document. Provided iff we have seen the page change.", -"format": "int32", -"type": "integer" -}, -"lastFetched": { -"description": "The last time (unix timestamp) we saw a fetched copy of the document.", -"format": "int32", -"type": "integer" -}, -"numIntervals": { -"description": "The number of intervals we've seen for this document (where an interval is two different versions).", -"format": "int32", -"type": "integer" -}, -"patternBasedChangePeriod": { -"description": "//////////////////////////////////////////////////////////////////////////// The changerate estimation based on the pattern prior. //////////////////////////////////////////////////////////////////////////// The pattern-based changeperiod. This is our estimate (in seconds) for the average time between changes. It is calculated based on the pattern_based_prior_period and pattern_based_prior_strength below. This quantity will eventually replace the old changeperiod calculation.", -"format": "int32", -"type": "integer" -}, -"patternBasedChangePeriodConfidence": { -"description": "The same as global_based_change_period_confidence, except it is computed using pattern based priors.", -"format": "double", -"type": "number" -}, -"patternBasedLowerChangePeriod": { -"description": "The lower edge of a confidence interval for the pattern-based change period.", -"format": "int32", -"type": "integer" -}, -"patternBasedPriorPeriod": { -"description": "The 2 parameters below specify the prior employed in calculating the pattern_based_change_period. 
These values are calculated in a separate process and looked up based on the URL pattern.", -"format": "double", -"type": "number" -}, -"patternBasedPriorStrength": { -"format": "double", -"type": "number" -}, -"patternChangePeriodVersion": { -"description": "The version number of the algorithm, refer to ChangePeriodVersion for more information.", -"format": "int32", -"type": "integer" -}, -"type": { -"description": "//////////////////////////////////////////////////////////////////////////// Basic information of a document. //////////////////////////////////////////////////////////////////////////// The type of the document determined by crawl histories, refer to TYPE for more information.", -"format": "int32", -"type": "integer" -}, -"ugcChangePeriod": { -"description": "//////////////////////////////////////////////////////////////////////////// The UGC changerate estimation. //////////////////////////////////////////////////////////////////////////// Information on change period generated from user generated content (UGC) change history.", -"format": "int32", -"type": "integer" -}, -"ugcChangePeriodConfidence": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"CrawlerChangerateUrlHistory": { -"id": "CrawlerChangerateUrlHistory", -"properties": { -"change": { -"description": "All the changes we've seen for this URL.", -"items": { -"$ref": "CrawlerChangerateUrlChange" -}, -"type": "array" -}, -"latestVersion": { -"$ref": "CrawlerChangerateUrlVersion", -"description": "The latest version we've seen." -}, -"url": { -"description": "This field in only set in 'url_history' column of Union repository to avoid having to read CompositeDocs.", -"type": "string" -} -}, -"type": "object" -}, -"CrawlerChangerateUrlVersion": { -"description": "NEXT_TAG: 15", -"id": "CrawlerChangerateUrlVersion", -"properties": { -"additionalChangesMerged": { -"description": "Same as the field in UrlChange. 
This allows us to merge identical UrlVersions into a single UrlVersion.", -"format": "int32", -"type": "integer" -}, -"contentType": { -"description": "The content type of the page.", -"format": "int32", -"type": "integer" -}, -"isImsNotModified": { -"description": "Whether this is an IMS response (a 304, not modified).", -"type": "boolean" -}, -"lastModified": { -"description": "The date from the LastModified header, if present.", -"format": "int32", -"type": "integer" -}, -"shingleSimhash": { -"$ref": "IndexingConverterShingleFingerprint", -"description": "The simhash value obtained from shingles." -}, -"simhash": { -"deprecated": true, -"description": "The simhash-v1 value. The simhash-v1 is now deprecated and new UrlVersions should only populate simhash-v2. During migration phase from using simhash-v1 to simhash-v2, it is possible that previous UrlChange only contain simhash-v1 and the next UrlChange / UrlVersion could only contain simhash-v2. In this case, we skip that interval in our changerate computation. 
[go/changerate-simhash-v2-migration]", -"format": "uint64", -"type": "string" -}, -"simhashIsTrusted": { -"deprecated": true, -"description": "Whether the simhash-v1 should be trusted.", -"type": "boolean" -}, -"simhashV2": { -"description": "The simhash-v2 value.", -"format": "uint64", -"type": "string" -}, -"simhashV2IsTrusted": { -"description": "Whether the simhash-v2 value should be trusted.", -"type": "boolean" -}, -"timestamp": { -"description": "The timestamp we crawled the page.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"CrowdingPerDocData": { -"id": "CrowdingPerDocData", -"properties": { -"newscluster": { -"items": { -"$ref": "CrowdingPerDocDataNewsCluster" -}, -"type": "array" -} -}, -"type": "object" -}, -"CrowdingPerDocDataNewsCluster": { -"description": "For crowding in news we need to keep data about the last X clustering iterations around.", -"id": "CrowdingPerDocDataNewsCluster", -"properties": { -"ClusterId": { -"description": "Fingerprint combination of all urls in a cluster", -"format": "uint64", -"type": "string" -}, -"ClusterSize": { -"description": "This is the X in the \"and X related >>\" link on headlines and search results", -"format": "int32", -"type": "integer" -}, -"ClusterTimeStamp": { -"description": "When was this clustered (needed for keeping last X iterations around and discarding earlier ones)", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"DeepCropIndexing": { -"id": "DeepCropIndexing", -"properties": { -"cropBytes": { -"description": "Compact representation for indexing, see creatism::CropBitmap for details on the packing format.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"DeepCropPixels": { -"id": "DeepCropPixels", -"properties": { -"x0": { -"description": "Pixels version of the DeepCropIndexing bytes, this corresponds to the crop box for a given image (based input image size and desired aspect ratio).", -"format": "uint32", -"type": "integer" -}, 
-"x1": { -"format": "uint32", -"type": "integer" -}, -"y0": { -"format": "uint32", -"type": "integer" -}, -"y1": { -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"DocProperties": { -"description": "NOTE: In segindexer, the docproperties of a document may be reused from a previous cycle if its content is not changed. If you add a new field to DocProperties, make sure it is taken care (i.e., gets copied from a previous cycle to the current document) in CDocProperties::EndDocument().", -"id": "DocProperties", -"properties": { -"avgTermWeight": { -"description": "The average weighted font size of a term in the doc body", -"format": "int32", -"type": "integer" -}, -"badTitle": { -"description": "Missing or meaningless title", -"type": "boolean" -}, -"badtitleinfo": { -"items": { -"$ref": "DocPropertiesBadTitleInfo" -}, -"type": "array" -}, -"languages": { -"description": "A Language enum value. See: go/language-enum", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"leadingtext": { -"$ref": "SnippetsLeadingtextLeadingTextInfo", -"description": "Leading text information generated by google3/quality/snippets/leadingtext/leadingtext-detector.cc" -}, -"numPunctuations": { -"format": "int32", -"type": "integer" -}, -"numTags": { -"format": "int32", -"type": "integer" -}, -"numTokens": { -"description": "The number of tokens, tags and punctuations in the tokenized contents. 
This is an approximation of the number of tokens, tags and punctuations we end up with in mustang, but is inexact since we drop some tokens in mustang and also truncate docs at a max cap.", -"format": "int32", -"type": "integer" -}, -"proseRestrict": { -"description": "The restricts for CSE structured search.", -"items": { -"type": "string" -}, -"type": "array" -}, -"restricts": { -"items": { -"type": "string" -}, -"type": "array" -}, -"timestamp": { -"description": "The time CDocProperties::StartDocument() is called, encoded as seconds past the epoch (Jan 1, 1970). This value is always refreshed and not reused.", -"format": "int64", -"type": "string" -}, -"title": { -"description": "Extracted from the title tag of the content. This is typically extracted by TitleMetaCollector defined at google3/segindexer/title-meta-collector.h. Please see its documentation for the format and other caveats.", -"type": "string" -} -}, -"type": "object" -}, -"DocPropertiesBadTitleInfo": { -"description": "Bad title information.", -"id": "DocPropertiesBadTitleInfo", -"properties": { -"score": { -"format": "float", -"type": "number" -}, -"type": { -"enum": [ -"NOT_BAD", -"MISSING_OR_MEANINGLESS", -"BOILERPLATE", -"FOREIGN", -"UNREADABLE", -"NAKED", -"NO_QUERY_SUPPORT", -"NO_SITE_INFO" -], -"enumDescriptions": [ -"", -"An empty string, or a bad title such as \"Untitled\". See quality_titles::bad_titles::BadTitleDetector for more details.", -"Most of the title is marked as boilerplate regions. See boilerplate::BoilerplateTitleScorer for more details.", -"The language of a title is different from the language of the document. See quality_snippets::foreigntitle::ForeignTitleScorer for more details.", -"Title has a low readability score. See quality_titles::ReadabilityScorer for more details.", -"Title needs more context (i.e. a site name), e.g. \"Photos\", \"Contact Us\". See quality_titles::bad_titles::BadTitleDetector for more details.", -"Title contains no navboost query. 
See quality_titles::AlternativeTitlesGenerator for more details.", -"Title has no site title information. See quality_titles::AlternativeTitlesGenerator for more details." -], -"type": "string" -} -}, -"type": "object" -}, -"DrishtiCompressedFeature": { -"description": "Protocol buffer for storing compressed feature.", -"id": "DrishtiCompressedFeature", -"properties": { -"featureName": { -"enum": [ -"UNKNOWN", -"STARBURST_V4", -"RESNETISH_V3" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"inRangeBitstream": { -"description": "in_range_bitstream is the string produced by range coder, while out_of_range_bitstream corresponds to the overflow stream, which is used whenever a quantized value is out of range. See https://cs.corp.google.com/piper///depot/google3/research/vision/piedpiper/brain/python/layers/entropy_models.py?l=225&cl=234825412", -"format": "byte", -"type": "string" -}, -"outOfRangeBitstream": { -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"DrishtiDenseFeatureData": { -"id": "DrishtiDenseFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"value": { -"description": "Dense data.", -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiDenseTokenData": { -"description": "Protocol buffer for storing dense token data.", -"id": "DrishtiDenseTokenData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"value": { -"items": { -"format": "uint32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiFeatureExtra": { -"id": "DrishtiFeatureExtra", -"properties": {}, -"type": 
"object" -}, -"DrishtiFeatureSetData": { -"id": "DrishtiFeatureSetData", -"properties": { -"extra": { -"description": "Extra information for this particular FeatureSetData (example timestamp of this frame in the video). (Almost never used).", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"feature": { -"description": "The following can have multiple FeatureSetElement(s) Each of these FeatureSetElement correspond to the various feature groups. One concrete example is the way these features are generated - example audio, video or OCR.", -"items": { -"$ref": "DrishtiFeatureSetDataFeatureSetElement" -}, -"type": "array" -}, -"label": { -"description": "Labels for this particular FeatureSetData. (Almost never used). Only interesting when you have (for example) frame level labels.", -"items": { -"$ref": "DrishtiLabelSetElement" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiFeatureSetDataFeatureSetElement": { -"description": "A FeatureSetElement stores the features coming from a single group.", -"id": "DrishtiFeatureSetDataFeatureSetElement", -"properties": { -"compressed": { -"$ref": "DrishtiCompressedFeature" -}, -"dense": { -"$ref": "DrishtiDenseFeatureData" -}, -"indexed": { -"$ref": "DrishtiIndexedFeatureData" -}, -"name": { -"description": "A name for the feature group: example \"AUDIO\", \"VIDEO\", \"OCR\", etc.", -"type": "string" -}, -"quantized": { -"$ref": "DrishtiQuantizedDenseFeatureData" -}, -"quantizedByteDense": { -"$ref": "DrishtiQuantizedByteDenseFeatureData" -}, -"quantizedByteIndexed": { -"$ref": "DrishtiQuantizedByteIndexedFeatureData" -}, -"sparse": { -"$ref": "DrishtiSparseFeatureData" -}, -"token": { -"$ref": "DrishtiDenseTokenData" -} -}, -"type": "object" -}, -"DrishtiFeatureSetDataSequence": { -"description": "This represents a sequence (ordered) of FeatureSetData elements.", -"id": "DrishtiFeatureSetDataSequence", -"properties": { -"element": { -"description": "FeatureSetData contains the features. 
In most scenarios, you only have one element. However, multiple elements are appropriate in case of videos where each element may correspond to a frame in the video.", -"items": { -"$ref": "DrishtiFeatureSetData" -}, -"type": "array" -}, -"extra": { -"description": "Some extra information about this FeatureSetDataSequence. (Almost never used).", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"label": { -"description": "Global (video-level) labels. In most cases, you only have one LabelSetElement. All the labels will be stored in this single LabelSetElement. Scenarios where you may have multiple LabelSetElement(s) is (for example) when you want to differentiate the labels into various sub-groups - eg, central vs relevant, kg-ids vs queries, etc.", -"items": { -"$ref": "DrishtiLabelSetElement" -}, -"type": "array" -}, -"timestamp": { -"description": "If set, must be same length as element. Each entry is the timestamp in microseconds where the FeatureSetData element was extracted.", -"items": { -"format": "int64", -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiIndexedFeatureData": { -"id": "DrishtiIndexedFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as index and value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"index": { -"description": "Indexed data. 
index and value must be of the same length.", -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -}, -"value": { -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiLabelSetData": { -"id": "DrishtiLabelSetData", -"properties": { -"extra": { -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"targetClass": { -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -}, -"targetClassName": { -"items": { -"type": "string" -}, -"type": "array" -}, -"targetValue": { -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"targetWeight": { -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"weight": { -"description": "Weight assigned to this set of labels.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"DrishtiLabelSetElement": { -"id": "DrishtiLabelSetElement", -"properties": { -"label": { -"$ref": "DrishtiLabelSetData" -}, -"name": { -"type": "string" -} -}, -"type": "object" -}, -"DrishtiQuantizedByteDenseFeatureData": { -"description": "Proto message to store quantized dense feature data.", -"id": "DrishtiQuantizedByteDenseFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"value": { -"description": "Quantized values for the feature.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"DrishtiQuantizedByteIndexedFeatureData": { -"description": "Proto message to store quantized indexed feature data.", -"id": "DrishtiQuantizedByteIndexedFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, 
-"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"index": { -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -}, -"value": { -"description": "Quantized values for the feature.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"DrishtiQuantizedDenseFeatureData": { -"id": "DrishtiQuantizedDenseFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"value": { -"description": "Quantized Dense data.", -"items": { -"format": "byte", -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiSparseFeatureData": { -"id": "DrishtiSparseFeatureData", -"properties": { -"extra": { -"description": "If extra is present it must be of the same length as label and value.", -"items": { -"$ref": "DrishtiFeatureExtra" -}, -"type": "array" -}, -"generalExtra": { -"$ref": "DrishtiFeatureExtra" -}, -"label": { -"description": "Indexed data. label and value must be of the same length.", -"items": { -"type": "string" -}, -"type": "array" -}, -"value": { -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -} -}, -"type": "object" -}, -"DrishtiVesperEncodedThumbnail": { -"description": "The attributes of encoded thumbnail images. Next id: 10.", -"id": "DrishtiVesperEncodedThumbnail", -"properties": { -"byteSize": { -"description": "Image size in bytes. Meaningful when contents are not stored inline (e.g., via `image_blob_id`).", -"format": "int64", -"type": "string" -}, -"crc32c": { -"description": "CRC-32 checksum of the image bytes. 
Can be used for data integrity check.", -"format": "uint32", -"type": "integer" -}, -"encodingQuality": { -"description": "JPEG/WEBP quality factor in range [0,100].", -"format": "int32", -"type": "integer" -}, -"encodingType": { -"description": "Image encoding type.", -"enum": [ -"UNKNOWN", -"JPEG", -"WEBP", -"PNG" -], -"enumDescriptions": [ -"", -"JPEG encoded.", -"WEBP encoded.", -"PNG encoded." -], -"type": "string" -}, -"height": { -"format": "int32", -"type": "integer" -}, -"imageBlobId": { -"description": "The Blob ID of the thumbnail image in the Blobstore. We recommend absolute IDs with universe prefix if this field is passed across systems. The owner of this blob is also responsible for data Wipeout compliance.", -"type": "string" -}, -"imageBytes": { -"description": "Encoded thumbnail bytes. Prefer this over `image_string` as we are not supposed to store image bytes in a proto string field.", -"format": "byte", -"type": "string" -}, -"imageString": { -"deprecated": true, -"description": "Please migrate to `image_bytes`.", -"type": "string" -}, -"width": { -"description": "Thumbnail resolution.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"DrishtiVesperMovingThumbnail": { -"description": "LINT: LEGACY_NAMES MovingThumbnail is defined as a short video clip that represents the whole video content. Next id: 17.", -"id": "DrishtiVesperMovingThumbnail", -"properties": { -"beginTimestampMs": { -"description": "The begin timestamp in milliseconds.", -"format": "int32", -"type": "integer" -}, -"durationMs": { -"description": "The duration of the moving thumbnail in milliseconds. 
Note that the duration may not be the difference between begin_timestamp_ms and end_timestamp_ms, esp when the moving thumbnail covers multiple clips from the video.", -"format": "int32", -"type": "integer" -}, -"encodedGifAnimation": { -"format": "byte", -"type": "string" -}, -"encodedVideoString": { -"description": "The encoded video string.", -"format": "byte", -"type": "string" -}, -"encodedWebpAnimation": { -"description": "The encoded WebP animation.", -"format": "byte", -"type": "string" -}, -"endTimestampMs": { -"description": "The end timestamp in milliseconds.", -"format": "int32", -"type": "integer" -}, -"height": { -"description": "Pixel height of the moving thumbnail.", -"format": "int32", -"type": "integer" -}, -"id": { -"description": "MovingThumbnail id (e.g., the video id).", -"type": "string" -}, -"movingThumbnailerVersion": { -"description": "If set, this is the algorithm version used to generate this moving thumbnail.", -"enum": [ -"V0", -"V1", -"DEPRECATED_V2", -"DEPRECATED_V3", -"V4", -"V5", -"V6", -"SHOPPING_PREVIEW_V0", -"SHORT_PREVIEW_V0", -"LIVE_MOVING_THUMBNAILER", -"MANUAL" -], -"enumDescriptions": [ -"Version 0 and 1 - Initial version with thumbnail quality, iconic face and motion scores.", -"", -"Version 2 - Version 1 with default static thumbnail matching, title matching, face matching and face clustering (Deprecated).", -"Version 3 - Version 2 with high resolution transcodes preferred (Deprecated).", -"Version 4 - Version 1 with default static thumbnail matching, title matching, and high resolution transcodes preferred.", -"Version 5 - Version 1 with title matching and high res transcodes preferred.", -"Version 6 - Version 1 with default static thumbnail matching, title matching, retention data and high resolution transcodes preferred.", -"Version used for Commerce Shopping Videos", -"Version used for YT Shorts.", -"Live moving thumbnailer (no scorers involved).", -"Manually generated." 
-], -"type": "string" -}, -"name": { -"description": "MovingThumbnail name.", -"enum": [ -"UNKNOWN", -"MQDEFAULT", -"MQDEFAULT_6S", -"LQDEFAULT_6S", -"MQ220P_5S", -"MQDEFAULT_6S_HIGHLIGHT", -"MQDEFAULT_6S_PRE_HIGHLIGHT", -"MQDEFAULT_6S_THIRD_HIGHLIGHT", -"MQDEFAULT_6S_ZOOM_IN", -"SD360P_6S_ZOOM_IN", -"MQDEFAULT_3S", -"MQDEFAULT_6S_480x270", -"MQDEFAULT_1S", -"MQ_SHORTS_PREVIEW", -"HQ_SHORTS_PREVIEW" -], -"enumDescriptions": [ -"", -"320x180, 10s", -"320x180, 6s", -"196x110, 6s", -"392x220, 5s", -"320x180, 6s, highlight", -"320x180, 6s, pre-highlight", -"(3s before highlight) 320x180, 6s, 3rd best highlight", -"320x180, 6s, central zoom-in version", -"640x360, 6s, central zoom-in version", -"320x180, 3s", -"480x270, 6s", -"Do not use this (deprecated)", -"360x360, original aspect ratio", -"480x480, original aspect ratio" -], -"type": "string" -}, -"score": { -"description": "The score of the moving thumbnail.", -"format": "float", -"type": "number" -}, -"scoreComponents": { -"$ref": "DrishtiVesperMovingThumbnailScoreComponents" -}, -"thumbnails": { -"description": "A set of single frame thumbnails in the MovingThumbnail.", -"items": { -"$ref": "DrishtiVesperThumbnail" -}, -"type": "array" -}, -"type": { -"description": "MovingThumbnail type.", -"enum": [ -"TYPE_UNKNOWN", -"AN_GIF", -"AN_WEBP", -"AN_MP4", -"AN_WEBM" -], -"enumDescriptions": [ -"", -"Animated gif.", -"Animated webp.", -"Animated mp4.", -"Animated webm." -], -"type": "string" -}, -"webpQualityLevel": { -"description": "The actual quality of the Webp animation. Note this value may not be equal to the quality value requested in the animation creator's options. 
This is because other requirements, such as the max file size, may force the creator to lower the actual quality value.", -"format": "float", -"type": "number" -}, -"width": { -"description": "Pixel width of the moving thumbnail.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"DrishtiVesperMovingThumbnailScoreComponents": { -"description": "Sum of individual score components within a moving thumbnail. Used as input for weight fitting.", -"id": "DrishtiVesperMovingThumbnailScoreComponents", -"properties": { -"audienceRewindRatioScore": { -"format": "float", -"type": "number" -}, -"iconicFaceScore": { -"format": "float", -"type": "number" -}, -"matchingScore": { -"format": "float", -"type": "number" -}, -"motionScore": { -"format": "float", -"type": "number" -}, -"titleMatchingScore": { -"format": "float", -"type": "number" -}, -"videoThumbQualityScore": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"DrishtiVesperThumbnail": { -"description": "The attributes of a video thumbnail.", -"id": "DrishtiVesperThumbnail", -"properties": { -"denseFeatures": { -"description": "Thumbnail dense features", -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"encodedImageString": { -"deprecated": true, -"description": "Thumbnail image as an encoded image. Deprecated, use encoded_thumbnails instead.", -"type": "string" -}, -"encodedImageStringSmall": { -"deprecated": true, -"description": "Thumbnail image as an encoded image with smaller resolution. 
Deprecated, use encoded_thumbnails instead.", -"type": "string" -}, -"encodedThumbnails": { -"description": "Encoded thumbnail images.", -"items": { -"$ref": "DrishtiVesperEncodedThumbnail" -}, -"type": "array" -}, -"id": { -"description": "Thumbnail id.", -"type": "string" -}, -"ocrText": { -"description": "Text in video thumbnails that was detected by OCR.", -"type": "string" -}, -"qualityScores": { -"description": "Thumbnail quality scores.", -"items": { -"$ref": "DrishtiVesperThumbnailQualityScore" -}, -"type": "array" -}, -"shouldUpdateDefaultThumbnail": { -"description": "If true, this thumbnail should update default thumbnail.", -"type": "boolean" -}, -"thumbnailerModelVersion": { -"description": "Thumbnailer Version.", -"enum": [ -"VERSION_UNKNOWN", -"VERSION_FIRST", -"VERSION_RANDOM", -"VERSION_4", -"VERSION_5", -"VERSION_6", -"VERSION_7", -"VERSION_8", -"VERSION_SHORTS_4", -"VERSION_SHORTS_5", -"VERSION_SHORTS_6", -"VERSION_STORY_4", -"VERSION_STORY_5", -"VERSION_MOVING_4", -"VERSION_MOVING_5", -"VERSION_MOVING_6", -"VERSION_MOVING_SHORTS_0", -"VERSION_MOVING_LIVE_0", -"VERSION_MOVING_MANUAL_0", -"VERSION_LITE_1", -"VERSION_CUSTOM", -"VERSION_REJECTED" -], -"enumDescriptions": [ -"", -"Use the first frame as thumbnail.", -"Randomly choose a frame as thumbnail.", -"Thumbnailer version 4.", -"Thumbnailer version 5.", -"Thumbnailer version 6 for violative thumbnail regeneration. Design: go/reduce-violative-thumb", -"Thumbnailer version 7.", -"Thumbnailer version 8 for violative thumbnail regeneration. 
Design: go/reduce-violative-thumb", -"Thumbnailer version 4 for shorts videos.", -"Thumbnailer version 5 for shorts videos.", -"Thumbnailer version 6 for shorts videos.", -"Thumbnailer version 4 for stories.", -"Thumbnailer version 5 for stories.", -"Moving thumbnailer version 4.", -"Moving thumbnailer version 5.", -"Moving thumbnailer version 6.", -"Moving thumbnailer version 0 for shorts.", -"Moving thumbnailer version 0 for live.", -"Moving thumbnail generated manually.", -"Lite Thumbnailer (previously known as FirstFrameThumbnailer)", -"Custom image (not generated). Likely client provided.", -"A gray fallback image for images that have been rejected." -], -"type": "string" -}, -"timestampMs": { -"description": "Thumbnail timestamp in milliseconds.", -"format": "int32", -"type": "integer" -}, -"type": { -"description": "Thumbnail type.", -"enum": [ -"UNKNOWN", -"MAIN_THUMB_CUSTOM", -"MAIN_THUMB_NON_CUSTOM", -"SHOT_THUMB", -"NUMBERED_THUMB", -"KEY_FRAME", -"FRAME", -"AUTO" -], -"enumDescriptions": [ -"", -"Customized main thumbnail uploaded from user.", -"Non-customized main thumbnail.", -"Shot thumbnail.", -"The numbered thumbnail, e.g. 1.jpg, 2.jpg, 3.jpg.", -"The iframe extracted from the media decoder.", -"The regular video frame.", -"This type indicates that the thumbnail is to use for thumbnail A/B test. In this case the id field will be used to indicate the control and experimental thumbnail." 
-], -"type": "string" -}, -"userReportedThumbnail": { -"$ref": "DrishtiVesperUserReportUserReportedThumbnail" -}, -"userReportedThumbnails": { -"description": "All user reported thumbnails of interest.", -"items": { -"$ref": "DrishtiVesperUserReportUserReportedThumbnail" -}, -"type": "array" -}, -"version": { -"description": "Thumbnail version, i.e., the unix time in seconds when the thumbnail was created.", -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"DrishtiVesperThumbnailQualityScore": { -"id": "DrishtiVesperThumbnailQualityScore", -"properties": { -"score": { -"format": "float", -"type": "number" -}, -"type": { -"enum": [ -"UNKNOWN", -"PHOTO_QUALITY", -"PAMIR_IMAGE_QUALITY", -"VIDEO_THUMB_QUALITY", -"SALIENCY", -"COMPLEXITY", -"SHARPNESS", -"CLOSE_UP", -"BEEHIVE_QUALITY", -"ICONIC_FACE", -"DUMMY", -"COLORFULNESS", -"MOTION", -"RETENTION_STATS", -"FACIAL_EXPRESSION", -"MATCHING", -"LUCKYSHOT_SHARPNESS", -"SINGLE_FACE_MODULE", -"TITLE_MATCHING", -"FACE_CLUSTERING", -"FACE_MATCHING", -"RACY_SCORE", -"NON_RACY_SCORE", -"SALIENCY_COVERAGE", -"AUDIENCE_WATCH_DATA", -"AUDIENCE_REWATCH_DATA", -"AUDIENCE_REWIND_RATIO", -"AUDIENCE_DROPOFF_RATIO", -"HIGHLIGHT_SCORE", -"JOY_FACE", -"EYE_OPEN", -"FACE_RATIO", -"OCR_RACY_SCORE", -"SHOT_BOUNDARY", -"NIMA", -"FOREGROUND_MOTION_SALIENCY", -"PAGE_QUALITY", -"GLOBAL_MOTION", -"CHAPTER_TITLE_MATCHING", -"DBSCAN_FRAME_CHAPTER_SIMILARITY", -"EYES_NOT_VISIBLY_CLOSED", -"ENGAGINESS", -"MERGED" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"The higher the score, the racier the thumbnail.", -"The higher the score, less racier of the thumbnail.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"go/nima", -"", -"", -"", -"", -"", -"", -"", -"The score merged from the above source types." 
-], -"type": "string" -} -}, -"type": "object" -}, -"DrishtiVesperUserReportHumanLabel": { -"description": "Proto holding values for details about human labels.", -"id": "DrishtiVesperUserReportHumanLabel", -"properties": { -"racyLevel": { -"enum": [ -"UNKNOWN", -"SAFE", -"BORDERLINE", -"SEXUAL" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"DrishtiVesperUserReportModelScore": { -"description": "Proto holding values for details about score and the source model.", -"id": "DrishtiVesperUserReportModelScore", -"properties": { -"modelName": { -"type": "string" -}, -"score": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"DrishtiVesperUserReportUserReportedThumbnail": { -"description": "Proto holding values for user reported thumbnails. Next id: 12", -"id": "DrishtiVesperUserReportUserReportedThumbnail", -"properties": { -"denseFeatures": { -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"duration": { -"description": "Number of days in which volume is calculated.", -"format": "int32", -"type": "integer" -}, -"humanLabel": { -"$ref": "DrishtiVesperUserReportHumanLabel" -}, -"impressions": { -"description": "Daily aggregared impressions for the reported video.", -"format": "int32", -"type": "integer" -}, -"needHumanLabel": { -"description": "Whether the thumbnail needs a human label.", -"type": "boolean" -}, -"rawHumanLabels": { -"items": { -"$ref": "DrishtiVesperUserReportHumanLabel" -}, -"type": "array" -}, -"reportScore": { -"$ref": "DrishtiVesperUserReportModelScore" -}, -"reportType": { -"enum": [ -"UNKNOWN", -"RACY" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"score": { -"$ref": "DrishtiVesperUserReportModelScore" -}, -"useCase": { -"enum": [ -"UNKNOWN", -"TRAIN", -"EVAL" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"volume": { -"description": "Number of reports.", -"format": "int32", -"type": "integer" -} -}, -"type": 
"object" -}, -"DrishtiVesperVideoThumbnail": { -"description": "Video level container for thumbnail with its attributes, e.g., timestamp, id, quality scores, annotations, or features.", -"id": "DrishtiVesperVideoThumbnail", -"properties": { -"id": { -"description": "Video id.", -"type": "string" -}, -"movingThumbnails": { -"items": { -"$ref": "DrishtiVesperMovingThumbnail" -}, -"type": "array" -}, -"thumbnails": { -"items": { -"$ref": "DrishtiVesperThumbnail" -}, -"type": "array" -} -}, -"type": "object" -}, -"EmbedsDeepLinkData": { -"description": "Deep-linking data is used to construct a deep-link URI for an activity or frame's embed, such that on click, the user is taken to the right place in a mobile app. If the app is not installed, the user is taken to the app store. If not on mobile, an analogous web uri is used.", -"id": "EmbedsDeepLinkData", -"properties": { -"appId": { -"description": "Application ID (or project ID) from Google API Console.", -"format": "int64", -"type": "string" -}, -"client": { -"description": "The data for a Google API Console client is entered by a developer during client registration and is stored in PackagingService.", -"items": { -"$ref": "EmbedsPackagingServiceClient" -}, -"type": "array" -}, -"deepLinkId": { -"description": "The ID for non-URL content. Embeds may either have no analogous web presence or prefer a native mobile experience if supported. In the case of no web presence, instead of setting the \"url\" field of an embed, such developers will set this field and other content fields, e.g. thumbnail, title, description. If set, this field is used to construct the deep-link URI. Note that the native experience is preferred over the web link and the web link is used as a fallback.", -"type": "string" -}, -"url": { -"description": "Analogous web presence. 
Used as desktop fallback or when no native link data is present.", -"type": "string" -} -}, -"type": "object" -}, -"EmbedsEmbedClientItem": { -"description": "Represents an embedded object in an update. This is a wrapper class that can contain a single specific item proto in an extension field. Think of it as a base class like `Message` in Java. Each item proto must declare that it extends this proto: message ExampleObject { option (item_type) = EXAMPLE_OBJECT; extend EmbedClientItem { optional ExampleObject example_object = ; } } See go/es-embeds for details.", -"id": "EmbedsEmbedClientItem", -"properties": { -"canonicalId": { -"description": "The canonical ID of the embed. If absent, the canonical ID is equal to the ID; if present, then the canonical ID represents an \"equivalence class\" of embeds which really refer to the same object. (For example, the URLs http://www.foo.com/ and http://foo.com/ refer to the same object) This field may be updated periodically by background processes.", -"type": "string" -}, -"deepLinkData": { -"$ref": "EmbedsDeepLinkData", -"description": "Deep-linking data to take the user to the right place in a mobile app. This is only used for preview and attribution. Links that are specific to a given embed type should live on that specific embed's proto by using Link. See http://goto.google.com/mariana-design." -}, -"id": { -"description": "The ID of the embed. This corresponds to the schema.org ID, as represented in the ItemScope.id field.", -"type": "string" -}, -"provenance": { -"$ref": "EmbedsProvenance", -"description": "The provenance of the embed, populated when the embed originated from a web fetch. The provenance captures information about the web page the embed had originated, like the URL that was retrieved and the retrieved URL's canonical form. This is useful in the case where the URL shared by the URL redirects (e.g., in the case of a shortened URL)." 
-}, -"renderId": { -"description": "The ID used to identify the embed during rendering. This field will match ID, if set, otherwise it will be the ID of the parent activity. This field is only populated on the server for client use and is not persisted to storage.", -"type": "string" -}, -"signature": { -"description": "Signature of the embed, used for verification.", -"type": "string" -}, -"transientData": { -"$ref": "EmbedsTransientData", -"description": "Transient generic data that will not be saved on the server." -}, -"type": { -"description": "The first value in `type` determines which extension field will be set. When creating an EmbedClientItem, you only need to set the first (primary) type in this field. When the server receives the item, it will populate the full type list using the parent annotations in the ItemType enum.", -"items": { -"enum": [ -"UNKNOWN", -"ACTION_V2", -"ADD_ACTION_V2", -"AGGREGATE_RATING_V2", -"ARTICLE_V2", -"ASSESS_ACTION_V2", -"AUDIO_OBJECT_V2", -"BASIC_INTERACTION_V2", -"BLOG_POSTING_V2", -"BLOG_V2", -"BOOK_V2", -"BUY_ACTION_V2", -"CHECK_IN_ACTION_V2", -"CHECKIN_V2", -"COLLEXION_V2", -"COMMENT_ACTION_V2", -"COMMENT_V2", -"COMMUNICATE_ACTION_V2", -"CONSUME_ACTION_V2", -"CREATE_ACTION_V2", -"CREATIVE_WORK_V2", -"DISCOVER_ACTION_V2", -"DOCUMENT_OBJECT_V2", -"DRAWING_OBJECT_V2", -"DRIVE_OBJECT_V2", -"EMOTISHARE_V2", -"ENTRY_POINT_V2", -"EVENT_TIME_V2", -"EVENT_V2", -"FILE_OBJECT_V2", -"FIND_ACTION_V2", -"FINANCIAL_QUOTE_V2", -"FORM_OBJECT_V2", -"GEO_COORDINATES_V2", -"GOOGLE_OFFER_V2", -"HANGOUT_CHAT_MESSAGE", -"HANGOUT_QUOTE", -"HANGOUT_V2", -"HOA_PLUS_EVENT_V2", -"IMAGE_OBJECT_V2", -"INTERACT_ACTION_V2", -"INTERACTION_V2", -"LISTEN_ACTION_V2", -"LOCAL_BUSINESS_V2", -"LOCAL_PLUS_PHOTO_ALBUM_V2", -"MAGAZINE_V2", -"MEDIA_OBJECT_V2", -"MOBILE_APPLICATION_V2", -"MOVIE_V2", -"MUSIC_ALBUM_V2", -"MUSIC_GROUP_V2", -"MUSIC_PLAYLIST_V2", -"MUSIC_RECORDING_V2", -"NEWS_ARTICLE_V2", -"OFFER_V2", -"ORGANIZATION_V2", -"ORGANIZE_ACTION_V2", 
-"PERSON_V2", -"PLACE_REVIEW_V2", -"PLACE_V2", -"PLAN_ACTION_V2", -"PLAY_MUSIC_ALBUM_V2", -"PLAY_MUSIC_TRACK_V2", -"PLAY_OBJECT_V2", -"PLUS_AUDIO_V2", -"PLUS_EVENT_V2", -"PLUS_MEDIA_COLLECTION_V2", -"PLUS_MEDIA_OBJECT_V2", -"PLUS_PAGE_V2", -"PLUS_PHOTOS_ADDED_TO_COLLECTION_V2", -"PLUS_PHOTO_ALBUM_V2", -"PLUS_PHOTO_COLLECTION_V2", -"PLUS_PHOTO_V2", -"PLUS_POST_V2", -"PLUS_RESHARE_V2", -"PLUS_SOFTWARE_APPLICATION_V2", -"POLL_OPTION_V2", -"POLL_V2", -"POSTAL_ADDRESS_V2", -"PRESENTATION_OBJECT_V2", -"PRODUCT_REVIEW_V2", -"RATING_V2", -"REACT_ACTION_V2", -"RESERVATION_V2", -"RESERVE_ACTION_V2", -"REVIEW_V2", -"REVIEW_ACTION_V2", -"SOFTWARE_APPLICATION_V2", -"SPREADSHEET_OBJECT_V2", -"SQUARE_INVITE_V2", -"SQUARE_V2", -"STICKER_V2", -"STORY_V2", -"THING_V2", -"TRADE_ACTION_V2", -"DEPRECATED_TOUR_OBJECT_V2", -"TV_EPISODE_V2", -"TV_SERIES_V2", -"UPDATE_ACTION_V2", -"VIEW_ACTION_V2", -"VIDEO_OBJECT_V2", -"VIDEO_GALLERY_V2", -"WANT_ACTION_V2", -"WEB_PAGE_V2", -"WRITE_ACTION_V2", -"YOUTUBE_CHANNEL_V2", -"GOOGLE_USER_PHOTO_V2", -"GOOGLE_USER_PHOTO_ALBUM", -"GOOGLE_PHOTO_RECIPE", -"THING", -"CREATIVE_WORK", -"EVENT", -"INTANGIBLE", -"ORGANIZATION", -"PERSON", -"PLACE", -"PRODUCT", -"ARTICLE", -"BLOG_POSTING", -"NEWS_ARTICLE", -"SCHOLARLY_ARTICLE", -"BLOG", -"BOOK", -"COMMENT", -"ITEM_LIST", -"MAP", -"MEDIA_OBJECT", -"AUDIO_OBJECT", -"IMAGE_OBJECT", -"MUSIC_VIDEO_OBJECT", -"VIDEO_OBJECT", -"MOVIE", -"MUSIC_PLAYLIST", -"MUSIC_ALBUM", -"MUSIC_RECORDING", -"PAINTING", -"PHOTOGRAPH", -"RECIPE", -"REVIEW", -"SCULPTURE", -"SOFTWARE_APPLICATION", -"MOBILE_APPLICATION", -"WEB_APPLICATION", -"TV_EPISODE", -"TV_SEASON", -"TV_SERIES", -"WEB_PAGE", -"ABOUT_PAGE", -"CHECKOUT_PAGE", -"COLLECTION_PAGE", -"IMAGE_GALLERY", -"VIDEO_GALLERY", -"CONTACT_PAGE", -"ITEM_PAGE", -"PROFILE_PAGE", -"SEARCH_RESULTS_PAGE", -"WEB_PAGE_ELEMENT", -"SITE_NAVIGATION_ELEMENT", -"TABLE", -"WP_AD_BLOCK", -"WP_FOOTER", -"WP_HEADER", -"WP_SIDEBAR", -"APP_INVITE", -"EMOTISHARE", -"BUSINESS_EVENT", -"CHILDRENS_EVENT", 
-"COMEDY_EVENT", -"DANCE_EVENT", -"EDUCATION_EVENT", -"FESTIVAL", -"FOOD_EVENT", -"LITERARY_EVENT", -"MUSIC_EVENT", -"SALE_EVENT", -"SOCIAL_EVENT", -"SPORTS_EVENT", -"THEATER_EVENT", -"VISUAL_ARTS_EVENT", -"RESERVATION", -"TRAVEL_EVENT", -"CORPORATION", -"EDUCATIONAL_ORGANIZATION", -"COLLEGE_OR_UNIVERSITY", -"ELEMENTARY_SCHOOL", -"HIGH_SCHOOL", -"MIDDLE_SCHOOL", -"PRESCHOOL", -"SCHOOL", -"GOVERNMENT_ORGANIZATION", -"LOCAL_BUSINESS", -"ANIMAL_SHELTER", -"AUTOMOTIVE_BUSINESS", -"AUTO_BODY_SHOP", -"AUTO_DEALER", -"AUTO_PARTS_STORE", -"AUTO_RENTAL", -"AUTO_REPAIR", -"AUTO_WASH", -"GAS_STATION", -"MOTORCYCLE_DEALER", -"MOTORCYCLE_REPAIR", -"CHILD_CARE", -"DRY_CLEANING_OR_LAUNDRY", -"EMERGENCY_SERVICE", -"FIRE_STATION", -"HOSPITAL", -"POLICE_STATION", -"EMPLOYMENT_AGENGY", -"ENTERTAINMENT_BUSINESS", -"ADULT_ENTERTAINMENT", -"AMUSEMENT_PARK", -"ART_GALLERY", -"CASINO", -"COMEDY_CLUB", -"MOVIE_THEATER", -"NIGHT_CLUB", -"FINANCIAL_SERVICE", -"ACCOUNTING_SERVICE", -"AUTOMATED_TELLER", -"BANK_OR_CREDIT_UNION", -"INSURANCE_AGENCY", -"FOOD_ESTABLISHMENT", -"BAKERY", -"BAR_OR_PUB", -"BREWERY", -"CAFE_OR_COFFEE_SHOP", -"FAST_FOOD_RESTAURANT", -"ICE_CREAM_SHOP", -"RESTAURANT", -"WINERY", -"GOVERNMENT_OFFICE", -"POST_OFFICE", -"HEALTH_AND_BEAUTY_BUSINESS", -"BEAUTY_SALON", -"DAY_SPA", -"HAIR_SALON", -"HEALTH_CLUB", -"NAIL_SALON", -"TATTOO_PARLOR", -"HOME_AND_CONSTRUCTION_BUSINESS", -"ELECTRICIAN", -"GENERAL_CONTRACTOR", -"HVAC_BUSINESS", -"HOUSE_PAINTER", -"LOCKSMITH", -"MOVING_COMPANY", -"PLUMBER", -"ROOFING_CONTRACTOR", -"INTERNET_CAFE", -"LIBRARY", -"LODGING_BUSINESS", -"BED_AND_BREAKFAST", -"HOSTEL", -"HOTEL", -"MOTEL", -"MEDICAL_ORGANIZATION", -"DENTIST", -"MEDICAL_CLINIC", -"OPTICIAN", -"PHARMACY", -"PHYSICIAN", -"VETERINARY_CARE", -"PROFESSIONAL_SERVICE", -"ATTORNEY", -"NOTARY", -"RADIO_STATION", -"REAL_ESTATE_AGENT", -"RECYCLING_CENTER", -"SELF_STORAGE", -"SHOPPING_CENTER", -"SPORTS_ACTIVITY_LOCATION", -"BOWLING_ALLEY", -"EXERCISE_GYM", -"GOLF_COURSE", 
-"PUBLIC_SWIMMING_POOL", -"SKI_RESORT", -"SPORTS_CLUB", -"STADIUM_OR_ARENA", -"TENNIS_COMPLEX", -"STORE", -"BIKE_STORE", -"BOOK_STORE", -"CLOTHING_STORE", -"COMPUTER_STORE", -"CONVENIENCE_STORE", -"DEPARTMENT_STORE", -"ELECTRONICS_STORE", -"FLORIST", -"FURNITURE_STORE", -"GARDEN_STORE", -"GROCERY_STORE", -"HARDWARE_STORE", -"HOBBY_SHOP", -"HOME_GOODS_STORE", -"JEWELRY_STORE", -"LIQUOR_STORE", -"MENS_CLOTHING_STORE", -"MOBILE_PHONE_STORE", -"MOVIE_RENTAL_STORE", -"MUSIC_STORE", -"OFFICE_EQUIPMENT_STORE", -"OUTLET_STORE", -"PAWN_SHOP", -"PET_STORE", -"SHOE_STORE", -"SPORTING_GOODS_STORE", -"TIRE_SHOP", -"TOY_STORE", -"WHOLESALE_STORE", -"TELEVISION_STATION", -"TOURIST_INFORMATION_CENTER", -"TRAVEL_AGENCY", -"PERFORMING_GROUP", -"MUSIC_GROUP", -"ADMINISTRATIVE_AREA", -"CITY", -"COUNTRY", -"STATE", -"CIVIC_STRUCTURE", -"AIRPORT", -"AQUARIUM", -"BEACH", -"BUS_STATION", -"BUS_STOP", -"CAMPGROUND", -"CEMETERY", -"CREMATORIUM", -"EVENT_VENUE", -"GOVERNMENT_BUILDING", -"CITY_HALL", -"COURTHOUSE", -"DEFENCE_ESTABLISHMENT", -"EMBASSY", -"LEGISLATIVE_BUILDING", -"MUSEUM", -"MUSIC_VENUE", -"PARK", -"PARKING_FACILITY", -"PERFORMING_ARTS_THEATER", -"PLACE_OF_WORSHIP", -"BUDDHIST_TEMPLE", -"CATHOLIC_CHURCH", -"CHURCH", -"HINDU_TEMPLE", -"MOSQUE", -"SYNAGOGUE", -"PLAYGROUND", -"R_V_PARK", -"RESIDENCE", -"APARTMENT_COMPLEX", -"GATED_RESIDENCE_COMMUNITY", -"SINGLE_FAMILY_RESIDENCE", -"TOURIST_ATTRACTION", -"SUBWAY_STATION", -"TAXI_STAND", -"TRAIN_STATION", -"ZOO", -"LANDFORM", -"BODY_OF_WATER", -"CANAL", -"LAKE_BODY_OF_WATER", -"OCEAN_BODY_OF_WATER", -"POND", -"RESERVOIR", -"RIVER_BODY_OF_WATER", -"SEA_BODY_OF_WATER", -"WATERFALL", -"CONTINENT", -"MOUNTAIN", -"VOLCANO", -"LANDMARKS_OR_HISTORICAL_BUILDINGS", -"USER_INTERACTION", -"USER_PLUS_ONES", -"ENUMERATION", -"BOOK_FORMAT_TYPE", -"ITEM_AVAILABILITY", -"OFFER_ITEM_CONDITION", -"JOB_POSTING", -"LANGUAGE", -"OFFER", -"QUANTITY", -"DISTANCE", -"DURATION", -"ENERGY", -"MASS", -"RATING", -"AGGREGATE_RATING", -"STRUCTURED_VALUE", 
-"CONTACT_POINT", -"POSTAL_ADDRESS", -"GEO_COORDINATES", -"GEO_SHAPE", -"NUTRITION_INFORMATION", -"PRESENTATION_OBJECT", -"DOCUMENT_OBJECT", -"SPREADSHEET_OBJECT", -"FORM_OBJECT", -"DRAWING_OBJECT", -"PLACE_REVIEW", -"FILE_OBJECT", -"PLAY_MUSIC_TRACK", -"PLAY_MUSIC_ALBUM", -"MAGAZINE", -"CAROUSEL_FRAME", -"PLUS_EVENT", -"HANGOUT", -"HANGOUT_BROADCAST", -"HANGOUT_CONSUMER", -"CHECKIN", -"EXAMPLE_OBJECT", -"SQUARE", -"SQUARE_INVITE", -"PLUS_PHOTO", -"PLUS_PHOTO_ALBUM", -"LOCAL_PLUS_PHOTO_ALBUM", -"PRODUCT_REVIEW", -"FINANCIAL_QUOTE", -"DEPRECATED_TOUR_OBJECT", -"PLUS_PAGE", -"GOOGLE_CHART", -"PLUS_PHOTOS_ADDED_TO_COLLECTION", -"RECOMMENDED_PEOPLE", -"PLUS_POST", -"DATE", -"DRIVE_OBJECT_COLLECTION", -"NEWS_MEDIA_ORGANIZATION", -"DYNAMITE_ATTACHMENT_METADATA", -"DYNAMITE_MESSAGE_METADATA" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false -], -"enumDescriptions": [ -"Largely deprecated, effectively an error condition and should not be in storage.", -"Embeds V2 types", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"The action of checking in, as opposed to a \"check-in\".", -"", -"", -"The act of commenting, which might result in a comment.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Boswell story (see goto/boswell)", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"A photo stored in photo service owned by a Google account user. This is distinct from PlusPhoto as it isn't tied to GPlus, but is instead intended to be a more general photo tied to a google user.", -"A photo album in photo service owned by a Google account user. This is distinct from PlusPhotoAlbum as it isn't tied to GPlus, but is instead intended to be a general photo album tied to a google user.", -"An embed used to create a single photo in photo service. 
This type is never stored but is used to create a GOOGLE_USER_PHOTO_V2 or PLUS_PHOTO_V2 embed.", -"Embeds V1 types", -"", -"", -"", -"", -"", -"", -"", -"CREATIVE_WORK subtypes", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"EVENT subtypes", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ORGANIZATION subtypes", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"PLACE subtypes", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"NOTE(jpanzer): This is a badly designed hierarchy and we should avoid depending on Event properties inside UserInteractions as much as possible IMHO.", -"", -"Intangibles, primarily used as sub-objects of other types", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"CREATIVE_WORK extensions", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Single frame for http://goto/carousel.", -"EVENT extensions", -"No declared proto. 
Used only as a base type for now.", -"", -"", -"", -"", -"NOTE(melchang): These correspond to http://schema.org/WebPage/Community and http://schema.org/WebPage/CommunityInvite. See b/7653610 for why these are \"SQUARE\" and not \"COMMUNITY\".", -"", -"", -"", -"", -"", -"", -"", -"", -"Data visualizations. See http://code.google.com/apis/chart/index.html", -"", -"A collection of people that have been recommended to a user.", -"A Google+ post.", -"An http://schema.org/Date unstructured ISO-8859 timestamp string.", -"Embed representing a collection of multiple Drive objects.", -"https://schema.org/NewsMediaOrganization", -"Deprecated in favor of DYNAMITE_MESSAGE_METADATA", -"Used for Dynamite message metadata in Hangouts" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"EmbedsPackagingServiceClient": { -"description": "Developers register a client in Google API Console to get the deep-linking feature on Google+ posts or frames about their apps. The client data is stored in this proto.", -"id": "EmbedsPackagingServiceClient", -"properties": { -"androidPackageName": { -"description": "Android app's package name to generate the deep-link URI.", -"type": "string" -}, -"iosAppStoreId": { -"description": "iOS app's App Store ID to generate the App Store URL when app is not installed on device.", -"type": "string" -}, -"iosBundleId": { -"description": "iOS app's bundle ID to generate the deep-link URI.", -"type": "string" -}, -"type": { -"description": "Type of Google API Console client.", -"enum": [ -"ANDROID", -"IOS" -], -"enumDescriptions": [ -"Client for Android app.", -"Client for iOS app." 
-], -"type": "string" -} -}, -"type": "object" -}, -"EmbedsProvenance": { -"description": "This field records where the ItemScope was retrieved, if it was created via a web fetch.", -"id": "EmbedsProvenance", -"properties": { -"annotationBlob": { -"description": "Annotation blob from Annotation Service.", -"format": "byte", -"type": "string" -}, -"canonicalUrl": { -"description": "Canonical url of the retrieved_url, if one was resolved during retrieval, for example, if a rel=\"canonical\" link tag was provided in the retrieved web page.", -"type": "string" -}, -"inputUrl": { -"description": "The url originally passed in the PRS request, which should be used to re-discover the content. Note that this URL may be a forwarding service or link shortener (bit.ly), so it should not be assumed to be canonical, but should be used for navigation back to the original source of the itemscope.", -"type": "string" -}, -"itemtype": { -"description": "Contains exact types as parsed, whether or not we recognized that type at parse time. If an itemscope is created by merging SchemaOrg markup and open graph markup then the first itemtype would be schemaorg type, the second would be open graph and so on. example: http://schema.org/VideoObject, og:video.movie Plain text; usually a URL", -"items": { -"type": "string" -}, -"type": "array" -}, -"retrievedTimestampMsec": { -"description": "The server retrieved timestamp (in msec).", -"format": "uint64", -"type": "string" -}, -"retrievedUrl": { -"description": "The final URL that was the actual source of the itemscope, after any redirects.", -"type": "string" -} -}, -"type": "object" -}, -"EmbedsTransientData": { -"description": "Transient generic data that will not be saved on the server.", -"id": "EmbedsTransientData", -"properties": {}, -"type": "object" -}, -"EventIdMessage": { -"description": "An EventId is a 128 bit identifier that uniquely identifies an event, such as a query. 
The event time recorded to the nearest microsecond, along with information about the process generating the event, ensures that all EventIds are unique. Details of this EventId are described in a design document: http://www/eng/designdocs/sawmill/adlogs.html", -"id": "EventIdMessage", -"properties": { -"processId": { -"description": "process_id is an integer that identifies the process on this machine that generated this event. This id is calculated once when the server generates its first event, and may change if the process is migrated to a different host. This field has a very specific format mandated by the logs collection infrastructure, which is subject to change WITHOUT NOTICE. As of 2013-01-09, this format is: uint32 process_id = (time(NULL) << 24) + (getpid() & 0xFFFFFF); If you are generating an extended_pid directly, you MUST use one of the maintained library implementations in order to generate it properly: C++ //borg/borgletlib:extended_pid; call borg::ExtendedPid() Python //borg/borgletlib/python:pyextendedpid; call ExtendedPid() Go //borg/borgletlib/go:extendedpid; call Get() Java //java/com/google/common/logging; call EventId.getPid() If you think that you need to parse the values of this field, please contact logs-collection-dev@ to discuss your requirement.", -"format": "uint32", -"type": "integer" -}, -"serverIp": { -"description": "server_ip is the IPv4 address or http://go/ghostid of the machine running the server that created this event message. This allows us to distinguish between events that occur at the same time on different servers. Format: 10.1.2.3 is stored as 0x0a010203, and GHostId 1 as 0x00000001.", -"format": "uint32", -"type": "integer" -}, -"timeUsec": { -"description": "time_usec is the number of microseconds since the epoch (i.e., since 1970-01-01 00:00:00 UTC) as an int64: 1e6 * (unix time) + microseconds. 
Applications must ensure that EventIdMessages have increasing times, artificially increasing time_usec to one greater than the previous value if necessary. Alternate implementations were considered: 1. storing unix time and microseconds separately would require a bit more storage, and the convenience of having a single value representing the time seemed more useful than having trivial access to a unix time. 2. storing unix time in the upper 32 bits would allow for more precision - up to 4G events/second, but it wouldn't print nicely as a decimal value and it seems unlikely that any single server would ever sustain more than 1M events/second. 3. Java-compatible time uses millis - this would limit servers to 1000 events per second - too small. Other names for this field were considered, including time, time_stamp, and utime. We felt that including the units in the name would tend to produce more readable code. utime might be interpreted as user time. unix timestamp * 1e6 + microseconds", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"ExtraSnippetInfoResponse": { -"description": "the extra info response from ascorer used to build snippets in GWS experiments", -"id": "ExtraSnippetInfoResponse", -"properties": { -"matchinfo": { -"$ref": "ExtraSnippetInfoResponseMatchInfo" -}, -"querysubitem": { -"items": { -"$ref": "ExtraSnippetInfoResponseQuerySubitem" -}, -"type": "array" -}, -"tidbit": { -"items": { -"$ref": "ExtraSnippetInfoResponseTidbit" -}, -"type": "array" -} -}, -"type": "object" -}, -"ExtraSnippetInfoResponseMatchInfo": { -"id": "ExtraSnippetInfoResponseMatchInfo", -"properties": { -"titleMatches": { -"description": "bitvector of query items matching the title", -"format": "uint64", -"type": "string" -}, -"urlMatches": { -"description": "bitvector of query items matching the url", -"format": "uint64", -"type": "string" -}, -"weightedItems": { -"description": "bitvector of query items considered by chooser", -"format": "uint64", -"type": 
"string" -} -}, -"type": "object" -}, -"ExtraSnippetInfoResponseQuerySubitem": { -"description": "A query term, phrase, or synonym. An original query term or phrase is called an \"item\". Each item may have more than one \"subitem\" if there are synonyms. In rare cases a subitem may correspond to multiple items, such as the subitem \"cia\" in the query [central intelligence agency].", -"id": "ExtraSnippetInfoResponseQuerySubitem", -"properties": { -"isHighlighted": { -"description": "Additional information from the SnippetQuery.", -"type": "boolean" -}, -"isOptional": { -"type": "boolean" -}, -"isOriginal": { -"description": "true iff this subitem was an original query term or phrase. Can only be false if want_all_query_subitems == true in the request.", -"type": "boolean" -}, -"items": { -"description": "a bitvector of the query items corresponding to this subitem. Typically only one bit is set, but see comment above.", -"format": "int32", -"type": "integer" -}, -"text": { -"description": "text associated with this query item", -"type": "string" -}, -"weight": { -"description": "the weight of this query item, as calculated by SubitemWeight(): https://qwiki.corp.google.com/display/Q/SnippetWeights", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"ExtraSnippetInfoResponseTidbit": { -"id": "ExtraSnippetInfoResponseTidbit", -"properties": { -"anchorinfo": { -"$ref": "ExtraSnippetInfoResponseTidbitAnchorInfo" -}, -"begin": { -"description": "For tidbits only: position of tidbit in the document. 
More specifically, tidbit is found at [begin, end) in the document's tokens.", -"format": "int32", -"type": "integer" -}, -"end": { -"format": "int32", -"type": "integer" -}, -"items": { -"description": "a bitvector of each query term within this tidbit", -"format": "uint64", -"type": "string" -}, -"score": { -"description": "the score for this tidbit if there was one this is returned for Snippets and Tidbits and is only meaningful for comparing between objects of the same type (snippet to snippet, tidbit to tidbit)", -"format": "float", -"type": "number" -}, -"text": { -"description": "the tidbit text, with search terms already highlighted", -"type": "string" -}, -"type": { -"enum": [ -"TIDBIT", -"BODY", -"META", -"GWD", -"FULL", -"ANCHOR" -], -"enumDescriptions": [ -"it is a tidbit returned by want_all_tidbits", -"these are the best candidate snippets from each section, returned by want_all_snippets", -"", -"NOTE(kinoue): ODP/GWD snippet is unlaunched as of June 2017. This is no longer used.", -"", -"NOTE(kinoue): Deprecated and unsupported. 
anchor text, returned if max_anchors_wanted > 0" -], -"type": "string" -} -}, -"type": "object" -}, -"ExtraSnippetInfoResponseTidbitAnchorInfo": { -"description": "this information is specific to anchors and is only returned if type == ANCHOR", -"id": "ExtraSnippetInfoResponseTidbitAnchorInfo", -"properties": { -"offdomainCount": { -"format": "int32", -"type": "integer" -}, -"ondomainCount": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"FaceIndexing": { -"id": "FaceIndexing", -"properties": { -"mustangBytes": { -"description": "Always use image/search/utils/face_proto_util.h for packing and unpacking these values.", -"format": "byte", -"type": "string" -}, -"mustangBytesVersion": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"FatcatCompactBinaryClassification": { -"id": "FatcatCompactBinaryClassification", -"properties": { -"binaryClassifier": { -"description": "Either binary_classifier will be set, using the enum above, or binary_classifier_name will be set, if it is not one of the classifiers in the enum - never both.", -"enum": [ -"BLOG", -"FORUM", -"LOGIN", -"B2B_OK", -"IMAGES", -"SOCIAL", -"PURCHASING_INTENT", -"PORN", -"ADULTISH", -"VIOLENCE_GORE", -"GOSSIP" -], -"enumDescriptions": [ -"Pagetypes", -"", -"", -"", -"", -"", -"", -"Sensitive content", -"", -"", -"" -], -"type": "string" -}, -"binaryClassifierName": { -"type": "string" -}, -"discreteFraction": { -"description": "A CompactDocClassification will not usually have a weight. For a CompactSiteClassification, this value will be 0...127 corresponding to 0.0...1.0, indicating fraction of the site that this label applies to", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"FatcatCompactDocClassification": { -"description": "The result of PetacatAnnotator. Each result contains: 1. RephilClusters; 2. At most 5 verticals from each taxonomy, sorted by the probabilities in descending order. 3. 
Binary classification results about page types and sensitive content. The types of taxonomies include: verticals4, geo, verticals4_geo, products_services, icm_im_audiences and icm_im_audiences_dev.", -"id": "FatcatCompactDocClassification", -"properties": { -"binary": { -"items": { -"$ref": "FatcatCompactBinaryClassification" -}, -"type": "array" -}, -"clusters": { -"$ref": "FatcatCompactRephilClusters" -}, -"epoch": { -"type": "string" -}, -"langCode": { -"type": "string" -}, -"rephilModelId": { -"description": "The id of the Rephil model used to generate the Rephil clusters. If it is absent, Rephil 4 is assumed.", -"format": "int32", -"type": "integer" -}, -"taxonomic": { -"items": { -"$ref": "FatcatCompactTaxonomicClassification" -}, -"type": "array" -}, -"url": { -"description": "not needed if the url is the sstable / bigtable key used during intermediate processing only", -"type": "string" -}, -"weight": { -"description": "The relative weight of this doc within a site, typically something like pagerank or navboost impressions. 
May be a large number (like an actual pageviews estimate), not limited to a small range.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"FatcatCompactRephilClusters": { -"id": "FatcatCompactRephilClusters", -"properties": { -"cluster": { -"items": { -"$ref": "FatcatCompactRephilClustersCluster" -}, -"type": "array" -} -}, -"type": "object" -}, -"FatcatCompactRephilClustersCluster": { -"id": "FatcatCompactRephilClustersCluster", -"properties": { -"discreteWeight": { -"description": "0...127 corresponds to 0.0 - 1.0", -"format": "int32", -"type": "integer" -}, -"id": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"FatcatCompactTaxonomicClassification": { -"description": "A version of this proto for logging is available at cs/symbol:logged_fatcat.LoggedCompactTaxonomicClassification", -"id": "FatcatCompactTaxonomicClassification", -"properties": { -"category": { -"items": { -"$ref": "FatcatCompactTaxonomicClassificationCategory" -}, -"type": "array" -}, -"classifierVersion": { -"type": "string" -}, -"taxonomy": { -"description": "Either taxonomy will be set, using the enum above, or taxonomy_name will be set (if the taxonomy is not one of the ones in the enum) - never both", -"enum": [ -"VERTICALS", -"VERTICALS4", -"VERTICALS4_GEO", -"GEO", -"PRODUCTS_SERVICES", -"ICM_IM_AUDIENCES", -"ICM_IM_AUDIENCES_DEV" -], -"enumDescriptions": [ -"DEPRECATED Verticals, v3", -"Publisher Verticals, v4. go/verticals4", -"Publisher Verticals, v4 + World Localities", -"World Localities", -"Products & Services", -"ICM In Market Audiences", -"Experimental ICM In Market Audiences" -], -"type": "string" -}, -"taxonomyName": { -"type": "string" -} -}, -"type": "object" -}, -"FatcatCompactTaxonomicClassificationCategory": { -"description": "A taxonomic category. 
A classification consists of weight (totalling 1.0) distributed among one or more categories.", -"id": "FatcatCompactTaxonomicClassificationCategory", -"properties": { -"discreteWeight": { -"description": "go/petacat-faq#how-should-i-interpret-classification-weights Discrete to reduce size. Range is [0,127], corresponding to [0.0,1.0].", -"format": "int32", -"type": "integer" -}, -"id": { -"description": "The category's ID, e.g. 20 for /Sports in the go/verticals4 taxonomy.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"FocusBackendContactDetailHash": { -"description": "http://go/contact-detail-hash.", -"id": "FocusBackendContactDetailHash", -"properties": { -"type": { -"enum": [ -"UNSPECIFIED", -"PHONE", -"EMAIL" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"value": { -"description": "The hash here will be a 16-bit weak hash to avoid reverse engineering for decoding the actual contact detail. The hash value is computed by the fingerprint of the raw contact detail mod 2^16.", -"format": "uint32", -"type": "integer" -} -}, -"type": "object" -}, -"FocusBackendContactPointer": { -"description": "A contact pointer that represents a contact (http://go/assistant-contact-id).", -"id": "FocusBackendContactPointer", -"properties": { -"annotationId": { -"description": "The annotation ID. Annotations are only allowed to point to annotations that do not themselves have a pointer (avoids any possibilty of loops). Cast this field to string in javascript to make it compile in js.", -"format": "int64", -"type": "string" -}, -"deviceRawContactId": { -"$ref": "FocusBackendDeviceRawContactId", -"description": "The raw contact ID from an active mobile device of the user." -}, -"focusContactId": { -"description": "The contact ID from the Focus backend. 
Cast this field to string in javascript to make it compile in js.", -"format": "int64", -"type": "string" -}, -"otherContactId": { -"$ref": "FocusBackendOtherContactId", -"description": "Additional contact ids that are not actively used to match contact pointers to contacts." -}, -"secondaryId": { -"$ref": "FocusBackendSecondaryContactId", -"description": "The secondary identifier of contact. It will be used when the primary ID doesn't match any contact." -} -}, -"type": "object" -}, -"FocusBackendDeviceContactId": { -"description": "//////////////////// DeviceContactId ////////////////////// Used by Device Contacts only. For more details see go/fbs-support-for-device-contacts.", -"id": "FocusBackendDeviceContactId", -"properties": { -"ContactId": { -"description": "DeviceContact Id.", -"format": "int64", -"type": "string" -}, -"DeviceId": { -"$ref": "FocusBackendDeviceId", -"description": "Device Id." -} -}, -"type": "object" -}, -"FocusBackendDeviceId": { -"description": "//////////////////// DeviceId ////////////////////// Used by Device Contacts only. For more details see go/fbs-support-for-device-contacts.", -"id": "FocusBackendDeviceId", -"properties": { -"AndroidDeviceId": { -"description": "The GServices id on Android. See go/android-id.", -"format": "int64", -"type": "string" -}, -"Hash": { -"description": "DeviceId.Hash is a SHA256 of some attribute of the user and device. For Android devices: Hash = SHA256(gaia_account_name + \u201c:\u201d + \u201c1\u201d + \u201c:\u201d + (android id - LSB)); For iOS devices: Hash = TOLOWER(HEX(GMCSComputeUserDeviceToken(userId, iOsDeviceId)) For more details see go/client-instance-id.", -"type": "string" -} -}, -"type": "object" -}, -"FocusBackendDeviceRawContactId": { -"description": "//////////////////// DeviceRawContactId ////////////////////// Used by Device Contacts Only. The Raw ID as assigned to the original contact on the device. 
For more details see go/fbs-support-for-device-contacts.", -"id": "FocusBackendDeviceRawContactId", -"properties": { -"DeviceId": { -"$ref": "FocusBackendDeviceId" -}, -"RawContactId": { -"description": "Raw ID assigned by the device. Cast this field to string in javascript to make it compile in js.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"FocusBackendOtherContactId": { -"description": "Additional contact ids that are not actively used to match contact pointers to contacts. There may be overlap with primary or secondary contact ids.", -"id": "FocusBackendOtherContactId", -"properties": { -"deviceContactId": { -"description": "Device contact ID, when available: - The annotation points to a device contact, and the device contact id was correctly populated when the annotation was created. Note that the device contact id is populated once per device contact on a device. It is distinct from RawContactId - a single device contact may have multiple raw contact ids. - The annotation points to a Focus contact that was merged with device contact information in Starlight. When the annotation was created, a device contact id was available on the merged person object. - The contact annotation was created from April 2021 onwards. All prior annotations do not populate this field. ContactPointer creation relies on the client caller to correctly populate the device contact id, and does not enforce any assumptions on availability of this field. This field is repeated because in rare cases Starlight may merge device contact information across different devices into a single merged person object. WARNING: Use with extreme caution! This ID is not stable. 
For more details see go/fbs-support-for-device-contacts.", -"items": { -"$ref": "FocusBackendDeviceContactId" -}, -"type": "array" -} -}, -"type": "object" -}, -"FocusBackendSecondaryContactId": { -"description": "The secondary ID of a contact.", -"id": "FocusBackendSecondaryContactId", -"properties": { -"contactDetailHash": { -"description": "The hashes of the contact details (e.g. phone number and email address).", -"items": { -"$ref": "FocusBackendContactDetailHash" -}, -"type": "array" -}, -"contactName": { -"description": "The contact's full name, not hashed.", -"type": "string" -}, -"contactNameHash": { -"description": "The hash of contact's full name, generated using Fingerprint2011(). Cast this field to string in javascript to make it compile in js.", -"format": "uint64", -"type": "string" -} -}, -"type": "object" -}, -"FreebaseCitation": { -"description": "Citation contains the information needed to correctly attribute the source of data.", -"id": "FreebaseCitation", -"properties": { -"dataset": { -"description": "Mid of the dataset.", -"type": "string" -}, -"isAttributionRequired": { -"description": "If set to true, the citation is required to be displayed when the data is used.", -"type": "boolean" -}, -"project": { -"description": "Name of the project of the data's origin.", -"type": "string" -}, -"provider": { -"description": "The name of the provider of this information.", -"type": "string" -}, -"statement": { -"description": "A human readable statement of attribution.", -"type": "string" -}, -"uri": { -"description": "Uri link associated with this data.", -"type": "string" -} -}, -"type": "object" -}, -"FreebaseId": { -"description": "An Id contains the identifiers used to reference this topic (entity) in the Knowledge Graph. 
The Knowledge Graph supports several forms of identifiers: - \"mids\" (machine ids) that are assigned at creation time, and support a resolution mechanism that tracks topics after they are merged (for more about mids, see go/kg-mid), - \"ids\" are human-readable ids (HRIDs) that are derived from a namespace hierarchy stored in Knowledge Graph, and a set of rules, - \"guids\" are low-level ids historically used in Freebase (pre-Knowledge Graph, deprecated). Only the mid and id are supplied here. Note that mids can be converted to guids or uint64s (see //metaweb/util/mid/mid.h).", -"id": "FreebaseId", -"properties": { -"id": { -"description": "\"id\" may be a human readable ID (HRID) or a MID. Originally it was intended to always be a human readable ID, but that convention was not always followed so clients should be wary. Not every topic has an id.", -"type": "string" -}, -"mid": { -"description": "The \"mid\" should be used whenever a globally unique, primary key into the Knowledge Graph is needed. These keys are always prefixed with the \"/m\" and \"/g\", (and more rarely the \"/x\" and \"/t\") namespaces, and are alphanumeric strings consisting of lowercase letters excluding vowels, numbers and the underscore character. (Applications should not assume a constant length for these strings as Livegraph reserves the right to extend the number of characters to accommodate more topics.)", -"type": "string" -} -}, -"type": "object" -}, -"FreebaseLatLong": { -"description": "Represents a geopoint, which is one of the possible Value types.", -"id": "FreebaseLatLong", -"properties": { -"latDeg": { -"format": "float", -"type": "number" -}, -"longDeg": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"FreebaseMeasurement": { -"description": "Represents a measurements, which is one of the possible Value types. 
A measurement value like \"5.2 meter^2 / second\" would be represented as: magnitude: 5.2 unit { unit_mid: \"/m/mid_for_meter\" power: 2 } unit { unit_mid: \"/m/mid_for_second\" power: -1 }", -"id": "FreebaseMeasurement", -"properties": { -"magnitude": { -"format": "float", -"type": "number" -}, -"unit": { -"description": "Repeated units are interpreted as a product. i.e. (meter ^ 1) * (second ^ -2)", -"items": { -"$ref": "FreebaseMeasurementUnit" -}, -"type": "array" -} -}, -"type": "object" -}, -"FreebaseMeasurementUnit": { -"id": "FreebaseMeasurementUnit", -"properties": { -"power": { -"format": "int32", -"type": "integer" -}, -"unit": { -"$ref": "FreebaseId" -}, -"unitMid": { -"deprecated": true, -"description": "Deprecated fields.", -"type": "string" -} -}, -"type": "object" -}, -"FreebaseNestedStruct": { -"description": "List of { predicate, { object } } to be processed as a Nested Struct. Nested Struct can be recursive. NestedStruct.property_value(i).value(j) may have nested_struct field.", -"id": "FreebaseNestedStruct", -"properties": { -"propertyValue": { -"items": { -"$ref": "FreebasePropertyValue" -}, -"type": "array" -} -}, -"type": "object" -}, -"FreebasePropertyValue": { -"description": "A PropertyValue associates properties with values in the context of a topic.", -"id": "FreebasePropertyValue", -"properties": { -"property": { -"$ref": "FreebaseId", -"description": "The id of the property." -}, -"totalValueCount": { -"description": "Indicates the total values that exist for this property, even if they aren't all present in the value field, due to truncation.", -"format": "int64", -"type": "string" -}, -"value": { -"description": "The value associated with the property for the containing topic.", -"items": { -"$ref": "FreebaseValue" -}, -"type": "array" -}, -"valueStatus": { -"description": "If ValueStatus is not set at all, the implication is that there are well-known value(s), specified in the \"value\" field. 
(It should be considered malformed data to have value_status set when len(values) > 0.)", -"enum": [ -"HAS_UNKNOWN_VALUE", -"HAS_NO_VALUE" -], -"enumDescriptions": [ -"There are values, but we don't know what they are. (e.g. person is known to have children, but we don't know who they are) (This is only sensible if len(values) == 0; it is an error to see this value if len(values) > 0)", -"We know that there are no values for this predicate. (e.g. person is known to have zero children) (This is only sensible if len(values) == 0; it is an error to see this value if len(values) > 0)" -], -"type": "string" -} -}, -"type": "object" -}, -"FreebaseTopic": { -"description": "A Topic represents a Knowledge Graph entity with its associated properties and their values.", -"id": "FreebaseTopic", -"properties": { -"id": { -"$ref": "FreebaseId", -"description": "The id (mid and human-readable id) of the topic. The id will always be present and will contain a mid value for topics in the topic sstable." -}, -"propertyValue": { -"description": "The property-value bindings associated with the topic. Note that in the case where a property is relevant to a topic based on its type, but no values of that property are present for the topic, the PropertyValue will simply not appear, rather than being present with a null value, or empty repeated value list.", -"items": { -"$ref": "FreebasePropertyValue" -}, -"type": "array" -} -}, -"type": "object" -}, -"FreebaseValue": { -"description": "Values are effectively a union of several possible Knowledge Graph types: simple primitive datatypes such as booleans, integers and floats, references to other Knowledge Graph topics (by id), or \"compound values\" which are expressed as embedded topics with associated properties and values. 
Values occur in indexed order (if any).", -"id": "FreebaseValue", -"properties": { -"boolValue": { -"description": "Present when value is bool.", -"type": "boolean" -}, -"citation": { -"$ref": "FreebaseCitation", -"deprecated": true, -"description": "Citation data for this value. See: http://go/kg-clap" -}, -"compoundValue": { -"$ref": "FreebaseTopic", -"description": "Compound values are those that contain either a number of simple valued facets (such as a latitude/longitude pair), or \"mediator\" topics representing multi-dimensional relationships between topics. In both cases we represent them here with an embedded topic, although the topic's identity is somewhat secondary to the property/value pairs it contains. (The identity is still made available so that it can be used to perform updates to that mediator on the Knowledge Graph.)" -}, -"deletionProvenance": { -"description": "Deletion provenance for this value.", -"items": { -"$ref": "StorageGraphBfgTripleProvenance" -}, -"type": "array" -}, -"displayLang": { -"description": "The lang of the display_value field.", -"type": "string" -}, -"displayValue": { -"description": "The display value of this value. This is a i18n-aware formatted value if present.", -"type": "string" -}, -"expectedProto": { -"description": "An optional name for a proto field.", -"type": "string" -}, -"floatValue": { -"description": "Present when value is float.", -"format": "double", -"type": "number" -}, -"idValue": { -"$ref": "FreebaseId", -"description": "Present when value is an id." -}, -"index": { -"deprecated": true, -"description": "Index of the value relative to the containing property (if any). Knowledge Graph supports a loose notion of indexing: some non-unique properties may have indices, while others may not. Furthermore, for a single property, some values may have indices (such as the top 5 actors in a film), while others may not (the film's supporting cast). 
Un-indexed values will appear at the end of the repeated value list. This field contains the index value only when is present in the Knowledge Graph.", -"format": "uint64", -"type": "string" -}, -"intValue": { -"description": "Present when value is int.", -"format": "int64", -"type": "string" -}, -"lang": { -"description": "Whenever the value is text with TYPE_TEXT, the lang field is populated with the III LanguageCode associated with the string_value field.", -"type": "string" -}, -"latLongValue": { -"$ref": "FreebaseLatLong", -"deprecated": true -}, -"measurementValue": { -"$ref": "FreebaseMeasurement", -"deprecated": true -}, -"nestedStruct": { -"$ref": "FreebaseNestedStruct", -"description": "Populated if this value holds NestedStruct. 'type' field needs to be set to TYPE_NESTED_STRUCT." -}, -"provenance": { -"description": "Provenance for this value.", -"items": { -"$ref": "StorageGraphBfgTripleProvenance" -}, -"type": "array" -}, -"rawValue": { -"description": "Similar to string_value/etc but contains raw bytes.", -"format": "byte", -"type": "string" -}, -"stringValue": { -"description": "Present when value is text, enum,", -"type": "string" -}, -"subgraphId": { -"deprecated": true, -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -}, -"timestamp": { -"deprecated": true, -"description": "The ISO-8601 timestamp corresponding to when this value was created (when it was written to the Knowledge Graph). 
Deprecated in favor of timestamp_usec.", -"type": "string" -}, -"timestampUsec": { -"description": "The microsecond timestamp corresponding to when this value was created.", -"format": "int64", -"type": "string" -}, -"type": { -"enum": [ -"TYPE_NULL", -"TYPE_ID", -"TYPE_TEXT", -"TYPE_ENUM", -"TYPE_KEY", -"TYPE_URI", -"TYPE_DATETIME", -"TYPE_BOOL", -"TYPE_INT", -"TYPE_FLOAT", -"TYPE_COMPOUND", -"TYPE_PROTO", -"TYPE_EXTENSION", -"TYPE_NESTED_STRUCT", -"TYPE_SEMANTIC_REFERENCE", -"TYPE_LAT_LONG", -"TYPE_MEASUREMENT", -"TYPE_HAS_VALUE", -"TYPE_HAS_NO_VALUE" -], -"enumDescriptions": [ -"No fields are supplied.", -"id_value contains an Id of a referenced topic.", -"For text, string_value contains an UTF-8 string,", -"string_value contains an enum key string.", -"string_value contains a namespace key string.", -"string_value contains a URI string.", -"string_value contains a (possibly truncated)", -"bool_value contains a bool.", -"int_value contains an int. Uint64 are supported", -"float_value contains a float.", -"compound_value contains property/value pairs.", -"raw_value contains a raw proto.", -"This proto has extensions set.", -"nested_struct field is populated.", -"string_value contains a Meaning Schema(go/life-of-a-meaning-schema) type name which needs to be a valid ValueType or SemanticType. This is currently used by semantic type based answer value formatting(go/semantic-type-format) before Search Response Meaning(go/srm-design) is ready for use", -"lat_long_value contains a geopoint.", -"measurement_value contains a measurement.", -"No longer used; pending downstream code cleanup.", -"No longer used; pending downstream code cleanup." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GDocumentBase": { -"description": "Next id: 127", -"id": "GDocumentBase", -"properties": { -"ContentExpiryTime": { -"description": "unix secs from epoch", -"format": "int32", -"type": "integer" -}, -"DisplayUrl": { -"description": "Sometimes the URL displayed in search results should be different from what gets indexed (e.g. in enterprise, content management systems). If this value is not set, we default to the regular URL.", -"type": "string" -}, -"DocId": { -"description": "64-bit docid of the document (usually fingerprint of URL, but not always). WARNING: This does NOT uniquely identify a document ANYMORE. For a unique identifier across all documents in production please refer to the field 'id().key()' listed above.", -"format": "uint64", -"type": "string" -}, -"ExternalFeedMetadata": { -"type": "string" -}, -"ExternalHttpMetadata": { -"description": "Enterprise-specific external metadata. See http://engdoc/eng/designdocs/enterprise/enterprise_indexing_metadata.html", -"type": "string" -}, -"FilterForSafeSearch": { -"deprecated": true, -"description": "Deprecated, do not use, this field is not populated since 2012.", -"format": "int32", -"type": "integer" -}, -"IPAddr": { -"description": "IP addr in binary (allows for IPv6)", -"format": "byte", -"type": "string" -}, -"NoArchiveReason": { -"format": "int32", -"type": "integer" -}, -"NoFollowReason": { -"format": "int32", -"type": "integer" -}, -"NoImageIndexReason": { -"format": "int32", -"type": "integer" -}, -"NoImageframeOverlayReason": { -"format": "int32", -"type": "integer" -}, -"NoIndexReason": { -"description": "When these reasons are set to a non zero value, the document should not be indexed, or show a snippet, or show a cache, etc. 
These reasons are bit maps of indexing.converter.RobotsInfo.RobotedReasons enum values reflecting the places where the restriction was found: //depot/google3/indexing/converter/proto/converter.proto", -"format": "int32", -"type": "integer" -}, -"NoPreviewReason": { -"format": "int32", -"type": "integer" -}, -"NoSnippetReason": { -"format": "int32", -"type": "integer" -}, -"NoTranslateReason": { -"format": "int32", -"type": "integer" -}, -"Pagerank": { -"deprecated": true, -"description": "This field is long-deprecated in favour of Pagerank_NS, it is no longer maintained and can break at any moment.", -"format": "int32", -"type": "integer" -}, -"PagerankNS": { -"description": "Pagerank-NearestSeeds is a pagerank score for the doc, calculated using NearestSeeds method. This is the production PageRank value teams should use.", -"format": "int32", -"type": "integer" -}, -"Repid": { -"description": " is the webmirror representative id of the canonical url. Urls with the same repid are considered as dups in webmirror. WARNING: use this field with caution! The webmirror duprules change frequently, so this value only reflects the duprules at the time when the canonical's docjoin is built.", -"format": "byte", -"type": "string" -}, -"ScienceMetadata": { -"$ref": "ScienceCitation", -"description": "Citation data for science articles." -}, -"URL": { -"description": "WARNING: the URL does NOT uniquely identify a document ANYMORE. For a unique identifier across all documents in production please refer to the field 'id().key()' listed above. Reason: foo.bar:/http and foo.bar:/http:SMARTPHONE share the same URL, but the body of the two documents might differ because of different crawl-context (desktop vs. 
smartphone in this example).", -"type": "string" -}, -"URLAfterRedirects": { -"type": "string" -}, -"URLEncoding": { -"description": "See webutil/urlencoding", -"format": "int32", -"type": "integer" -}, -"content": { -"$ref": "GDocumentBaseContent" -}, -"directory": { -"items": { -"$ref": "GDocumentBaseDirectory" -}, -"type": "array" -}, -"ecnFp": { -"description": "96-bit fingerprint of the canonical url's webmirror equivalence class name as of when this cdoc was exported.", -"format": "byte", -"type": "string" -}, -"id": { -"$ref": "IndexingCrawlerIdServingDocumentIdentifier", -"description": "The primary identifier of a production document is the document key given in the ServingDocumentIdentifier, which is the same as the row-key in Alexandria, and represents a URL and its crawling context. In your production code, please always assume that the document key is the only way to uniquely identify a document. ## Recommended way of reading: const string& doc_key = cdoc.doc().id().key(); ## CHECK(!doc_key.empty()); More background information can be found in google3/indexing/crawler_id/servingdocumentidentifier.proto The ServingDocumentIdentifier uniquely identifies a document in serving and also distinguishes between experimental vs. production documents. The SDI is also used as an input for the union/muppet key generation in serving." -}, -"localsearchDocInfo": { -"$ref": "LocalsearchDocInfo", -"description": "Localsearch-specific data." -}, -"oceanDocInfo": { -"$ref": "OceanDocInfo", -"description": "Ocean-specific data." -}, -"originalcontent": { -"$ref": "GDocumentBaseOriginalContent" -}, -"userAgentName": { -"description": "The user agent name used to crawl the URL. See //crawler/engine/webmirror_user_agents.h for the list of user-agents (e.g. crawler::WebmirrorUserAgents::kGoogleBot). NOTE: This field is copied from the first WEBMIRROR FetchReplyClientInfo in trawler_fetch_info column. 
We leave this field unpopulated if no WEBMIRROR FecthReplyClientInfo is found. As the submission of cl/51488336, Alexandria starts to populate this field. However, docjoins from freshdocs (or any other source), won't have this field populated, because we believe no one needs to read this field from freshdocs docjoins.", -"type": "string" -} -}, -"type": "object" -}, -"GDocumentBaseContent": { -"description": "Main content section", -"id": "GDocumentBaseContent", -"properties": { -"AuthMethod": { -"format": "int32", -"type": "integer" -}, -"ContentLength": { -"description": "The actual length of the content: If Representation is compressed, this equals to Content.UncompressedLength; otherwise it is the length of the representation string.", -"format": "int32", -"type": "integer" -}, -"ContentType": { -"description": "See enum ContentType in //depot/google3/webutil/http/content-type.proto.", -"format": "int32", -"type": "integer" -}, -"CrawlTime": { -"description": "Seconds since Unix epoch.", -"format": "int64", -"type": "string" -}, -"Encoding": { -"description": "See //depot/google3/i18n/encodings/public/encodings.h Encoding of representation", -"format": "int32", -"type": "integer" -}, -"HasHttpHeader": { -"description": "Set to false if Representation does not contain HTTP headers.", -"type": "boolean" -}, -"Language": { -"description": "A Language enum value. See: go/language-enum Default is english", -"format": "int32", -"type": "integer" -}, -"OriginalEncoding": { -"description": "If OriginalEncoding is present, the body part of the Representation was converted to UTF-8, Encoding was set to UTF8, and OriginalEncoding was set to the original encoding before conversion. However, the HTTP headers part of the content might not be valid UTF-8. -1=an invalid value", -"format": "int32", -"type": "integer" -}, -"Representation": { -"description": "Possibly compressed for old documents. 
It is not compressed for docjoins produced by Raffia after ~2012.", -"format": "byte", -"type": "string" -}, -"UncompressedLength": { -"description": "Historically present if Representation is compressed.", -"format": "int32", -"type": "integer" -}, -"VisualType": { -"description": "Whether the content was visual right-to-left, and if so, what type of visual document it is. Must be one of the values in enum VisualType from google3/repository/rtl/visualtype.h Default is NOT_VISUAL_DOCUMENT. See http://wiki/Main/RtlLanguages for background.", -"format": "int32", -"type": "integer" -}, -"crawledFileSize": { -"description": "Crawled file size of the original document.", -"format": "int32", -"type": "integer" -}, -"encodedGeometryAnnotations": { -"description": "GeometryAnnotations, encoded with GeometryUtil::DeltaEncode() to reduce disk space usage. Use GeometryUtil::DeltaDecode() to decode this field.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"GDocumentBaseDirectory": { -"description": "The Directory proto group holds snippet and title metadata which is made available to the snippet code. The proto group was originally created for metadata coming from the Google Web Directory (gwd) project. It has since come to be used to hold metadata from gwd and other sources. ", -"id": "GDocumentBaseDirectory", -"properties": { -"Category": { -"description": "encoded in UTF8", -"type": "string" -}, -"Description": { -"description": "encoded in UTF8", -"type": "string" -}, -"DescriptionScore": { -"deprecated": true, -"format": "float", -"type": "number" -}, -"Identifier": { -"description": "\"gwd\", etc.", -"type": "string" -}, -"Language": { -"description": "go/language-enum", -"format": "int32", -"type": "integer" -}, -"Title": { -"description": "encoded in UTF8", -"type": "string" -}, -"TitleScore": { -"deprecated": true, -"description": "Deprecated; do not use. 
There is no code populating these fields as of Oct 2017.", -"format": "float", -"type": "number" -}, -"URL": { -"type": "string" -} -}, -"type": "object" -}, -"GDocumentBaseOriginalContent": { -"description": "The original, unconverted document, typically PDF or Word. Copied from OriginalDoc field of doclogs. Unlike \"Content\", this does not contain any HTTP headers. The content may be compressed using the same method as \"Content\". In practice it is only compressed in the Teragoogle index. It is never compressed in docjoins because those are compressed at the sstable level. In doclogs content will only be compressed if the Trawler fetchreply is also compressed--which is currently never and unlikely to change for performance reasons.", -"id": "GDocumentBaseOriginalContent", -"properties": { -"Representation": { -"type": "string" -}, -"UncompressedLength": { -"description": "present iff rep is compressed", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GenericSnippetResponse": { -"description": "The generic version of a snippet response", -"id": "GenericSnippetResponse", -"properties": { -"debugInfo": { -"description": "Per-doc debug information.", -"items": { -"type": "string" -}, -"type": "array" -}, -"info": { -"$ref": "Proto2BridgeMessageSet", -"description": "Servlet-specific response info." -}, -"rankingSnippet": { -"description": "The experimental ranking snippet field. This will be populated only when `fastwa_want_ranking_snippet` is set in the request.", -"type": "string" -}, -"snippet": { -"description": "Lines of the snippet HTML. Typically gws concatenates these and lets the browser wrap. The values include trailing spaces, so inserting additional spaces is not necessary. However, for very old browsers, gws may insert break tags after each snippet line. This field is confusing and poorly named; \"snippet_line\" would be better. In particular, note that this does not return multiple snippets for a result. 
Nor are these fields the individual tidbits of the snippet.", -"items": { -"type": "string" -}, -"type": "array" -}, -"title": { -"description": "The title HTML. It may contain tags to denote query term matches. It may be already truncated and \"...\" is put instead (note that truncation does not always happen at the very end of the title text). However the existence of \"...\" does not guarantee that the snippet generation algorithm truncated it; e.g. webmasters themselves can write \"...\".", -"type": "string" -}, -"wwwSnippetResponse": { -"$ref": "WWWSnippetResponse", -"description": "Snippet-specific members (tag ids 16+, must be optional!) Example: optional NewContentResponse new_response;" -} -}, -"type": "object" -}, -"GeoOndemandAssistantSupportedActions": { -"description": "Actions supported by Madden for a local entity.", -"id": "GeoOndemandAssistantSupportedActions", -"properties": { -"allowsGuestCheckout": { -"description": "Whether this local entity allows guest checkout for reservations.", -"type": "boolean" -}, -"isAsynchronousRestaurantReservation": { -"description": "Whether or not this local entity supports asynchronous restaurant reservations, through the above restaurant_reservation_url.", -"type": "boolean" -}, -"restaurantReservationUrl": { -"description": "URL for the Madden restaurant reservation flow, e.g. for display in a WebView. Not populated if restaurant reservations are not supported for the local entity.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAccessPointProto": { -"description": "This class holds information about a single access point. An access point establishes a relationship between a feature (like a POI or building) and some other feature. For example, consider a TYPE_LOCALITY feature like Seattle. An access point might be the TYPE_AIRPORT feature for Seattle-Tacoma International Airport. The airport feature defines the access point to gain airplane-based access to Seattle. 
A feature like Seattle will typically have multiple access points. You can get to Seattle using airplanes, various forms of public transit, or by driving a car. Thus Seattle would have multiple access points. You may be able to get to Seattle by flying into SeaTac, or you might be able to fly into Boeing Field, or Paine Field in Everett. You could drive in from the North/South using I-5, or you could drive in from the East using I-90. Many access points are from the road network. Thus the access point for some building at 123 Main Street would likely be a segment that defines the 100-200 block of \"Main Street\". A feature at the corner of \"Hollywood\" and \"Vine\" streets might have access points from both named streets. Access points are an optional field. Data editors may ignore them when creating features or editing other fields. In these cases, other quality teams will synthesize and update them. Several fields are also optional, as they are derivable from other fields. Access points to non-TYPE_SEGMENT features should always have the following fields set: - feature_type - feature_id - point Location and reference fields: BASIC vs DERIVABLE Access points to TYPE_SEGMENT features must have all the following BASIC fields: - feature_type (of the segment, e.g. TYPE_ROAD or TYPE_VIRTUAL_SEGMENT) - point_off_segment (or point; see \"fuzzy point\" note below) - unsuitable_travel_mode (may be empty) - level (indoor access points only) The following are DERIVABLE fields, which should only be added if the supplier is confident about their accuracy: - feature_id - point_on_segment - segment_position Editing clients are encouraged to set all fields, but they may set only the BASIC fields, in which case quality teams may use the BASIC fields to snap to an appropriate segment and derive the remaining fields. Example: The segment is split, so that the portion that the access point is on has a new feature ID. 
Quality teams notice that the point_on_segment is no longer on the segment with feature_id, finds the new nearest segment based on feature_type and existing point_on_segment, and re-derives a new feature_id, point_on_segment, and segment_position, keeping other fields consistent. Fuzzy point special case If the editor does not have side-of-road information for access points or is otherwise unsure of the precise placement of the access point, it may supply the point field (and not point_off_segment) as basic data instead, in which case quality teams may generate the point_off_segment. Identity Access points are considered semantically equivalent if they have the same geometry, including derived fields, and the same references to other features (feature_id, level_feature_id). For the exact definition, see cs/symbol:geostore::AreAccessPointsEquivalent. Field definitions", -"id": "GeostoreAccessPointProto", -"properties": { -"canEnter": { -"description": "RESERVED", -"type": "boolean" -}, -"canExit": { -"description": "RESERVED", -"type": "boolean" -}, -"featureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The ID of the feature that defines the access point. The bounding box of the feature is expanded to include the bounding box of the feature with the access point in accordance with the standard practice for bucketing map/reduce operations. See the wiki page at http://wiki/Main/OysterBucketingMapReduce for more information. For access points to TYPE_SEGMENT features, this may be re-derived if necessary by looking up the nearest segment to existing geometry." -}, -"featureType": { -"description": "The type of the feature. Required, to allow handling the access point differently based on feature type. For access points to non-TYPE_SEGMENT features, this cached type also makes things easier for clients that aren't running a bucketing map-reduce. 
For access points to TYPE_SEGMENT features, this is used to find to find the nearest segment of the given type.", -"format": "int32", -"type": "integer" -}, -"levelFeatureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "For indoor access points, this should be set to the level that the access point is on. The feature_id should point to the indoor segment, but when it is missing or invalid, and we need to derive it from geometry, only segments on this level will be considered. For non-indoor access points, level should remain unset, and when we derive feature_id from geometry, only segments not on any level (non-indoor segments) will be considered. The bounding box of the level feature is expanded to include the bounding box of the feature with the access point in accordance with the standard practice for bucketing map/reduce operations. See the wiki page at http://wiki/Main/OysterBucketingMapReduce for more information. (Though in general the feature should reside on the level already anyway..)" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this access point." -}, -"point": { -"$ref": "GeostorePointProto", -"description": "For access points to non-TYPE_SEGMENT features, the location of the access point. For access points to TYPE_SEGMENT features, this can be supplied as a fuzzy access point that is not guaranteed to be on the correct side of road. It should not be used by end clients in case of TYPE_SEGMENT access points." -}, -"pointOffSegment": { -"$ref": "GeostorePointProto", -"description": "If the access point is defined by a TYPE_SEGMENT feature, this is the location of the access point displaced slightly to the correct side of the segment. This offset is in a direction perpendicular to the direction of travel along the segment. The actual offset distance is unspecified. It would typically be relatively small (approximately 1 meter). 
You can subtract the \"off segment\" point from the \"on segment\" point to get a vector of unknown length pointing from \"on segment\" point to the \"off segment\" point. You can then scale that vector to whatever length you want. Note that extending this displacement vector a large distance (10s of meters) may result in a new point that is in the middle of some other feature (park, street, intersection). This is the preferred basic geometry field for incoming data from editing clients and importers, if side-of-road is well-established." -}, -"pointOnSegment": { -"$ref": "GeostorePointProto", -"description": "If the access point is defined by a TYPE_SEGMENT feature, this is the point on the centerline of the segment that is closest to the actual access point. May be re-derived if necessary to maintain precise placement on segment." -}, -"priority": { -"description": "LINT.ThenChange(//depot/google3/geostore/cleanup/callbacks/\\ ID_DUPLICATE_ACCESS_POINT.cc)", -"enum": [ -"TYPE_PRIMARY", -"TYPE_SECONDARY" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"segmentPosition": { -"description": "If the access point is defined by a TYPE_SEGMENT feature, this is the location of the access point expressed as a fractional distance along the segment. The value runs from 0 to 1 inclusive. May be re-derived if necessary to maintain precise placement on segment.", -"format": "float", -"type": "number" -}, -"unsuitableTravelMode": { -"description": "This list represents the travel modes for which this access-point should be avoided. If this list is empty, the access-point is suitable for any travel mode. If all access points are unsuitable for the current travel mode, client should revert to other heuristics (e.g. feature center). This is only used for access points to TYPE_SEGMENT features; access points to non-TYPE_SEGMENT features, e.g. 
TYPE_ESTABLISHMENT_POI features with gcid:transit_station GConcepts are just identified by feature_type and feature_id.", -"items": { -"enum": [ -"TRAVEL_MODE_MOTOR_VEHICLE", -"TRAVEL_MODE_AUTO", -"TRAVEL_MODE_TWO_WHEELER", -"TRAVEL_MODE_BICYCLE", -"TRAVEL_MODE_PEDESTRIAN", -"TRAVEL_MODE_PUBLIC_TRANSIT" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreAddressComponentProto": { -"description": "This class represents a parsed field within an address. NOTE: if you add a field to this proto, please update the AreAddressComponentsEquivalent() function in google3/geostore/base/internal/addresscomponent.cc", -"id": "GeostoreAddressComponentProto", -"properties": { -"featureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The id of the corresponding Feature, if such a feature is defined. As discussed above for feature_type, components of TYPE_FEATURE or TYPE_LANDMARK may have a corresponding feature id." -}, -"featureType": { -"description": "For components of TYPE_FEATURE or TYPE_LANDMARK, this is the feature type (TYPE_COUNTRY, TYPE_LOCALITY, TYPE_ESTABLISHMENT_POI etc.). Note that some features may not actually exist in the geostore (e.g. a village that we've never heard of), in which case the feature_id will be missing but the feature_type is still specified. Please refer to IsValidAddressComponentFeatureType() in google3/geostore/base/public/addresscomponent.h for the definitive list of feature types allowed for the type (either TYPE_FEATURE or TYPE_LANDMARK) of components.", -"format": "int32", -"type": "integer" -}, -"index": { -"description": "The order of this address component relative to the ones that share the same feature_type in the AddressProto. 
For now, the primary use of this index field is to handle ordering issue of multiple occurrences of AddressComponentProto with feature_type of TYPE_ROUTE (and subtypes), or TYPE_POLITICAL, where the order of the address components matters as there are dependences. 0 is the smallest valid index value, representing the most specific address component. Index value of 1 represents a relatively less specific address component of the same feature_type on which the 0-indexed address component depends.", -"format": "int32", -"type": "integer" -}, -"parsedName": { -"description": "The parsed_name field contains one or more names of an address component. Its actual contents depends on where in the Geo/Google stack you are reading a feature: 1. When an address is initially parsed via a feed or other raw input and structured as an AddressProto, parsed_name should contain the set of names that corresponds to the (possibly normalized) raw text from the raw input. 2. In MapFacts, the address component may be linked to an actual feature via feature_id. Any address formatting directly from MapFacts should follow links to retrieve names when possible. The parsed_name contents may be formatted directly if the address component is unlinked following the same rules as selecting and formatting the name of a feature. The cached parsed_name is regularly refreshed from the linked feature with the minimal set of names for address components (usually just a single, preferred name, in the local language, plus a Latin-script name: go/story-of-ac-names). 3. In serving systems, the names of linked features may be denormalized into the parsed_name field to facilitate quicker address formatting or for simple data filtering (e.g. finding all geocodes in California by name). If reading a feature from such a system, the parsed_name field could contain multiple names in multiple languages that reflect a cached copy of the names associated with the linked features. 
Formatting of such names should follow the same rules as selecting and formatting the name of a feature itself.", -"items": { -"$ref": "GeostoreNameProto" -}, -"type": "array" -}, -"range": { -"$ref": "GeostoreAddressRangeProto", -"description": "Any numerical address component may optionally be specified as a range. For example if a component of TYPE_STREET_NUMBER has the optional \"range\" attribute, then it represents a range of addresses rather than a single address (see AddressRangeProto for details)." -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to an address component. Never set in MapFacts." -}, -"textAffix": { -"description": "Additional text to append before and/or after the parsed_name, when the address is formatted. Multiple instance should represent translations. Currently, this is only permitted on TYPE_LANDMARK components, and only one instance is permitted.", -"items": { -"$ref": "GeostoreTextAffixProto" -}, -"type": "array" -}, -"type": { -"description": "Every address component has a type. Most address components correspond to one of the feature types defined in FeatureProto, so rather than defining a separate category system here, instead we mark them as TYPE_FEATURE and store the FeatureProto type in the feature_type() field. This is how we handle countries, cities, streets, etc. However, there are a few types of address components that do not have a corresponding feature type (e.g. PO boxes). These components have their type defined here. An address component of TYPE_STREET_NUMBER may correspond to a physical entity that defines a street number, such as a geocoded address or a land parcel. In this case, the address component may have a link to the corresponding feature. A good reference for what types of address components are possible is the xAL standard, which is a published XML schema: http://www.oasis-open.org/committees/ciq/download.shtml. 
This standard is the basis of the PostalAddress protocol message.", -"enum": [ -"TYPE_FEATURE", -"TYPE_POSTAL_CODE_SUFFIX", -"TYPE_POST_BOX", -"TYPE_STREET_NUMBER", -"TYPE_FLOOR", -"TYPE_ROOM", -"TYPE_HOUSE_ID", -"TYPE_DISTANCE_MARKER", -"TYPE_LANDMARK", -"TYPE_PLUS_CODE" -], -"enumDescriptions": [ -"", -"Address contains a suffix that refines the postal code in this address (e.g. the '+4' portion of a US '5+4' zip code). Postal code suffixes do not point to features.", -"", -"", -"DEPRECATED, use TYPE_FEATURE/TYPE_LEVEL", -"DEPRECATED, use TYPE_FEATURE/TYPE_COMPOUND_SECTION", -"A building number unique relative to some political feature.", -"A component corresponding to a marker for the approximate distance of the address along the route. Distance markers do not point to features and must have a single name that corresponds to the distance measured along the route in the standard units of the locale in which the address is contained (units themselves omitted from the name). The name must be a canonical floating point number in the feature's locale. E.g. in the US, the name representing 1000.5 must be '1,000.5'. Trailing zeros after the decimal and superfluous zeros preceding the number are forbidden.", -"A component representing a reference to some nearby landmark, to aid navigation. Landmark components are distinguished by the fact that they have additional text to relate the feature with this address, for example \"near\" or \"opposite\". This text is stored in the affix field. Landmark components may point to features, but are not required to.", -"Plus codes (go/pluscodes) are short, user readable encodings of latitude and longitude, that are intended as a substitute for street addresses where these do not exist or are not commonly used. Used when the physical address of the feature contains a local plus code, e.g.'PXX7+RW' in 'PXX7+RW Damaturu, Nigeria'." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAddressLinesProto": { -"description": "Represents the unparsed portion of an address with an associated language.", -"id": "GeostoreAddressLinesProto", -"properties": { -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. See http://go/geo-schema-reference/feature-properties/languages.md", -"type": "string" -}, -"line": { -"description": "These lines are in display order.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreAddressProto": { -"description": "This class represents an address, partial address, or address range. It is intended to be attached to features to identify their address(es). Some important points about addresses: - The addresses in the Geo Schema do *not* include a component for the name of the feature, i.e. they are not self-referential. For example, the name of a feature might be \"Zack's Pizza\" and its address would be \"123 Main Street\". Similarly, streets, cities, and counties do not include themselves as part of their address. The address of \"Seattle\" is \"King County, Washington, USA\". If want to construct an address that *does* include the feature name, you can simply prepend it to the other address components. - Lakes, mountains, and other natural features do not normally have addresses. Countries also do not have addresses because they are at the top of the political hierarchy. - Address components in the Geo Schema are listed in a particular order, independent of the conventions used by the country in which they occur. The basic order is \"smallest to largest\" starting with street numbers and routes, then political features, and ending with postal features. 
The exact rules are defined by the implementation of the AddressComponentOrdering::IsLessThan() function. - Some types of address components may occur more than once in an address. For example, a UK address with a \"dependent thoroughfare\" would have two components of TYPE_ROUTE (i.e. street names). These are listed in the order they are normally written.", -"id": "GeostoreAddressProto", -"properties": { -"addressLines": { -"description": "The unparsed portion (lines) of the address. An address can have multiple unparsed portions. Multiple unparsed portions sharing the same language should be modeled as one address_lines instance having multiple lines. Historically, we also supported unparsed portions in different languages, but we no longer do. Consequently, only one value is allowed for this field despite the fact that it is repeated. See go/address-lines-multi-language for information about why we made this change. If any components are filled in, this is supplemental to (i.e. disjoint from) them. Furthermore, this must be the most specific portion of the address (except for the portion, if any, stored in the name field of feature.proto). Unparsed lines are always formatted together in a block. Other address components are never formatted between the address lines. This doesn't imply that the address lines are always either the first or the last part of the formatted output.", -"items": { -"$ref": "GeostoreAddressLinesProto" -}, -"type": "array" -}, -"component": { -"description": "A list of parsed address components, e.g. the street, city, etc. An address range is one type of component.", -"items": { -"$ref": "GeostoreAddressComponentProto" -}, -"type": "array" -}, -"crossStreet": { -"deprecated": true, -"description": "** DEPRECATED ** This field is now deprecated (see b/33268032). 
If you want to store cross street information as part of an address, use the address_lines field.", -"items": { -"$ref": "GeostoreAddressComponentProto" -}, -"type": "array" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this address." -}, -"partialDenormalization": { -"$ref": "GeostoreAddressProto", -"description": "reserved" -}, -"templateId": { -"description": "The opaque ID of the address template that contains rules for structuring this address. The id of the address template can be retrieved using google3/geostore/address_templates/public/address_templates.h", -"type": "string" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to an address. Never set in MapFacts." -} -}, -"type": "object" -}, -"GeostoreAddressRangeProto": { -"description": "This class represents a range of numbers in an address. It is an optional additional field in the 'AddressComponentProto' message. This structure can be used to model both single addresses and address ranges. There are two primary use-cases for address ranges: definitions and references. Ranges are being defined when they are present on the addresses of segment features. Ranges are being referenced when they are present on non-segment features. NOTE: If you add fields in this proto, consider updating the AreAddressRangesEquivalent() function in google3/geostore/base/internal/addressrange.cc", -"id": "GeostoreAddressRangeProto", -"properties": { -"number": { -"description": "Two or more address numbers. Each number represents an address that was mentioned by the data provider.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"parameter": { -"description": "For address range definitions: Two or more interpolation parameter values. 
The length of this array must match the length of the number array, and each parameter number specifies the position of the corresponding address number. Each value is an interpolation between 0.0 and 1.0 inclusive. The value is proportional to the distance traveled along the segment's polyline starting at its origin. The parameters must be provided in increasing order and the values in the number array must be in strictly increasing or decreasing order. We make an exception for singleton addresses, which are represented as two copies of a (number, parameter) pair, for backwards compatibility. For address range references: This array must be empty.", -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"prefix": { -"description": "If specified, the prefix or suffix is applied to all numbers in the range. For example, this can be used to indicate that addresses B1 through B99 are on one side of the street, while A1 through A99 are on the other side of the street.", -"type": "string" -}, -"sameParity": { -"description": "If 'same_parity' is true, then all 'number' values must have the same parity (even or odd), and this address range only includes addresses whose parity is the same as the given 'number' values.", -"type": "boolean" -}, -"suffix": { -"type": "string" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to an address range. Never set in MapFacts. Here are some examples: Example #1: Single non-numeric address (e.g., \"Twelve\") At the moment this can only be represented as a street number (with the value in the parsed_name field of the AddressComponentProto). We have future plans to make other changes so we can handle this case. Example #2: Single semi-numeric address (e.g., \"12bis\") The number array contains two copies of the single numeric value (12). The prefix is empty and the suffix contains \"bis\". 
The parameter array has two identical values specifying the position of the single address. Example #3: Simple address range (e.g., \"100 to 198, even numbers only\") The number array contains the two values \"100\" and \"198\". The prefix and suffix strings are empty in this example. The parameter array has two values, one for each number. The same_parity flag is set in this example." -} -}, -"type": "object" -}, -"GeostoreAnchoredGeometryProto": { -"description": "A feature\u2019s geometry that is populated from the 3D Geometry Store. Please see go/a3d-and-mapfacts for design details.", -"id": "GeostoreAnchoredGeometryProto", -"properties": { -"geometryId": { -"description": "The ID to be used to fetch the feature\u2019s geometry from the 3D Geometry Store.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAppliedSpeedLimitProto": { -"description": "A container for speed limits that allows tagging with a correctness trust level.", -"id": "GeostoreAppliedSpeedLimitProto", -"properties": { -"speedLimit": { -"$ref": "GeostoreSpeedLimitProto", -"description": "The actual speed limit value." -}, -"trustLevel": { -"description": "The level of trust we have in this speed limit value.", -"enum": [ -"SPEED_LIMIT_TRUST_LEVEL_UNKNOWN", -"LOW_QUALITY", -"HIGH_QUALITY", -"EXACT" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in MapFacts.", -"This speed limit is set, but may be completely wrong and potential users should consult external quality metrics before deciding whether to use it.", -"We have high confidence that this is the correct speed limit, and that it's good enough to be shown to users. However, we are not completely confident the value is correct.", -"We are certain that this is the correct speed limit." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAttachmentsAttachmentProto": { -"description": "An AttachmentProto contains structured data of a client-specified type. 
An attachment is uniquely identified by the combination of its attachment_id and client_name_space fields. ", -"id": "GeostoreAttachmentsAttachmentProto", -"properties": { -"attachmentId": { -"description": "attachment_id distinguishes messages of the same type_id associated with the same feature. It can not be set to 0x0.", -"format": "uint64", -"type": "string" -}, -"clientNameSpace": { -"description": "This field specifies a namespace identifier that can be used to track the sources of attachments in a human friendly format. Name spaces must be at most 64 characters long and must be composed entirely of alphanumeric characters, hyphens, and underscores. No other characters are allowed.", -"type": "string" -}, -"comment": { -"deprecated": true, -"description": "comment is a human-readable string that is logged whenever this attachment is processed by the framework.", -"type": "string" -}, -"messages": { -"$ref": "Proto2BridgeMessageSet", -"description": "messages contains the structured data for this attachment. It should contain a single message with a type ID matching the value of the type_id field below." -}, -"typeId": { -"description": "type_id determines the type of the actual attachment that should be set in the messages MessageSet. It can not be set to 0x0.", -"format": "uint64", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAttributeIdProto": { -"description": "Used to represent the unique id of an attribute.", -"id": "GeostoreAttributeIdProto", -"properties": { -"id": { -"description": "The id of the attribute. Stored as a stripped format of the gcid (e.g. 
\"foo\" instead of \"gcid:att_foo\").", -"type": "string" -}, -"providerId": { -"description": "Set because it's required, but not really meaningful in geostore (always set to \"Geo\").", -"type": "string" -}, -"type": { -"enum": [ -"ITEMCLASS", -"ATTRIBUTE", -"VALUESPACE", -"DATASTORE" -], -"enumDescriptions": [ -"deprecated", -"", -"deprecated", -"deprecated" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAttributeProto": { -"description": "Protocol buffer for attaching attributes and values to instances. This is for assigning a particular attribute and value to a repository item, not for metadata. For protocol buffers that represents metadata about attributes and values, see CanonicalAttribute in itemclass.proto and ValueSpace in valuespace.proto.", -"id": "GeostoreAttributeProto", -"properties": { -"applicationData": { -"$ref": "Proto2BridgeMessageSet", -"deprecated": true -}, -"attributeDisplay": { -"deprecated": true, -"items": { -"$ref": "GeostoreAttributeValueDisplayProto" -}, -"type": "array" -}, -"booleanValue": { -"type": "boolean" -}, -"canonicalAttributeId": { -"$ref": "GeostoreAttributeIdProto", -"description": "The canonical attribute for this attribute instance." -}, -"doubleValue": { -"format": "double", -"type": "number" -}, -"enumIdValue": { -"description": "For those attribute ids that expect their values to be taken from an enumeration-style set of values, that value's gcid should be stored here, e.g. 
\"gcid:attval_yes\".", -"type": "string" -}, -"floatValue": { -"format": "float", -"type": "number" -}, -"int64Value": { -"format": "int64", -"type": "string" -}, -"integerValue": { -"format": "int32", -"type": "integer" -}, -"itemClassId": { -"$ref": "GeostoreAttributeIdProto", -"deprecated": true -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this attribute" -}, -"protoValue": { -"$ref": "Proto2BridgeMessageSet", -"deprecated": true, -"description": "Fully qualified package name because genprotohdf uses genproto for this proto2 syntax: https://wiki.corp.google.com/twiki/bin/view/Main/Proto2WithGenproto" -}, -"stringValue": { -"description": "The attribute value falls into one of these fields, based on value_type:", -"type": "string" -}, -"uint32Value": { -"format": "uint32", -"type": "integer" -}, -"valueDisplay": { -"deprecated": true, -"description": "Used to store language-specific names of this attribute's value (e.g. a translation into another language).", -"items": { -"$ref": "GeostoreAttributeValueDisplayProto" -}, -"type": "array" -}, -"valueSpaceId": { -"$ref": "GeostoreAttributeIdProto", -"deprecated": true -}, -"valueType": { -"enum": [ -"NONE", -"STRING", -"INTEGER", -"DOUBLE", -"BOOLEAN", -"PROTO_VALUE", -"INT64", -"FLOAT", -"DISPLAY_ONLY", -"UINT32", -"ENUM_ID" -], -"enumDeprecated": [ -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"The value comes from an enumerated attribute valuespace." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreAttributeValueDisplayProto": { -"description": "Used to help display language-specific names of attributes.", -"id": "GeostoreAttributeValueDisplayProto", -"properties": { -"language": { -"type": "string" -}, -"synonym": { -"type": "string" -} -}, -"type": "object" -}, -"GeostoreBarrierLogicalMaterialProto": { -"id": "GeostoreBarrierLogicalMaterialProto", -"properties": { -"material": { -"items": { -"enum": [ -"UNKNOWN_LOGICAL_MATERIAL", -"CONCRETE", -"METAL", -"PLASTIC", -"STONE", -"TIMBER" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreBestLocaleProto": { -"description": "A BestLocaleProto holds information about the best-match locale for a feature. Clients may use this information to determine the appropriate local name of a feature.", -"id": "GeostoreBestLocaleProto", -"properties": { -"locale": { -"$ref": "GeostoreFeatureIdProto", -"description": "The ID of the best-match TYPE_LOCALE feature for this feature." -}, -"localizationPolicyId": { -"description": "The ID of the localization policy to apply when selecting a name for a feature. This field should always be set. If feature_id is also defined, this field should have the same localization policy ID as the referenced locale feature. Localization policy IDs are arbitrary identifiers (up to some number of bytes; see geostore/base/public/constants.h) that uniquely distinguish a set of language-selection rules.", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this best locale." -} -}, -"type": "object" -}, -"GeostoreBizBuilderReferenceProto": { -"description": "The reference to a BizBuilder listing. For details on BizBuilder see http://g3doc/commerce/bizbuilder/backend/g3doc/index.md", -"id": "GeostoreBizBuilderReferenceProto", -"properties": { -"id": { -"description": "Listing id. 
Used in queries to BizBuilder backend for listing access.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreBorderProto": { -"description": "A border represents a line of division between two features of the same type (i.e. United States and Mexico, but not California and Mexico). Borders are only used for features that tile an area. For example, country features have borders with one another because they tile an area of land. Country features do not have borders with province features because those two types of features may intersect with each other. The geometry of a border will often be similar (or derived from) the geometry of the two features that it separates. However, it is useful to have borders represented by stand-alone features for map-styling purposes. Ideally, the geometry in a border feature would be exactly the same as the common edges of the polygonal geometry of the two features. This may not always be true in practice. At some point in the future we would like to build a network of borders for features that are supposed to tile with each other. The network would be composed of different border types meeting at endpoint intersections. In the process of building this network, we would perform small geometry corrections to ensure that the borders align properly at all zoom levels. Border features are intended primarily for map drawing, and they would rarely be useful for geocoding. One exception would be for famous borders like the \"Mason Dixon Line\" or the \"Berlin Wall.\" The standard feature properties have the following interpretations: name - Borders rarely have names unless they notable in their own right (e.g. \"Mason Dixon Line\", \"Berlin Wall\"). point - A border should not have point geometry. polyline - A border should have a single polyline that represents the division between the two features. 
polygon - A border should not have polygon geometry.", -"id": "GeostoreBorderProto", -"properties": { -"featureIdLeft": { -"$ref": "GeostoreFeatureIdProto", -"description": "The ids of the area features to the left and right of the border, relative to the start and end of this borders' polyline geometry. These features should have the same type as the \"type\" attribute above. These ids are not required because the corresponding features may be nonexistent or difficult to obtain." -}, -"featureIdRight": { -"$ref": "GeostoreFeatureIdProto" -}, -"logicalBorder": { -"description": "The logical borders which this border is a part of.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"overrideStatus": { -"description": "List of border status overrides. Due to legal reasons, we may be required to display some borders differently on some domains for instance.", -"items": { -"$ref": "GeostoreOverrideBorderStatusProto" -}, -"type": "array" -}, -"status": { -"description": "The border status identifies the legal status of the border line.", -"enum": [ -"STATUS_NORMAL", -"STATUS_DISPUTED", -"STATUS_UNSURVEYED", -"STATUS_INTERNATIONAL_WATER", -"STATUS_NEVER_DISPLAY", -"STATUS_TREATY", -"STATUS_PROVISIONAL", -"STATUS_NO_LABEL" -], -"enumDescriptions": [ -"Most border lines have this status, which typically means both parties agree on the location of the line. These might be called \"de jure\" borders.", -"This status is used when the two parties disagree on the location of the line. There can be multiple border lines in a given disputed area. One might be the line proposed by country \"A\" and another the line proposed by country \"B\". A third line might mark the de facto line of control. Other border lines might indicate historical borders, e.g., \"1949 Armistice Line\".", -"This is used for one section of border between Argentina and Chile. 
Both parties agree that a border line exists somewhere on the glacier, but the exact location has not been determined.", -"This is a border line between a country and international water.", -"This status is only used for the borders that should never display to users. Typically it is for the borders that we don't want to represent but that we would still like to use to model country boundaries, e.g. Hong Kong and Macau.", -"This is used for borders that refer to well-defined boundaries which have been established by a specific treaty, agreement, armistice or other such agreement between two or more parties, yet the position and/or status is not considered to be legal and final (e.g., the final demarcation of the exact position has not occurred or the final political status is not resolved).", -"This is used for borders for which a formal agreement has not been established between the parties involved, yet the existing line acts as a de facto functional border without existence of any dispute.", -"This is used for borders which should not have country labels to either side, but should otherwise be styled the same as a border with STATUS_NORMAL. This is typically for borders which are not themselves disputed but still form part of the boundary around a disputed area that is not otherwise modeled or labeled as a country, or as an override where we are obligated to remove labels for region specific versions of Maps." -], -"type": "string" -}, -"type": { -"description": "The type of the features this border separates. Should always be a subtype of TYPE_POLITICAL. NOTE: as of December 2019, we currently require this to be equal to TYPE_COUNTRY or TYPE_ADMINISTRATIVE_AREA1. 
In the future, we may support TYPE_BORDER for lower types of political features.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreBoundingMarkerProto": { -"description": "NOTE: BoundingMarkerProto could be compared against one another by canonicalizing them via GetCanonicalBoundingMarker() in google3/geostore/base/internal/lane.cc. Any fields that don't contribute to the definition of a bounding marker in the real world should be bundled with the annotative fields near the bottom and excluded in GetCanonicalBoundingMarker(). LINT.IfChange", -"id": "GeostoreBoundingMarkerProto", -"properties": { -"boundingMarker": { -"$ref": "GeostoreFeatureIdProto", -"description": "References to any gcid:physical_lane_marker features that bound this lane or lane connection." -}, -"boundingMarkerToken": { -"description": "A token that can be used to identify the version of the data about this bounding marker.", -"type": "string" -}, -"flowlineAdjacencyBeginFraction": { -"description": "Which part of the flowline does this association refer to? These should be between 0 and 1. These are optionally set, but can be approximated geometrically if they aren\u2019t set. NOTE: These refer to the geometry of this feature.", -"format": "double", -"type": "number" -}, -"flowlineAdjacencyEndFraction": { -"format": "double", -"type": "number" -}, -"markerAdjacencyBeginFraction": { -"description": "Which part of the marker track does this association refer to? These should be between 0 and 1. These are optionally set, but can be approximated geometrically if they aren\u2019t set. 
NOTE: These refer to the geometry of the marker feature.", -"format": "double", -"type": "number" -}, -"markerAdjacencyEndFraction": { -"format": "double", -"type": "number" -}, -"side": { -"description": "Which side of the flowline does the marker occur on.", -"enum": [ -"UNKNOWN", -"LEFT", -"RIGHT" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreBuildingProto": { -"description": "This protocol buffer holds the building-specific attributes for features of type TYPE_COMPOUND_BUILDING.", -"id": "GeostoreBuildingProto", -"properties": { -"baseHeightMetersAgl": { -"description": "The height of the base of this building, in meters above ground-level, if known.", -"format": "float", -"type": "number" -}, -"defaultDisplayLevel": { -"$ref": "GeostoreFeatureIdProto", -"description": "The level in this building that should get displayed by default. If present, the default display level must be one of this building's levels that are listed in the level[] field, and if a level is set as a default level of one building, all buildings sharing the level should have that same level as their default level. If not present, clients should not display any level by default for that building." -}, -"floors": { -"description": "The number of floors above the base of the building, if known. For example a regular 1-story building would set this to \"1\". Use a value of GeostoreConstants::kDefaultHeightPerFloor when converting \"floors\" to \"height_meters\".", -"format": "int32", -"type": "integer" -}, -"floorsMetadata": { -"$ref": "GeostoreFieldMetadataProto" -}, -"heightMeters": { -"description": "The height of the building above its base, in meters, if known.", -"format": "float", -"type": "number" -}, -"heightMetersMetadata": { -"$ref": "GeostoreFieldMetadataProto" -}, -"level": { -"description": "The levels in this building, in no particular order. 
These levels refer back to the building via another strong reference (the LevelProto.building field).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"structure": { -"description": "\"Structure\" denotes a physical architecture of the building that is readily visible. This attribute is useful in that rarer structures can make good landmarks.", -"enum": [ -"STRUCTURE_ANY", -"STRUCTURE_TOWER", -"STRUCTURE_DOME", -"STRUCTURE_CASTLE", -"STRUCTURE_SHRINE", -"STRUCTURE_TEMPLE", -"STRUCTURE_TANK" -], -"enumDescriptions": [ -"ABSTRACT", -"The root of all structure categories. Not a meaningful value and should never be set on any feature.", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreBusinessChainProto": { -"description": "This holds data specific to business chain features.", -"id": "GeostoreBusinessChainProto", -"properties": { -"canonicalGconcepts": { -"description": "Canonical GConcepts describe the ideal state of the GConcepts of this business chain's members.", -"items": { -"$ref": "GeostoreCanonicalGConceptProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreBusinessHoursProto": { -"description": "A BusinessHoursProto stores a weekly schedule of opening hours for a business (represented as a BusinessHours message) together with other closely related information that is Geo-specific.", -"id": "GeostoreBusinessHoursProto", -"properties": { -"data": { -"$ref": "BusinessHours", -"description": "The actual hours represented by this BusinessHoursProto." -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for these hours." 
-} -}, -"type": "object" -}, -"GeostoreCallToActionProto": { -"description": "Message containing calls to action specified by the business owner.", -"id": "GeostoreCallToActionProto", -"properties": { -"ctaType": { -"description": "Required.", -"enum": [ -"CTA_TYPE_UNSPECIFIED", -"CTA_TYPE_BOOK", -"CTA_TYPE_BUY", -"CTA_TYPE_ORDER_ONLINE", -"CTA_TYPE_LEARN_MORE", -"CTA_TYPE_SIGN_UP", -"CTA_TYPE_GET_OFFER" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"url": { -"$ref": "GeostoreUrlProto" -} -}, -"type": "object" -}, -"GeostoreCanonicalGConceptProto": { -"description": "This proto represents a canonical gconcept of a business chain's members.", -"id": "GeostoreCanonicalGConceptProto", -"properties": { -"gconcept": { -"$ref": "GeostoreGConceptInstanceProto" -}, -"isRequired": { -"description": "Whether the gconcept must be on a member. This must be true for a primary gconcept.", -"type": "boolean" -} -}, -"type": "object" -}, -"GeostoreCellCoveringProto": { -"description": "This protocol buffer holds S2 cell covering for the feature. See util/geometry/s2cell_union.h for more information on S2 cells. See geostore/base/public/cellcovering.h for utility functions.", -"id": "GeostoreCellCoveringProto", -"properties": { -"cellId": { -"description": "Array of S2 cell ids that represent the covering. There is no preset limit on how many cells can be used.", -"items": { -"format": "uint64", -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCityJsonProto": { -"description": "CityJsonProto is a custom proto representation of the portion of the CityJSON spec (https://www.cityjson.org/) relevant to internal projects. See go/cityjsonproto-design for more information about the modeling and design decisions implemented here. 
LINT.IfChange", -"id": "GeostoreCityJsonProto", -"properties": { -"appearance": { -"$ref": "GeostoreCityJsonProtoAppearance", -"description": "Additional information that can be used to describe the appearance of CityObjects in this CityJsonProto." -}, -"cityObjects": { -"description": "City objects associated with this CityJsonProto.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObject" -}, -"type": "array" -}, -"flattenedVertices": { -"description": "Vertices as local coordinates represented as a flattened list: [x1,y1,z1,x2,y2,z2,x3,y3,z3]. Vertices are relative to a local coordinate system and rounded to their nearest integer value. See `transform` for how vertices can be transformed from a local coordinate system into an Earth-centered, Earth-fixed coordinate system. Vertices are flattened for space efficiency, memory locality, and processing performance. To access the x,y,z coordinates of vertex N, read the values at indices 3N, 3N+1, and 3N+2.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"transform": { -"$ref": "GeostoreCityJsonProtoTransform", -"description": "Spec for converting vertices from a local coordinate system in arbitrary units to ECEF coordinates in meters (https://en.wikipedia.org/wiki/Earth-centered,_Earth-fixed_coordinate_system)." -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoAppearance": { -"description": "Contains additional ways to describe the appearance of a CityObject, e.g. definitions of materials and textures that can apply to geometry surfaces.", -"id": "GeostoreCityJsonProtoAppearance", -"properties": { -"materials": { -"description": "Predefined materials that may be referenced from geometric primitives.", -"items": { -"$ref": "GeostoreCityJsonProtoAppearanceMaterial" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoAppearanceMaterial": { -"description": "Representation of a single material that can describe a geometry surface. 
See technical definitions of these fields via the description and link at https://www.cityjson.org/specs/2.0.0/#material-object.", -"id": "GeostoreCityJsonProtoAppearanceMaterial", -"properties": { -"diffuseColor": { -"$ref": "GeostoreCityJsonProtoAppearanceMaterialRgbColor", -"description": "The color that this material shows under pure white light when incoming light is reflected in all directions equally." -}, -"isSmooth": { -"description": "Whether this material has an even, regular surface or consistency.", -"type": "boolean" -}, -"name": { -"description": "The name of this material.", -"type": "string" -}, -"shininess": { -"description": "The degree to which this material reflects light; value is [0,1].", -"format": "float", -"type": "number" -}, -"transparency": { -"description": "The degree to which this material allows light through; value is [0,1], with 1.0 being completely transparent.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoAppearanceMaterialRgbColor": { -"description": "RGB 0-1, where the range of each value is [0,1] instead of [0,255]. Values may be linear RGB or sRGB; RGB values are frequently stored as sRGB (https://stackoverflow.com/a/12894053 explains the difference).", -"id": "GeostoreCityJsonProtoAppearanceMaterialRgbColor", -"properties": { -"blue": { -"format": "float", -"type": "number" -}, -"green": { -"format": "float", -"type": "number" -}, -"red": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObject": { -"description": "Representation of an object with geometry.", -"id": "GeostoreCityJsonProtoCityObject", -"properties": { -"geometries": { -"description": "Geometries associated with this object.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometry" -}, -"type": "array" -}, -"id": { -"description": "The ID of this CityObject, which must be unique within this CityJsonProto. 
The ID does not have a meaning outside this CityJsonProto.", -"type": "string" -}, -"type": { -"description": "Type of object represented, e.g. building.", -"enum": [ -"TYPE_UNSPECIFIED", -"BUILDING", -"OTHER_CONSTRUCTION" -], -"enumDescriptions": [ -"Should never be set in prod. If set locally and converted to CityJSON, the CityJSON will be invalid.", -"Building.", -"Unspecified other structure, e.g. storefront." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometry": { -"description": "Representation of geometry including geometric primitives which are used as building blocks to construct geometries of varying complexity. Geometries vary both in type and in level-of-detail, enabling representation of any shape at any level of granularity. All geometries are ultimately composed of `MultiPoint`s, which reference the actual vertices. Only linear and planar shapes are allowed, no curves or parametric surfaces.", -"id": "GeostoreCityJsonProtoCityObjectGeometry", -"properties": { -"lod": { -"description": "Level-of-detail (LoD) indicates how intricate the geometric representation is. May be a single digit per CityGML standards or X.Y per TU Delft (visual depiction at https://3d.bk.tudelft.nl/lod/). `CityObject`s may have multiple geometries with the same LoD.", -"type": "string" -}, -"multipoint": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometryMultiPoint", -"description": "For geometries consisting of a single point, line, or loop." -}, -"multisurface": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometryMultiSurface", -"description": "For geometries consisting of a collection of surfaces." 
-}, -"semantics": { -"description": "Predefined semantics that may be referenced from geometric primitives.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometrySemantic" -}, -"type": "array" -}, -"solid": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometrySolid", -"description": "For geometries consisting of a watertight 3D shape." -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometryMaterialSpec": { -"description": "Used to reference a predefined material from a geometric primitive.", -"id": "GeostoreCityJsonProtoCityObjectGeometryMaterialSpec", -"properties": { -"materialsIndex": { -"description": "Index into CityJsonProto.appearance.materials.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometryMultiPoint": { -"description": "A single line, loop, or set of points.", -"id": "GeostoreCityJsonProtoCityObjectGeometryMultiPoint", -"properties": { -"xIndices": { -"description": "A collection of references to vertices in `CityJsonProto.flattened_vertices`. `CityJsonProto.flattened_vertices` is a flattened list of vertex coordinates. A value in `x_indices` should be the index of the x-coordinate of the desired vertex V; the full coordinates of vertex V can then be found at indices [V,V+1,V+2]. 
For example, an `x_indices` containing 3, 0, and 12 references three vertices, whose complete x,y,z coordinates can respectively be found at `CityJsonProto.flattened_vertices` indices [3,4,5], [0,1,2], and [12,13,14].", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometryMultiSurface": { -"description": "A collection of arbitrary surfaces that have no prescribed topological relationship.", -"id": "GeostoreCityJsonProtoCityObjectGeometryMultiSurface", -"properties": { -"surfaces": { -"description": "All surfaces are standalone entities with no relationship to each other.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometrySurface" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometrySemantic": { -"description": "Representation of semantic information that can be used for reasoning about geometric primitives.", -"id": "GeostoreCityJsonProtoCityObjectGeometrySemantic", -"properties": { -"type": { -"description": "The type of semantic entity this geometric primitive is.", -"enum": [ -"TYPE_UNSPECIFIED", -"WINDOW", -"DOOR" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometrySolid": { -"description": "A 3D shape, composed of a watertight exterior shell with optional interior watertight shells.", -"id": "GeostoreCityJsonProtoCityObjectGeometrySolid", -"properties": { -"shells": { -"description": "The first shell is exterior; any additional are interior.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometryMultiSurface" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoCityObjectGeometrySurface": { -"description": "A polygonal surface, composed of a closed exterior loop with optional closed interior loops.", -"id": "GeostoreCityJsonProtoCityObjectGeometrySurface", -"properties": { -"loops": { -"description": "The first loop 
is exterior; any additional are interior.", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometryMultiPoint" -}, -"type": "array" -}, -"materialSpecs": { -"description": "The materials this surface is made of. May be left blank if materials are unspecified for this surface. IMPORTANT: This field must not contain more than one MaterialSpec unless CityJSON's material \"theme\" is incorporated into CityJsonProto (see discussion in go/cityjson-for-hand-modeled-landmarks).", -"items": { -"$ref": "GeostoreCityJsonProtoCityObjectGeometryMaterialSpec" -}, -"type": "array" -}, -"semanticsIndex": { -"description": "The semantics of this surface (e.g. what part of a building it is) represented as an index into the containing Geometry's `semantics` field. May be left blank if semantics are unspecified for this surface.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreCityJsonProtoTransform": { -"description": "Information for transforming a point from an \"old\" to \"new\" coordinate frame. Applied as follows: vertex_new = scale * vertex_old + translate", -"id": "GeostoreCityJsonProtoTransform", -"properties": { -"scale": { -"description": "Relative scale of the vertices in the new coordinate system relative to the old coordinate system. Applies to all three x,y,z coordinates.", -"format": "double", -"type": "number" -}, -"translate": { -"$ref": "GeostoreCityJsonProtoTransformTranslate", -"description": "Offset of each vertex in the new coordinate system relative to the old coordinate system." 
-} -}, -"type": "object" -}, -"GeostoreCityJsonProtoTransformTranslate": { -"description": "How the object will be moved along the x, y, and z axes, respectively.", -"id": "GeostoreCityJsonProtoTransformTranslate", -"properties": { -"x": { -"format": "double", -"type": "number" -}, -"y": { -"format": "double", -"type": "number" -}, -"z": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreComposableItemProto": { -"description": "Generic item proto. This is intended to have only certain aspects filled (e.g. photo only, name + price). Valid combinations of properties are enforced by linters.", -"id": "GeostoreComposableItemProto", -"properties": { -"callToAction": { -"$ref": "GeostoreCallToActionProto", -"description": "Call to action for the individual product." -}, -"jobMetadata": { -"$ref": "GeostoreJobMetadata" -}, -"media": { -"description": "Any photos describing this item.", -"items": { -"$ref": "GeostoreMediaItemProto" -}, -"type": "array" -}, -"nameInfo": { -"description": "The repeated name_info field is for price list sections listed in multiple languages. At least one name_info containing id must be specified. There should be at most one name_info for any given language. When representing a job item, there should be exactly one name_info specified.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -}, -"offered": { -"description": "Represents if an item is offered at a business. For TYPE_JOB, this represents if this job is offered by the corresponding business", -"enum": [ -"OFFERED_UNSPECIFIED", -"OFFERED", -"OFFERED_NOT", -"OFFERED_ON_WEBSITE" -], -"enumDescriptions": [ -"Merchant says nothing", -"Merchant explicitly says this is offered", -"Merchant explicitly says this is NOT offered", -"Merchant mentioned this job on their websites" -], -"type": "string" -}, -"price": { -"$ref": "GeostorePriceRangeProto", -"description": "Price of the item. 
There should be at most one price for any given currency." -}, -"priceFormat": { -"description": "Represents which price format is being used by this item, which determines the usage/meaning of the \u201cprice\u201d field above. Optional \u2013 the default value is legal and safe (represents no price if the \u201cprice\u201d field is unset).", -"enum": [ -"PRICE_FORMAT_DEFAULT", -"PRICE_FORMAT_VARIES" -], -"enumDescriptions": [ -"Default price format which uses the PriceRangeProto (set or unset) in the \"price\" field above to represent a price interval, or lack thereof.", -"Value for a price which explicitly varies, i.e. it should show as \u201cPrice may vary\" or similar. If this is the price_format, contents of the \"price\" field will be ignored." -], -"type": "string" -}, -"rankingHint": { -"description": "Numerical score which can be provided by data sources to indicate preferred item ordering. This is purely a hint \u2013 we are not required to followed it if we have a different order we think is better. Higher scores represent items that should be shown more prominently/earlier. Optional.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreCountComparisonProto": { -"description": "A count value tagged with a comparison operator. This can be used for axle count, trailer count, etc.", -"id": "GeostoreCountComparisonProto", -"properties": { -"comparisonOperator": { -"enum": [ -"UNSPECIFIED", -"EQUAL", -"LESS_THAN", -"LESS_THAN_OR_EQUAL", -"GREATER_THAN", -"GREATER_THAN_OR_EQUAL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"count": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreCrossingStripePatternProto": { -"description": "Possible patterns of a crossing stripe (any element that denotes a point on a segment or lane at which the vehicle must stop or yield). 
These include crosswalks, stop, and yield lines.", -"id": "GeostoreCrossingStripePatternProto", -"properties": { -"borderLine": { -"$ref": "GeostorePhysicalLineProto" -}, -"borderPattern": { -"enum": [ -"UNKNOWN_BORDER_PATTERN", -"NO_BORDER_PATTERN", -"SOLID", -"DASHED" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"color": { -"description": "Colors found on this crossing.", -"items": { -"$ref": "GeostorePaintedElementLogicalColorProto" -}, -"type": "array" -}, -"stripePattern": { -"enum": [ -"UNKNOWN_STRIPE_PATTERN", -"NO_STRIPE_PATTERN", -"LONGITUDINAL_STRIPE", -"DIAGONAL_STRIPE", -"LATERAL_STRIPE", -"SINGLE_CROSSING_LINE", -"DOUBLE_CROSSING_LINE", -"TRIANGLE_CROSSING_LINE_POINTING_LEFT", -"TRIANGLE_CROSSING_LINE_POINTING_RIGHT", -"STRUCTURED_CROSSING_LINE" -], -"enumDescriptions": [ -"", -"For crosswalk No crossing stripes. Usually comes with the bordered crosswalk.", -"", -"", -"", -"A crossing line consisting of a single stripe e.g. a stop line.", -"A crossing line consisting of a double stripe.", -"A crossing line made of repeating triangles e.g. a California yield line. The teeth are always facing the left of the crossing line.", -"A crossing line made of repeating triangles e.g. a California yield line. The teeth are always facing the right of the crossing line.", -"The crossing line has a more complex structure than just a single conceptual stripe or row of symbols." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreCurvatureProto": { -"id": "GeostoreCurvatureProto", -"properties": { -"pointCurvature": { -"description": "Curvature values at points along the flowline. 
A linear interpolation between two successive points will yield the curvature value at intermediate points.", -"items": { -"$ref": "GeostorePointCurvatureProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCurveConnectionProto": { -"description": "Protocol buffer describing a curve that connects two externally specified endpoints.", -"id": "GeostoreCurveConnectionProto", -"properties": { -"bezierParams": { -"$ref": "GeostoreCurveConnectionProtoBezierParams" -}, -"circleParams": { -"$ref": "GeostoreCurveConnectionProtoCircleParams" -}, -"type": { -"enum": [ -"UNSPECIFIED", -"BEZIER", -"CIRCLE", -"STRAIGHT_EDGE" -], -"enumDescriptions": [ -"The connection is unspecified. Rendering code will rely on heuristics to choose one.", -"Bezier curve interpolates the two endpoints.", -"Endpoints are connected via a circle arc.", -"Endpoints are connected by a straight edge." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreCurveConnectionProtoBezierParams": { -"id": "GeostoreCurveConnectionProtoBezierParams", -"properties": { -"controlPoint": { -"description": "Internal Bezier handles. One can be used for a quadratic curve, two for cubic Beziers.", -"items": { -"$ref": "GeostoreCurveConnectionProtoBezierParamsControlPoint" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreCurveConnectionProtoBezierParamsControlPoint": { -"id": "GeostoreCurveConnectionProtoBezierParamsControlPoint", -"properties": { -"angleDegrees": { -"description": "We use this parameterization to make curves change predictable when endpoints move. Each point P is defined in terms of the straight edge [S, E] between the start point of the curve S and its end point E. 
*P / / / S *------------* E Counter-clockwise angle between vector SE and vector SP.", -"format": "double", -"type": "number" -}, -"distanceMultiplier": { -"description": "Distance(S, P) in units of Distance(S, E).", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreCurveConnectionProtoCircleParams": { -"id": "GeostoreCurveConnectionProtoCircleParams", -"properties": { -"radius": { -"description": "Arc radius. Must be greater than half-distance between two endpoints.", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreDataSourceProto": { -"description": "Every data source used to construct a data repository has an associated feature that provides more information about it. The standard feature properties have the following interpretations: bound - The bounds must includes all features that refer to this data source, so that bucketing MapReduce passes work correctly. name - The provider name associated with this data source. It is expected to remain constant from release to release, and between datasets. address - should be empty. point, polyline, polygon - should be empty. source_info - should not be set. child - should be empty.", -"id": "GeostoreDataSourceProto", -"properties": { -"attributionUrl": { -"description": "This is the URL of a website representing this DataSource as a whole. If this DataSource feature is specific to a particular dataset or product, the page may contain information relevant to that dataset or product or may be the main page of the organization.", -"items": { -"$ref": "GeostoreUrlProto" -}, -"type": "array" -}, -"copyrightOwner": { -"description": "A UTF8 string that will be inserted in copyright messages to refer to this copyright owner, e.g. \"Tele Atlas\".", -"type": "string" -}, -"copyrightYear": { -"description": "The copyright year of this data (which may be different than the year of the release date), e.g. 
2005.", -"format": "int32", -"type": "integer" -}, -"description": { -"description": "A free-form description of this data source. Ideally the description should include: - Where the data was obtained (URL, company name, individual, etc). - Where to find detailed documentation. - A brief summary of the licensing terms. - As much internal and external contact information as possible (e.g. who to ask about licensing questions, interpreting the data, updating the data, fixing bugs in the importer, etc).", -"type": "string" -}, -"importerBuildInfo": { -"description": "The build information of the importer binary used to generate this data source.", -"type": "string" -}, -"importerBuildTarget": { -"description": "The build target of the importer binary used to generate this data source.", -"type": "string" -}, -"importerClientInfo": { -"description": "The Perforce client information of the importer binary used to generate this data source.", -"type": "string" -}, -"importerMpmVersion": { -"description": "If the importer was built as an MPM, the version number can be stored in this field. 
As with build_info, this can be useful when tracking down issues that may be due to the use of a particular binary.", -"type": "string" -}, -"importerTimestamp": { -"description": "The timestamp of the importer binary used to generate this data source.", -"type": "string" -}, -"provider": { -"description": "The provider type of this data source.", -"enum": [ -"PROVIDER_ANY", -"PROVIDER_UNKNOWN", -"PROVIDER_NAVTEQ", -"PROVIDER_TELE_ATLAS", -"PROVIDER_TELE_ATLAS_MULTINET", -"PROVIDER_TELE_ATLAS_CODEPOINT", -"PROVIDER_TELE_ATLAS_GEOPOST", -"PROVIDER_TELE_ATLAS_DATAGEO", -"PROVIDER_TELE_ATLAS_ADDRESS_POINTS", -"PROVIDER_TELCONTAR", -"PROVIDER_EUROPA", -"PROVIDER_ROYAL_MAIL", -"PROVIDER_GOOGLE", -"PROVIDER_GOOGLE_HAND_EDIT", -"PROVIDER_GOOGLE_BORDERS", -"PROVIDER_GOOGLE_SUBRANGE", -"PROVIDER_GOOGLE_GT_FUSION", -"PROVIDER_GOOGLE_ZAGAT_CMS", -"PROVIDER_GOOGLE_PLACE_NAVBOOST", -"PROVIDER_GOOGLE_FOOTPRINT", -"PROVIDER_GOOGLE_PRODUCT_TERMS", -"PROVIDER_GOOGLE_POINTCARDS", -"PROVIDER_GOOGLE_BUSINESS_CHAINS", -"PROVIDER_GOOGLE_LOCAL_SUMMARIZATION", -"PROVIDER_GOOGLE_PRONUNCIATIONS", -"PROVIDER_GOOGLE_DUMPLING", -"PROVIDER_GOOGLE_DISTILLERY", -"PROVIDER_GOOGLE_LOCAL_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_RELATION_MINER", -"PROVIDER_GOOGLE_MAPSPAM", -"PROVIDER_GOOGLE_ROSE", -"PROVIDER_GOOGLE_LOCAL_PLACE_RATINGS", -"PROVIDER_GOOGLE_WIPEOUT", -"PROVIDER_GOOGLE_KNOWLEDGE_GRAPH", -"PROVIDER_GOOGLE_BEEGEES", -"PROVIDER_GOOGLE_REVIEW_SUMMARIZATION", -"PROVIDER_GOOGLE_OFFLINE_NON_CORE_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_GEO_WORLDMAPS", -"PROVIDER_GOOGLE_GEO_MODERATION", -"PROVIDER_GOOGLE_OYSTER_AUTO_EDITS", -"PROVIDER_GOOGLE_LOCAL_ALCHEMY", -"PROVIDER_GOOGLE_KEROUAC", -"PROVIDER_GOOGLE_MOBRANK", -"PROVIDER_GOOGLE_RAPTURE", -"PROVIDER_GOOGLE_CULTURAL_INSTITUTE", -"PROVIDER_GOOGLE_GEOCODES_FROM_LOCAL_FEEDS", -"PROVIDER_GOOGLE_ATTRIBUTES_FROM_CRAWLED_CHAINS", -"PROVIDER_GOOGLE_TACTILE_MAPS", -"PROVIDER_GOOGLE_MAPS_FOR_MOBILE", -"PROVIDER_GOOGLE_GEO_REALTIME", 
-"PROVIDER_GOOGLE_PROMINENT_PLACES", -"PROVIDER_GOOGLE_PLACE_ACTIONS", -"PROVIDER_GOOGLE_GT_AUTO_EDITS", -"PROVIDER_GOOGLE_WAZE", -"PROVIDER_GOOGLE_ONTHEGO", -"PROVIDER_GOOGLE_GT_IMPORT", -"PROVIDER_GOOGLE_STRUCTURED_DATA", -"PROVIDER_GOOGLE_HELICOPTER", -"PROVIDER_GOOGLE_ROLLBACK", -"PROVIDER_GOOGLE_RIGHTS_REPAIR", -"PROVIDER_GOOGLE_PERFUME", -"PROVIDER_GOOGLE_MAPS_TRANSLATION", -"PROVIDER_GOOGLE_CALL_ME_MAYBE", -"PROVIDER_GOOGLE_LOCAL_UNIVERSAL", -"PROVIDER_GOOGLE_CROUPIER", -"PROVIDER_GOOGLE_SKYSMART", -"PROVIDER_GOOGLE_RIDDLER", -"PROVIDER_GOOGLE_ROADCLOSURES", -"PROVIDER_GOOGLE_SPORE", -"PROVIDER_GOOGLE_LOCALIZATION", -"PROVIDER_GOOGLE_CATTERMS", -"PROVIDER_GOOGLE_GT_FIELD_OPS", -"PROVIDER_GOOGLE_MATCHMAKER", -"PROVIDER_GOOGLE_ARBITRATION", -"PROVIDER_GOOGLE_BIZBUILDER_OPS", -"PROVIDER_GOOGLE_LOCAL_INVENTORY_ADS", -"PROVIDER_GOOGLE_GT_DRAFTY", -"PROVIDER_GOOGLE_HOTELADS_OPS", -"PROVIDER_GOOGLE_MARKERS", -"PROVIDER_GOOGLE_STATE_MACHINE", -"PROVIDER_GOOGLE_ATTRIBUTES_INFERENCE", -"PROVIDER_GOOGLE_BIKESHARE", -"PROVIDER_GOOGLE_GHOSTWRITER", -"PROVIDER_GOOGLE_EDIT_PLATFORM", -"PROVIDER_GOOGLE_BLUE_GINGER", -"PROVIDER_GOOGLE_GEO_TIGER", -"PROVIDER_GOOGLE_HYADES", -"PROVIDER_GOOGLE_WEBQUARRY", -"PROVIDER_GOOGLE_GEO_MADDEN", -"PROVIDER_GOOGLE_ANDROID_PAY", -"PROVIDER_GOOGLE_OPENING_HOURS_TEAM", -"PROVIDER_GOOGLE_LOCAL_DISCOVERY", -"PROVIDER_GOOGLE_LOCAL_HEALTH", -"PROVIDER_GOOGLE_UGC_MAPS", -"PROVIDER_GOOGLE_FIBER", -"PROVIDER_GOOGLE_REVGEO", -"PROVIDER_GOOGLE_HOTELADS_PARTNER_FRONT_END", -"PROVIDER_GOOGLE_GEO_UGC_TASKS", -"PROVIDER_GOOGLE_GEOCODING", -"PROVIDER_GOOGLE_SPYGLASS", -"PROVIDER_GOOGLE_PLUS_CODES_AS_ADDRESSES", -"PROVIDER_GOOGLE_GEO_CHANGES", -"PROVIDER_GOOGLE_HUME", -"PROVIDER_GOOGLE_MEGAMIND", -"PROVIDER_GOOGLE_GT_ROADSYNTH", -"PROVIDER_GOOGLE_FIREBOLT", -"PROVIDER_GOOGLE_LOCAL_PLACE_OFFERINGS", -"PROVIDER_GOOGLE_UGC_SERVICES", -"PROVIDER_GOOGLE_GEOALIGN", -"PROVIDER_GOOGLE_GT_COMPOUNDS", -"PROVIDER_GOOGLE_FOOD_ORDERING", 
-"PROVIDER_GOOGLE_HOTEL_KNOWLEDGE_OPS", -"PROVIDER_GOOGLE_URAW", -"PROVIDER_GOOGLE_FLYEYE", -"PROVIDER_GOOGLE_YOUKE", -"PROVIDER_GOOGLE_GT_ZEPHYR", -"PROVIDER_GOOGLE_USER_SAFETY", -"PROVIDER_GOOGLE_ADDRESS_MAKER", -"PROVIDER_GOOGLE_UGC_PHOTOS", -"PROVIDER_GOOGLE_GT_WINDCHIME", -"PROVIDER_GOOGLE_SNAG_FIXER", -"PROVIDER_GOOGLE_GEO_DEALS", -"PROVIDER_GOOGLE_LOCAL_PLACE_TOPICS", -"PROVIDER_GOOGLE_PROPERTY_INSIGHTS", -"PROVIDER_GOOGLE_GEO_CONSUMER_MERCHANT_EXPERIMENTS", -"PROVIDER_GOOGLE_GEO_PORTKEY", -"PROVIDER_GOOGLE_ROAD_MAPPER", -"PROVIDER_GOOGLE_LOCATION_PLATFORM", -"PROVIDER_GOOGLE_POSTTRIP", -"PROVIDER_GOOGLE_TRAVEL_DESTINATION", -"PROVIDER_GOOGLE_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_BIZBUILDER_CLEANUP", -"PROVIDER_GOOGLE_USER", -"PROVIDER_GOOGLE_STATION", -"PROVIDER_GOOGLE_GEO_FOOD", -"PROVIDER_GOOGLE_GEO_AR", -"PROVIDER_GOOGLE_GEO_TEMPORAL", -"PROVIDER_GOOGLE_SERVICES_MARKETPLACE", -"PROVIDER_GOOGLE_IMT_CLEANUP", -"PROVIDER_GOOGLE_GEO_FOOD_MENU", -"PROVIDER_GOOGLE_CARENAV", -"PROVIDER_GOOGLE_DRIVING_FEEDS", -"PROVIDER_GOOGLE_DRIVING_UGC", -"PROVIDER_GOOGLE_POLAR", -"PROVIDER_GOOGLE_TRIWILD", -"PROVIDER_GOOGLE_CROWD_COMPUTE_OPS", -"PROVIDER_GOOGLE_SA_FROM_WEB", -"PROVIDER_GOOGLE_POI_ALIGNMENT", -"PROVIDER_GOOGLE_SA_FROM_HULK", -"PROVIDER_GOOGLE_SERVICES_INTERACTIONS", -"PROVIDER_GOOGLE_ROADS_UGC_EDITOR", -"PROVIDER_GOOGLE_SA_FROM_NG_INFERENCE", -"PROVIDER_GOOGLE_GEO_DRIVING_VIZ", -"PROVIDER_GOOGLE_GEO_TASKING", -"PROVIDER_GOOGLE_CROWDTASK_DATACOMPUTE", -"PROVIDER_GOOGLE_CROWDTASK_TASKADS", -"PROVIDER_GOOGLE_CROWDTASK_TASKMATE", -"PROVIDER_GOOGLE_CROWDTASK_FURBALL", -"PROVIDER_GOOGLE_CROWDTASK_ADAP", -"PROVIDER_GOOGLE_GPAY", -"PROVIDER_GOOGLE_GEO_UGC_TRUSTED_USERS", -"PROVIDER_GOOGLE_THIRD_PARTY_DATA_PRODUCTION", -"PROVIDER_GOOGLE_GEOTRACKER", -"PROVIDER_GOOGLE_LOCAL_LANDMARK_INFERENCE", -"PROVIDER_GOOGLE_GEO_CLOSED_LOOP", -"PROVIDER_GOOGLE_SA_FROM_MERCHANT_POSTS", -"PROVIDER_GOOGLE_CORE_DATA_RIGHTS", -"PROVIDER_GOOGLE_SA_FROM_USER_REVIEWS", 
-"PROVIDER_GOOGLE_GEO_CONTENT_FIXER", -"PROVIDER_GOOGLE_POLYGON_REFINEMENT", -"PROVIDER_GOOGLE_HANASU", -"PROVIDER_GOOGLE_FULLRIGHTS_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_FULLRIGHTS_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_ATTRIBUTION_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_SA_FROM_FOOD_MENUS", -"PROVIDER_GOOGLE_GT_CONSISTENCY_EDITS", -"PROVIDER_GOOGLE_SA_QUALITY", -"PROVIDER_GOOGLE_GDCE_CLEANUP", -"PROVIDER_GOOGLE_UGC_QUALITY_CHAINS", -"PROVIDER_GOOGLE_ATTRIBUTES_DISCOVERY", -"PROVIDER_GOOGLE_GEO_LDE", -"PROVIDER_GOOGLE_GEO_SIGNAL_TRACKING", -"PROVIDER_GOOGLE_UGC_AGGREGATION", -"PROVIDER_GOOGLE_3D_BASEMAP", -"PROVIDER_GOOGLE_MAPFACTS_PRIVACY", -"PROVIDER_GOOGLE_GT_ALF", -"PROVIDER_GOOGLE_GT_OPERATOR_PROVENANCE", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS", -"PROVIDER_GOOGLE_GT_LANE_AUTOMATION", -"PROVIDER_GOOGLE_GEO_NG_LOCAL", -"PROVIDER_GOOGLE_MAPFACTS_CLEANUP", -"PROVIDER_GOOGLE_THIRD_PARTY_UGC", -"PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", -"PROVIDER_GOOGLE_VACATION_RENTAL_PARTNERS", -"PROVIDER_GOOGLE_FEED_PROCESSOR_ROAD_INCIDENTS", -"PROVIDER_GOOGLE_DYNAMIC_BASEMAP", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS_EMEA", -"PROVIDER_GOOGLE_LOCALSEARCH", -"PROVIDER_GOOGLE_TRANSIT", -"PROVIDER_GOOGLE_GEOWIKI", -"PROVIDER_GOOGLE_CHINA_LOCAL_TEAM", -"PROVIDER_GOOGLE_SYNTHESIZED", -"PROVIDER_GOOGLE_INTERNAL_TEST", -"PROVIDER_GOOGLE_DISPUTED_AREAS", -"PROVIDER_GOOGLE_3DWAREHOUSE", -"PROVIDER_GOOGLE_GROUNDS_BUILDER", -"PROVIDER_GOOGLE_SESAME", -"PROVIDER_GOOGLE_GT", -"PROVIDER_GOOGLE_GT_BASEMAP_UPLOAD", -"PROVIDER_GOOGLE_ADSDB", -"PROVIDER_GOOGLE_MACHINE_TRANSLITERATION", -"PROVIDER_GOOGLE_TRAVELSEARCH", -"PROVIDER_GOOGLE_PANORAMIO", -"PROVIDER_GOOGLE_YOUTUBE", -"PROVIDER_GOOGLE_OLD", -"PROVIDER_GOOGLE_STREETVIEW", -"PROVIDER_GOOGLE_STREETVIEW_BIZVIEW", -"PROVIDER_GOOGLE_ZIPIT", -"PROVIDER_GOOGLE_OYSTER_CONNECT_ROUTES", -"PROVIDER_GOOGLE_GOLDEN", -"PROVIDER_GOOGLE_INNERSPACE", -"PROVIDER_GOOGLE_MAPSEARCH", -"PROVIDER_GOOGLE_CATEGORIES_TEAM", -"PROVIDER_GOOGLE_CROWDSENSUS", 
-"PROVIDER_GOOGLE_LOCAL_ALGORITHMIC_IDENTITY", -"PROVIDER_GOOGLE_FREEBASE", -"PROVIDER_GOOGLE_HOTELADS", -"PROVIDER_GOOGLE_AUTHORITY_PAGES", -"PROVIDER_GOOGLE_PLACES_API", -"PROVIDER_GOOGLE_NAMEHEATMAP", -"PROVIDER_GOOGLE_MAPMAKER", -"PROVIDER_GOOGLE_MAPMAKER_MOBILE", -"PROVIDER_GOOGLE_MAPMAKER_PANCAKE", -"PROVIDER_GOOGLE_MAPMAKER_V2", -"PROVIDER_GOOGLE_LOCAL_CLUSTERING_OPERATOR_OVERRIDE", -"PROVIDER_GOOGLE_SERVED_ON_MAPMAKER", -"PROVIDER_GOOGLE_GT_LOCAL", -"PROVIDER_GOOGLE_GT_LOCAL_WITH_RIGHTS", -"PROVIDER_GOOGLE_LOGS_RANKING_SIGNALS", -"PROVIDER_GOOGLE_ENTITY_NAVBOOST", -"PROVIDER_GOOGLE_RELATED_PLACES", -"PROVIDER_GOOGLE_KNOWN_FOR_TERMS", -"PROVIDER_GOOGLE_SYNTHETIC_AREAS", -"PROVIDER_GOOGLE_AUTHORITY_PAGE_PHOTOS", -"PROVIDER_GOOGLE_CROSS_STREETS", -"PROVIDER_GOOGLE_CORRIDORS", -"PROVIDER_GOOGLE_BICYCLE_RENTAL", -"PROVIDER_GOOGLE_CONCRETE_URLS", -"PROVIDER_GOOGLE_LEANBACK", -"PROVIDER_GOOGLE_LOCKED_LISTINGS", -"PROVIDER_GOOGLE_MONITORING", -"PROVIDER_GOOGLE_SPROUT", -"PROVIDER_GOOGLE_LOCAL_SEARCH_QUALITY", -"PROVIDER_GOOGLE_GOBY", -"PROVIDER_GOOGLE_PROBLEM_REPORT", -"PROVIDER_GOOGLE_CANDID", -"PROVIDER_GOOGLE_BIZBUILDER", -"PROVIDER_AUTOMOTIVE_NAVIGATION_DATA", -"PROVIDER_MAPDATA_SCIENCES", -"PROVIDER_MAPONICS", -"PROVIDER_SKI_RESORTS", -"PROVIDER_ZENRIN", -"PROVIDER_SANBORN", -"PROVIDER_URBAN_MAPPING", -"PROVIDER_US_GOVERNMENT", -"PROVIDER_US_CENSUS", -"PROVIDER_US_POSTAL_SERVICE", -"PROVIDER_US_GEOLOGICAL_SURVEY", -"PROVIDER_US_GNIS", -"PROVIDER_US_LANDSAT", -"PROVIDER_US_NATIONAL_GEOSPATIAL_INTELLIGENCE_AGENCY", -"PROVIDER_US_NGA_GNS", -"PROVIDER_US_SSIBL", -"PROVIDER_US_BUREAU_OF_TRANSPORTATION_STATISTICS", -"PROVIDER_US_NATIONAL_OCEANIC_AND_ATMOSPHERIC_ADMINISTRATION", -"PROVIDER_US_POLAR_GEOSPATIAL_CENTER", -"PROVIDER_US_DEPARTMENT_OF_AGRICULTURE", -"PROVIDER_US_NPI_REGISTRY", -"PROVIDER_US_BUREAU_OF_INDIAN_AFFAIRS", -"PROVIDER_DMTI_SPATIAL", -"PROVIDER_INTERNATIONAL_HYDROGRAPHIC_ORGANIZATION", -"PROVIDER_MAPLINK", -"PROVIDER_KINGWAY", 
-"PROVIDER_GEOCENTRE", -"PROVIDER_CN_NATIONAL_FOUNDAMENTAL_GIS", -"PROVIDER_CN_MAPABC", -"PROVIDER_SMITHSONIAN_INSTITUTE", -"PROVIDER_TRACKS_FOR_AFRICA", -"PROVIDER_PPWK", -"PROVIDER_LEADDOG", -"PROVIDER_CENTRE_DONNEES_ASTRONOMIQUES_STRASBOURG", -"PROVIDER_GISRAEL", -"PROVIDER_BASARSOFT", -"PROVIDER_MAPINFO", -"PROVIDER_MAPIT", -"PROVIDER_GEOBASE", -"PROVIDER_ORION", -"PROVIDER_CENTRAL_EUROPEAN_DATA_AGENCY", -"PROVIDER_ANASAT", -"PROVIDER_MINED_POSTCODES", -"PROVIDER_DMAPAS", -"PROVIDER_COMMON_LOCALE_DATA_REPOSITORY", -"PROVIDER_CH_SBB", -"PROVIDER_SKENERGY", -"PROVIDER_GBRMPA", -"PROVIDER_KOREA_POST", -"PROVIDER_CN_AUTONAVI", -"PROVIDER_MINED_POI", -"PROVIDER_ML_INFOMAP", -"PROVIDER_SNOOPER", -"PROVIDER_GEOSISTEMAS", -"PROVIDER_AFRIGIS", -"PROVIDER_TRANSNAVICOM", -"PROVIDER_EASYCONNECT", -"PROVIDER_LANTMATERIET", -"PROVIDER_LOGICA", -"PROVIDER_MAPKING", -"PROVIDER_DIANPING", -"PROVIDER_GEONAV", -"PROVIDER_HEIBONSHA", -"PROVIDER_DEUTSCHE_TELEKOM", -"PROVIDER_LINGUISTIC_DATA_CONSORTIUM", -"PROVIDER_ACXIOM", -"PROVIDER_DUN_AND_BRADSTREET", -"PROVIDER_FEDERAL_AVIATION_ADMINISTRATION", -"PROVIDER_INFOUSA", -"PROVIDER_INFOUSA_NIXIE", -"PROVIDER_THOMSON_LOCAL", -"PROVIDER_TELEFONICA_PUBLICIDAD_E_INFORMACION", -"PROVIDER_WIKIPEDIA", -"PROVIDER_INFOBEL", -"PROVIDER_MX_GOVERNMENT", -"PROVIDER_MX_NATIONAL_INSTITUTE_STATISTICS_GEOGRAPHY", -"PROVIDER_MX_SERVICIO_POSTAL_MEXICANO", -"PROVIDER_TELEGATE", -"PROVIDER_TELELISTAS", -"PROVIDER_MAPCITY", -"PROVIDER_EXPLAINER_DC", -"PROVIDER_DAIKEI", -"PROVIDER_NL_CHAMBER_OF_COMMERCE", -"PROVIDER_KOREA_INFO_SERVICE", -"PROVIDER_WIKITRAVEL", -"PROVIDER_FLICKR", -"PROVIDER_DIANCO", -"PROVIDER_VOLT_DELTA", -"PROVIDER_SG_GOVERNMENT", -"PROVIDER_SG_LAND_TRANSPORT_AUTHORITY", -"PROVIDER_MAPBAR", -"PROVIDER_LONGTU", -"PROVIDER_SA_GOVERNMENT", -"PROVIDER_SA_SAUDI_POST", -"PROVIDER_PEAKLIST", -"PROVIDER_LOCAL_BUSINESS_CENTER", -"PROVIDER_LOCAL_FEED_XML", -"PROVIDER_WEB", -"PROVIDER_RAILS_TO_TRAILS", -"PROVIDER_INDIACOM", -"PROVIDER_INFOMEDIA", 
-"PROVIDER_PICASA", -"PROVIDER_AT_GOVERNMENT", -"PROVIDER_AT_BUNDESAMT_FUR_EICH_UND_VERMESSUNGSWESEN", -"PROVIDER_AT_NATIONAL_TOURIST_OFFICE", -"PROVIDER_AT_AUSTRIA_POST", -"PROVIDER_NO_GOVERNMENT", -"PROVIDER_NO_NORSK_EIENDOMSINFORMASJON", -"PROVIDER_NO_POSTEN_NORGE_AS", -"PROVIDER_CH_GOVERNMENT", -"PROVIDER_CH_SWISS_POST", -"PROVIDER_CH_SWISSTOPO", -"PROVIDER_CH_SWISS_NATIONAL_PARK", -"PROVIDER_NAVIT", -"PROVIDER_GEOSEARCH", -"PROVIDER_DE_GOVERNMENT", -"PROVIDER_BUNDESAMT_KARTOGRAPHIE_UND_GEODASIE", -"PROVIDER_BUNDESNETZAGENTUR", -"PROVIDER_SCHOBER_GROUP", -"PROVIDER_MIREO", -"PROVIDER_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WEBSTER_TEXAS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_AMHERST_MASSACHUSETTS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BLOOMINGTON_INDIANA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PASADENA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_CHULA_VISTA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_TEMPE_ARIZONA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_COLUMBUS_OHIO", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PORTAGE_MICHIGAN", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GEORGETOWN_KENTUCKY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GREENVILLE_SOUTH_CAROLINA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_NASHVILLE_TENNESSEE", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WASHINGTON_DISTRICT_OF_COLUMBIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BOULDER_COLORADO", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY_ENVIRONMENT_BAY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY_BIELSKO_BIALA", -"PROVIDER_DE_PUBLIC_MUNICIPALITY", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_FRANKFURT", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_HAMBURG", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_KARLSRUHE", -"PROVIDER_PT_PUBLIC_MUNICIPALITY", -"PROVIDER_PT_PUBLIC_MUNICIPALITY_SANTA_CRUZ", -"PROVIDER_AT_PUBLIC_MUNICIPALITY", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_KLAGENFURT", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_LINZ", -"PROVIDER_ES_PUBLIC_MUNICIPALITY", 
-"PROVIDER_ES_PUBLIC_MUNICIPALITY_AZKOITIA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_BEASAIN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_GIRONA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_SAN_SEBASTIAN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_CATALUNYA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_HONDARRIBIA", -"PROVIDER_AU_PUBLIC_MUNICIPALITY", -"PROVIDER_AU_PUBLIC_MUNICIPALITY_LAUNCESTON_TASMANIA", -"PROVIDER_IS_PUBLIC_MUNICIPALITY", -"PROVIDER_IS_PUBLIC_MUNICIPALITY_REYKJAVIK", -"PROVIDER_NL_PUBLIC_MUNICIPALITY", -"PROVIDER_NL_PUBLIC_MUNICIPALITY_AMELSTEVEEN", -"PROVIDER_BE_PUBLIC_MUNICIPALITY", -"PROVIDER_BE_PUBLIC_MUNICIPALITY_ANTWERPEN", -"PROVIDER_CA_PUBLIC_MUNICIPALITY", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_FREDERICTON_NEW_BRUNSWICK", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_KAMLOOPS_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_NANAIMO_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_BANFF_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_CALGARY_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_TORONTO_ONTARIO", -"PROVIDER_SE_PUBLIC_MUNICIPALITY", -"PROVIDER_SE_PUBLIC_MUNICIPALITY_UMEA", -"PROVIDER_UA_PUBLIC_MUNICIPALITY", -"PROVIDER_UA_PUBLIC_MUNICIPALITY_KHARKIV", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY_AQUA_CALIENTE_CAHUILLA_INDIANS", -"PROVIDER_FR_PUBLIC_MUNICIPALITY", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_PONT_AUDEMER", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_BORDEAUX", -"PROVIDER_SG_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY_RIO_DE_JANEIRO", -"PROVIDER_MAPCUBE", -"PROVIDER_3D_REALITYMAPS", -"PROVIDER_DEUTSCHES_ZENTRUM_FUR_LUFT_UND_RAUMFAHRT", -"PROVIDER_3D_CITIES_SOCIEDADE_ANONIMA", -"PROVIDER_DISNEY", -"PROVIDER_CYBERCITY", -"PROVIDER_PRECISION_LIGHTWORKS_MODELWORKS", -"PROVIDER_VIRTUAL_HUNGARY_LIMITED", -"PROVIDER_VIRTUEL_CITY", -"PROVIDER_SCREAMPOINT_INTERNATIONAL", -"PROVIDER_AGENTSCHAP_VOOR_GEOGRAFISCHE_INFORMATIE_VLAANDEREN", -"PROVIDER_FR_GOVERNMENT", -"PROVIDER_FR_INSTITUT_GEOGRAPHIQUE_NATIONAL", 
-"PROVIDER_FR_CADASTRE", -"PROVIDER_DIADIEM", -"PROVIDER_THE_WEATHER_CHANNEL", -"PROVIDER_COWI", -"PROVIDER_FALKPLAN_ANDES", -"PROVIDER_NL_GOVERNMENT", -"PROVIDER_NL_KADASTER", -"PROVIDER_NL_BOARD_OF_TOURISM_AND_CONVENTIONS", -"PROVIDER_DIGITAL_MAP_PRODUCTS", -"PROVIDER_SILICE_DIGITAL", -"PROVIDER_TYDAC", -"PROVIDER_ALBRECHT_GOLF", -"PROVIDER_HEALTH_CH", -"PROVIDER_VISITDENMARK", -"PROVIDER_FLYHERE", -"PROVIDER_DIGITAL_DATA_SERVICES", -"PROVIDER_MECOMO", -"PROVIDER_ZA_GOVERNMENT", -"PROVIDER_ZA_RURAL_DEVELOPMENT_LAND_REFORM", -"PROVIDER_SENSIS", -"PROVIDER_JJCONNECT", -"PROVIDER_OPPLYSNINGEN", -"PROVIDER_TELLUS", -"PROVIDER_IQONIA", -"PROVIDER_BE_GOVERNMENT", -"PROVIDER_BE_NATIONAAL_GEOGRAFISCH_INSTITUUT", -"PROVIDER_BE_BRUSSELS_MOBILITY", -"PROVIDER_YELLOWMAP_AG", -"PROVIDER_STIFTUNG_GESUNDHEIT", -"PROVIDER_GIATA", -"PROVIDER_SANPARKS", -"PROVIDER_CENTRE_DINFORMATIQUE_POUR_LA_REGION_BRUXELLOISE", -"PROVIDER_INFOPORTUGAL", -"PROVIDER_NEGOCIOS_DE_TELECOMUNICACOES_E_SISTEMAS_DE_INFORMACAO", -"PROVIDER_COLLINS_BARTHOLOMEW", -"PROVIDER_PROTECT_PLANET_OCEAN", -"PROVIDER_KARTTAKESKUS", -"PROVIDER_FI_GOVERNMENT", -"PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION", -"PROVIDER_FI_NATIONAL_LAND_SURVEY", -"PROVIDER_FI_STATISTICS_FINLAND", -"PROVIDER_GB_GOVERNMENT", -"PROVIDER_GB_ORDNANCE_SURVEY", -"PROVIDER_NATURAL_ENGLAND", -"PROVIDER_WELSH_GOVERNMENT", -"PROVIDER_GB_OFFICE_FOR_NATIONAL_STATISTICS", -"PROVIDER_EPSILON", -"PROVIDER_PARTNER_FRONT_END", -"PROVIDER_CARTESIA", -"PROVIDER_SE_GOVERNMENT", -"PROVIDER_SE_TRAFIKVERKET", -"PROVIDER_SE_NATURVARDSVERKET", -"PROVIDER_IE_GOVERNMENT", -"PROVIDER_IE_ORDNANCE_SURVEY_IRELAND", -"PROVIDER_LU_GOVERNMENT", -"PROVIDER_LU_P_AND_T_LUXEMBOURG", -"PROVIDER_LU_ADMINISTRATION_DU_CADASTRE_ET_DE_LA_TOPOGRAPHIE", -"PROVIDER_LU_NATIONAL_TOURIST_OFFICE", -"PROVIDER_MAPFLOW", -"PROVIDER_TKARTOR", -"PROVIDER_JUMPSTART", -"PROVIDER_EPTISA", -"PROVIDER_MC_GOVERNMENT", -"PROVIDER_MC_PRINCIPAUTE_DE_MONACO", -"PROVIDER_MONOLIT", 
-"PROVIDER_ENVIRONMENTAL_SYSTEMS_RESEARCH_INSTITUTE", -"PROVIDER_MODIS", -"PROVIDER_GEOX", -"PROVIDER_GEODIRECTORY", -"PROVIDER_GEOPLAN", -"PROVIDER_INFODIREKT", -"PROVIDER_GEOGLOBAL", -"PROVIDER_DEUTSCHE_POST", -"PROVIDER_TRACASA", -"PROVIDER_CORREOS", -"PROVIDER_ES_GOVERNMENT", -"PROVIDER_ES_CENTRO_NACIONAL_DE_INFORMACION_GEOGRAFICA", -"PROVIDER_EDIMAP", -"PROVIDER_VERIZON", -"PROVIDER_NATIONAL_GEOGRAPHIC_MAPS", -"PROVIDER_PROMAPS", -"PROVIDER_CONSODATA", -"PROVIDER_DE_AGOSTINI", -"PROVIDER_FEDERPARCHI", -"PROVIDER_NAVIGO", -"PROVIDER_ITALIAMAPPE", -"PROVIDER_CZECOT", -"PROVIDER_NATURAL_EARTH", -"PROVIDER_REGIO", -"PROVIDER_SHIPWRECK_CENTRAL", -"PROVIDER_RUTGERS_STATE_UNIVERSITY", -"PROVIDER_TWINICE", -"PROVIDER_NORTHERN_IRELAND_TOURIST_BOARD", -"PROVIDER_INFOGROUP", -"PROVIDER_TNET", -"PROVIDER_CTT_CORREIOS_DE_PORTUGAL", -"PROVIDER_EUROPARC", -"PROVIDER_IUPPITER", -"PROVIDER_MICHAEL_BAUER_INTERNATIONAL", -"PROVIDER_LEPTON", -"PROVIDER_MAPPOINT", -"PROVIDER_GEODATA", -"PROVIDER_RU_GOVERNMENT", -"PROVIDER_RU_FNS_KLADR", -"PROVIDER_BR_GOVERNMENT", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DO_MEIO_AMBIENTE_E_DOS_RECURSOS_NATURAIS_RENOVAVEIS", -"PROVIDER_BR_MINISTERIO_DO_MEIO_AMBIENTE", -"PROVIDER_BR_AGENCIA_NACIONAL_DE_AGUAS", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DE_GEOGRAFIA_E_ESTATISTICA", -"PROVIDER_BR_FUNDACAO_NACIONAL_DO_INDIO", -"PROVIDER_BR_DEPARTAMENTO_NACIONAL_DE_INFRAESTRUTURA_DE_TRANSPORTES", -"PROVIDER_AZAVEA", -"PROVIDER_NORTHSTAR", -"PROVIDER_COMMEDI", -"PROVIDER_NEXUS_GEOGRAFICS", -"PROVIDER_INFOERA", -"PROVIDER_AD_GOVERNMENT", -"PROVIDER_AD_AREA_DE_CARTOGRAFIA", -"PROVIDER_MAXXIMA", -"PROVIDER_SI_GOVERNMENT", -"PROVIDER_SI_AGENCY_FOR_ENVIRONMENT", -"PROVIDER_TRANSPORT_HI_TECH_CONSULTANTS", -"PROVIDER_L1_TECHNOLOGIES", -"PROVIDER_TELEMEDIA", -"PROVIDER_CDCOM_PROGOROD", -"PROVIDER_MIT_CITYGUIDE", -"PROVIDER_SUNCART", -"PROVIDER_MICROMAPPER", -"PROVIDER_RICHI", -"PROVIDER_FORUM44", -"PROVIDER_SEAT", -"PROVIDER_VALASSIS", -"PROVIDER_NAVICOM", 
-"PROVIDER_COLTRACK", -"PROVIDER_PSMA_AUSTRALIA", -"PROVIDER_PT_DUTA_ASTAKONA_GIRINDA", -"PROVIDER_CA_GOVERNMENT", -"PROVIDER_STATISTICS_CANADA", -"PROVIDER_TOCTOC", -"PROVIDER_RMSI", -"PROVIDER_TRUE_TECHNOLOGY", -"PROVIDER_INCREMENT_P_CORPORATION", -"PROVIDER_GOJAVAS", -"PROVIDER_GEOINFORMATION_GROUP", -"PROVIDER_CYBERSOFT", -"PROVIDER_TSENTR_EFFEKTIVNYKH_TEKHNOLOGIY", -"PROVIDER_EE_GOVERNMENT", -"PROVIDER_EE_MAA_AMET", -"PROVIDER_GASBUDDY", -"PROVIDER_DK_GOVERNMENT", -"PROVIDER_DK_GEODATASTYRELSEN", -"PROVIDER_MURCIA_REGION_GOVERNMENT", -"PROVIDER_CORREIOS", -"PROVIDER_WEST_WORLD_MEDIA", -"PROVIDER_INTERNATIONAL_MAPPING_ASSOCIATION", -"PROVIDER_MEDICARE", -"PROVIDER_POLARIS", -"PROVIDER_TW_GOVERNMENT", -"PROVIDER_TW_MINISTRY_OF_THE_INTERIOR_SURVEYING_AND_MAPPING_CENTER", -"PROVIDER_NORDECA", -"PROVIDER_AFRIMAPPING", -"PROVIDER_OVERDRIVE", -"PROVIDER_PROVIDER_NETWORK_DIRECTORIES", -"PROVIDER_BR_MINISTERIO_DA_SAUDE", -"PROVIDER_DIGITAL_EGYPT", -"PROVIDER_INRIX", -"PROVIDER_ARPINDO", -"PROVIDER_IT_GOVERNMENT", -"PROVIDER_ISTITUTO_GEOGRAFICO_MILITARE", -"PROVIDER_EAST_END_GROUP", -"PROVIDER_INGEOLAN", -"PROVIDER_SEMACONNECT", -"PROVIDER_BLINK", -"PROVIDER_EVGO", -"PROVIDER_CHARGEPOINT", -"PROVIDER_TPL_TRAKKER", -"PROVIDER_OI", -"PROVIDER_MAPARADAR", -"PROVIDER_SINGAPORE_POST", -"PROVIDER_CHARGEMASTER", -"PROVIDER_TESLA", -"PROVIDER_VISICOM", -"PROVIDER_GEOLYSIS", -"PROVIDER_ZEPHEIRA", -"PROVIDER_HUBJECT", -"PROVIDER_PODPOINT", -"PROVIDER_CHARGEFOX", -"PROVIDER_KR_GOVERNMENT", -"PROVIDER_KR_MOLIT", -"PROVIDER_KR_MINISTRY_OF_THE_INTERIOR_AND_SAFETY", -"PROVIDER_CRITCHLOW", -"PROVIDER_EIFRIG", -"PROVIDER_GIREVE", -"PROVIDER_CN_NAVINFO", -"PROVIDER_JAPAN_CHARGE_NETWORK", -"PROVIDER_NOBIL", -"PROVIDER_INDIA_BANKS", -"PROVIDER_INDONESIA_ELECTION_KPU", -"PROVIDER_CAREERS360", -"PROVIDER_SOURCE_LONDON", -"PROVIDER_EVBOX", -"PROVIDER_JP_GOVERNMENT", -"PROVIDER_JP_MINISTRY_OF_THE_ENVIRONMENT", -"PROVIDER_YUMYUM", -"PROVIDER_HWW_AUSTRALIA", -"PROVIDER_CINERGY", 
-"PROVIDER_MTIME", -"PROVIDER_KULTUNAUT", -"PROVIDER_BLITZ", -"PROVIDER_PIA", -"PROVIDER_INTERPARK", -"PROVIDER_CINEMA_ONLINE", -"PROVIDER_BELBIOS", -"PROVIDER_MOVIESEER", -"PROVIDER_SODAMEDYA", -"PROVIDER_ATMOVIES", -"PROVIDER_HOTELBEDS", -"PROVIDER_VERICRED", -"PROVIDER_CIRRANTIC", -"PROVIDER_GOGO_LABS", -"PROVIDER_ELECTRIFY_AMERICA", -"PROVIDER_CMS_MPPUF", -"PROVIDER_DIGIROAD", -"PROVIDER_KONTEX_GEOMATICS", -"PROVIDER_NZ_GOVERNMENT", -"PROVIDER_NZ_LINZ", -"PROVIDER_NZ_DOC", -"PROVIDER_FASTNED", -"PROVIDER_DESTINY_CS", -"PROVIDER_IONITY", -"PROVIDER_EV_CONNECT", -"PROVIDER_PANPAGES", -"PROVIDER_ETECNIC", -"PROVIDER_VOLTA", -"PROVIDER_NISSAN_MEXICO", -"PROVIDER_BMW_GROUP_LATIN_AMERICA", -"PROVIDER_FEDERAL_ELECTRICITY_COMMISSION_MEXICO", -"PROVIDER_VOLVO_CARS_BRASIL", -"PROVIDER_CHARGE_AND_PARKING", -"PROVIDER_DEDUCE_TECHNOLOGIES", -"PROVIDER_SK_TELECOM", -"PROVIDER_ECO_MOVEMENT", -"PROVIDER_GOOGLE_GMS", -"PROVIDER_EASYWAY", -"PROVIDER_PHYSICIAN_COMPARE", -"PROVIDER_HOSPITAL_COMPARE", -"PROVIDER_ENDOLLA_BARCELONA", -"PROVIDER_BE_CHARGE", -"PROVIDER_ONE_NETWORK", -"PROVIDER_CARENAV_DUPLEX", -"PROVIDER_CARENAV_POI", -"PROVIDER_IN_GOVERNMENT", -"PROVIDER_SURVEY_OF_INDIA", -"PROVIDER_E_ON", -"PROVIDER_ELECTRIFY_CANADA", -"PROVIDER_GRIDCARS", -"PROVIDER_DRIVECO", -"PROVIDER_GREEN_ACTION_STUDIOS", -"PROVIDER_GREEN_ACTION_STUDIO", -"PROVIDER_EVINY", -"PROVIDER_MASTERCARD", -"PROVIDER_VATTENFALL", -"PROVIDER_VIETGIS", -"PROVIDER_UNITE", -"PROVIDER_NEOGY", -"PROVIDER_AMPUP", -"PROVIDER_LOOP", -"PROVIDER_ZEST", -"PROVIDER_EZVOLT", -"PROVIDER_JOLT", -"PROVIDER_CHARGESMITH", -"PROVIDER_PLUGO", -"PROVIDER_ELECTRIC_ERA", -"PROVIDER_FLO", -"PROVIDER_DIGITAL_CHARGING_SOLUTIONS", -"PROVIDER_ELECTRIC_PE" -], -"enumDeprecated": [ -false, -false, -true, -false, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -true, -false, -true, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, 
-false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -true, -true, -false, -true, -false, -true, -true, -false, -true, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -false, -false, -false, -true, -false, -true, -false, -true, -true, -true, -false, -true, -true, -true, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"ABSTRACT The root of all provider types. This should never be present on an actual feature, but can be useful when calling InCategory.", -"not actually a legal value, used as sentinel", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"All new \"Google\" provider entries must be full ints. 
The next available ID is: 0x111730C2", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This is an internal *only* provider meant for sending wipeout requests to mapfacts.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Never rely on source infos with this provider to identify whether or not a feature is a Prominent Place! Instead, use the proper API, localsearch_clustering::QualityTierHelper::IsProminentPlace().", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Shopping Attributes Discovery", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"", -"The next new \"Google\" provider entries should be placed above.", -"UMBRELLA", -"", -"", -"", -"This is a testing provider for teams that wish to integrate with components of the Geo Data Infrastructure that require a valid provider. 
No production data should ever be sent using this provider.", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"0x1117F must not be used, since its range extends the PROVIDER_GOOGLE hierarchy.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated in favor of PROVIDER_GOOGLE_STRUCTURED_DATA (for attributes) and PROVIDER_GOOGLE_GEO_TIGER (for categories).", -"", -"0x1117FF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. aka Local AI.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"No data is obtained from this provider. It is only used to identify features that must be served on MapMaker.", -"", -"", -"", -"", -"", -"", -"0x1117FFF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. Synthetically generated areas (sublocalities/neighborhoods/ postal codes/etc) based on dropped terms from approximate geocoding. More info on go/syntheticareas.", -"", -"", -"", -"Similar to Google Transit, a provider that aggregates positions of bicycle rental points that we have agreements with to show on maps", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This range has been depleted. 
For new Ids see PROVIDER_GOOGLE_SUBRANGE above.", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"Small Scale International Boundary Lines", -"", -"NOAA", -"PGC", -"USDA", -"National Provider Identifier Registry", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"old name for PROVIDER_NAVIGO", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"See b/33687395", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"ABSTRACT", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", 
-"IBAMA", -"MMA", -"ANA", -"IBGE", -"FUNAI", -"DNIT", -"", -"", -"", -"", -"", -"ABSTRACT", -"Department of Cartography", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"0x12 and 0x120 are not to be used. OOO CET", -"ABSTRACT", -"Estonian Land Board", -"", -"ABSTRACT", -"Danish Geodata Agency", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"Ministry of land, infrastructure and transport, \uad6d\ud1a0\uad50\ud1b5\ubd80, Guktogyotongbu", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Use PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION.", -"", -"ABSTRACT", -"Land Information New Zealand", -"NZ Department of Conservation", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Note: Next available value is 0x1275." -], -"type": "string" -}, -"rawMetadata": { -"description": "For every key that is used in raw_data from this source, there must be a corresponding entry in raw_metadata that describes this key.", -"items": { -"$ref": "GeostoreRawMetadataProto" -}, -"type": "array" -}, -"release": { -"description": "A release string that doesn't have to be a date. This is provided so that we can preserve provider release strings that aren't based on dates. If you don't set it, the release_date will get formatted into this field for debugging purposes.", -"type": "string" -}, -"releaseDate": { -"$ref": "GeostoreDateTimeProto", -"description": "The release date of this data." 
-}, -"sourceDataset": { -"description": "A data provider defined string describing the source dataset from which the features of this data source were generated. For example, the MultiNet \"fra\" dataset produces features for both France and Monaco.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreDateTimeProto": { -"description": "WARNING: Outside of FeatureProto, please avoid in favor of a standard civil time type. Direct usage is error-prone due to the conflation of physical time and civil time (go/httat). In a protocol buffer, please use google.type.Date, with an additional google.type.TimeOfDay for precision finer-grained than a day. (For google.type.DateTime, go/prototime#types cites go/httat#zoned_datetime as a caveat). In a programming language, see go/time-devguide/languages. Additionally in C++, google3/geostore/base/public/datetime.h has conversion functions between DateTimeProto and Abseil's civil time types.", -"id": "GeostoreDateTimeProto", -"properties": { -"precision": { -"description": "This attribute describes the precision of the date and time. It would be unusual for a data provider to provide a precision along with their date. It is more likely that the precision of a date will be inferred from the date format. For example \"19th century\" is likely to be correct to the century, while \"1800\" is probably correct to the year. The precision should be semantically interpreted as a cast, so a DateTimeProto object with a seconds value corresponding to 2018-03-28 18:40:00 UTC and a precision of MONTH should be interpreted as \"March 2018\". The enums above are only some of the possible precision levels for dates and times. Clients may wish to add more precision enums in the future. However, these enums must be ordered by decreasing duration. 
Clients should be able to write date formatting code that looks like this: if (datetime.precision() <= DateTimeProto::PRECISION_CENTURY) { date = FormatCenturyDate(proto.seconds()); } else if (proto.precision() <= case DateTimeProto::PRECISION_DECADE) { date = FormatDecadeDate(proto.seconds()); } else { ... } See geostore/base/public/datetime.h for date formatting utility functions.", -"enum": [ -"PRECISION_CENTURY", -"PRECISION_DECADE", -"PRECISION_YEAR", -"PRECISION_MONTH", -"PRECISION_DAY", -"PRECISION_HOUR", -"PRECISION_MINUTE", -"PRECISION_SECOND" -], -"enumDescriptions": [ -"The enum values here should be ordered from low precision to high precision.", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"seconds": { -"description": "Number of seconds since (or before) the UNIX epoch (January 1, 1970). This is also the standard epoch for Java and Python time representations. If it is important for this time be displayed correctly for different time zones, convert the time to Coordinated Universal Time (UTC).", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreDimensionComparisonProto": { -"description": "A dimension value tagged with a comparison operator. This can be used for height, width, or length.", -"id": "GeostoreDimensionComparisonProto", -"properties": { -"comparisonOperator": { -"enum": [ -"UNSPECIFIED", -"EQUAL", -"LESS_THAN", -"LESS_THAN_OR_EQUAL", -"GREATER_THAN", -"GREATER_THAN_OR_EQUAL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"dimensionWithUnit": { -"$ref": "GeostoreDimensionProto" -} -}, -"type": "object" -}, -"GeostoreDimensionProto": { -"description": "A dimension with a numerical value and unit. 
This can be a height, width, or length.", -"id": "GeostoreDimensionProto", -"properties": { -"dimension": { -"format": "float", -"type": "number" -}, -"unit": { -"enum": [ -"UNIT_UNKNOWN", -"METERS", -"FEET" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in MapFacts.", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreDisplayDataProto": { -"description": "This holds data specific to rendering a POI on a map. It's derived from data already in MapFacts, e.g. containing features and the feature's point field. If empty, this proto should be ignored for rendering. See go/maps-render-alignment for motivation and more details.", -"id": "GeostoreDisplayDataProto", -"properties": { -"displayLocation": { -"$ref": "GeostorePointProto", -"description": "The location where this feature should be rendered." -} -}, -"type": "object" -}, -"GeostoreDisputedAreaProto": { -"description": "This protocol buffer is used to store information about disputed areas. E.g., the political power that administers a disputed area or the countries that claim it. It should only be used for features of TYPE_DISPUTED_AREA.", -"id": "GeostoreDisputedAreaProto", -"properties": { -"administeredBy": { -"description": "If present this feature is administered by the power present in the field. In most cases this will be a two letter ISO-3166 region code, but may be a descriptive string if no region code is available.", -"type": "string" -}, -"claimant": { -"description": "Feature IDs (usually a country feature ID) of all parties who claim this feature. 
No claimants means the claim by the country that contains it (based on country polygon) is not disputed.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreDoodleProto": { -"description": "This protocol buffer holds the doodle-specific attributes for features of type TYPE_DOODLE.", -"id": "GeostoreDoodleProto", -"properties": { -"type": { -"description": "The type of this feature -- see comments above.", -"enum": [ -"TYPE_ANY", -"TYPE_USER_DEFINED_LABEL", -"TYPE_POINT_ANNOTATION", -"TYPE_LINE_ANNOTATION", -"TYPE_AREA_ANNOTATION" -], -"enumDescriptions": [ -"ABSTRACT", -"A label that a cartographer has manually placed on the map. These may be read in from configuration files in a renderer and drawn on the map in the appropriate location.", -"Point, line, and area annotations. There used to be a more detailed list of doodle types here, but the doodles encoded with them turned out to have a fair bit of tricky datasource-specific baggage. Rather than list provider-specific types here, they've been pulled out and are now private to the importer and the renderer. The point/line/area distinction however has proven to be useful for configuration and filtering, and remains.", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreDurationBasedRateProto": { -"description": "A single cost which will apply based on the duration of utilization. The cost may apply once, or repeatedly on some interval, to account for the total utilization. If the duration expressed by range_start_seconds and range_end_seconds do not cover the entire duration of the utilization (i.e. from 0 to some time greater than the total utilization time), this must be combined with other DurationBasedRateProtos such that the entire duration of the utilization is accounted for. 
See go/rate-schema for more details.", -"id": "GeostoreDurationBasedRateProto", -"properties": { -"isFree": { -"description": "If true, represents that the rate is free; i.e. the price is 0 in any currency. If this is true, price must be empty.", -"type": "boolean" -}, -"periodicitySeconds": { -"description": "The billable unit of the rate; i.e. after having utilized the service for exactly periodicity_seconds, the total cost should increase by \u2018price\u2019. For example, if the rate expresses a price per hour, then periodicity_seconds should be set to 3600. If this is unset, then the rate does not vary based on duration, and price represents a flat cost. May only be set if price is nonempty.", -"format": "int32", -"type": "integer" -}, -"price": { -"description": "The total price, in each applicable currency, of utilizing the service for periodicity_seconds, or for the entire duration expressed by range_start_seconds and range_end_seconds if periodicity_seconds is 0. Each entry should have an ID of /measurement_unit/money_value and consist of two properties: one with an ID of /measurement_unit/money_value/amount and a float value with the amount, and another with the ID /measurement_unit/money_value/currency and an ID value with the MID of the proper currency. May only be set if is_free is false.", -"items": { -"$ref": "FreebaseTopic" -}, -"type": "array" -}, -"rangeEndSeconds": { -"description": "Upper bound for durations to match, exclusive. Unset implies indefinite.", -"format": "int32", -"type": "integer" -}, -"rangeStartSeconds": { -"description": "Lower bound for durations to match, inclusive. 
Required; a value of 0 expresses that the price applies from the start of the utilization period.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreElevationModelProto": { -"description": "Represents raster digital elevation model data.", -"id": "GeostoreElevationModelProto", -"properties": { -"blendOrder": { -"description": "Defines the relative order in which terrain data should be rendered. Features with higher blend_order should be blended on top of features with lower blend_order. NOTE: this is backwards from the way BlendRank works in Magrathean.", -"format": "int32", -"type": "integer" -}, -"dataLevel": { -"description": "The zoom level at which this data is defined. Level 0 is world level data, and each increase in zoom level corresponds to a factor of 2 increase in scale.", -"format": "int32", -"type": "integer" -}, -"dataMaxlevel": { -"description": "The maximum (finest) level at which this terrain data has sufficient resolution to be displayed.", -"format": "int32", -"type": "integer" -}, -"elevationData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place to store an elevation data protocol buffer. Currently, this must be a keyhole::AssetTileCompressed (see google3/keyhole/common/proto/magrathean.protodevel)." -}, -"fullChildDataAvailable": { -"description": "If true, all of the data contained in this feature is available at the next highest (more detailed) level. 
If this is true, partial_child_data_available should also be true.", -"type": "boolean" -}, -"partialChildDataAvailable": { -"description": "If true, at least part of the data contained in this feature is available at the next highest (more detailed) level.", -"type": "boolean" -} -}, -"type": "object" -}, -"GeostoreElevationProto": { -"description": "This protocol buffer holds elevation and related data.", -"id": "GeostoreElevationProto", -"properties": { -"averageElevationMeters": { -"description": "The average elevation of the feature in meters above the local mean sea level.", -"format": "double", -"type": "number" -}, -"peak": { -"$ref": "GeostorePeakProto", -"description": "Additional details for TYPE_PEAK and TYPE_VOLCANO features." -} -}, -"type": "object" -}, -"GeostoreEntranceProto": { -"description": "This protocol buffer holds entrance-specific attributes for features of type TYPE_ENTRANCE.", -"id": "GeostoreEntranceProto", -"properties": { -"allowance": { -"enum": [ -"ENTER_AND_EXIT", -"ENTER_ONLY", -"EXIT_ONLY" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"canEnter": { -"deprecated": true, -"description": "DEPRECATED. Please use enter_or_exit instead.", -"type": "boolean" -}, -"canExit": { -"deprecated": true, -"description": "Whether the target can be entered through this entrance. Whether the target can be exited through this entrance.", -"type": "boolean" -} -}, -"type": "object" -}, -"GeostoreEntranceReferenceProto": { -"description": "Models a relationship between a feature and its entrance or exit.", -"id": "GeostoreEntranceReferenceProto", -"properties": { -"featureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "Feature ID of the related entrance. References should refer to TYPE_ENTRANCE or TYPE_COMPOUND features that are entrances or exits of the referencing feature." 
-} -}, -"type": "object" -}, -"GeostoreEstablishmentProto": { -"description": "This protocol buffer holds establishment-specific attributes for features of type TYPE_ESTABLISHMENT.", -"id": "GeostoreEstablishmentProto", -"properties": { -"bizbuilderReference": { -"$ref": "GeostoreBizBuilderReferenceProto", -"description": "Reference to BizBuilder data for this establishment. The bizbuilder_reference field indicates that a feature is claimed in CBDB (with the canonical state in MapFacts). The bizbuilder_reference is different from the social_reference's claimed_gaia_id because some BizBuilder clients will not have +Pages. All claimed businesses should have a bizbuilder_reference." -}, -"hours": { -"$ref": "GeostoreTimeScheduleProto", -"description": "Regular opening hours for the establishment (weekly schedule)." -}, -"openingHours": { -"$ref": "GeostoreOpeningHoursProto", -"description": "Opening hours for this establishment, including regular weekly hours and exceptional hours (e.g. on holidays). NOTE: in practice, only the exceptional hours are filled in this message. A schema migration for regular weekly hours was planned back in 2015 (see b/23105782) but was not completed and is (as of May 2018) not prioritized. Clients should continue getting regular opening hours from the `hours` field above. In openinghours.h there is a utility function `GetOpeningHoursFromFeature` that merges `EstablishmentProto.hours` into this proto." -}, -"priceInfo": { -"$ref": "GeostorePriceInfoProto", -"description": "Pricing for products and services offered. Example: menus for restaurants." -}, -"serviceArea": { -"$ref": "GeostoreServiceAreaProto" -}, -"telephone": { -"description": "Telephone number and related information.", -"items": { -"$ref": "GeostoreTelephoneProto" -}, -"type": "array" -}, -"type": { -"deprecated": true, -"description": "** DEPRECATED ** This is deprecated in favor of the top-level (in FeatureProto) set of GConcepts. 
The type of establishment -- see comments above.", -"enum": [ -"TYPE_ANY", -"TYPE_UNDEFINED", -"TYPE_DEPRECATED_AVIATION", -"TYPE_BUSINESS", -"TYPE_TRAVEL_SERVICE", -"TYPE_LODGING", -"TYPE_HOTEL", -"TYPE_RESORT", -"TYPE_MOTEL", -"TYPE_HOSTEL", -"TYPE_GUESTHOUSE", -"TYPE_VACATION_RENTAL", -"TYPE_GAS_STATION", -"TYPE_REST_AREA", -"TYPE_CASH_MACHINE", -"TYPE_CAR_RENTAL", -"TYPE_CAR_REPAIR", -"TYPE_TAXI_STAND", -"TYPE_TRAVEL_AGENT", -"TYPE_BICYCLE_RENTAL_POINT", -"TYPE_ELECTRIC_VEHICLE_CHARGING_STATION", -"TYPE_SHOPPING", -"TYPE_GROCERY", -"TYPE_ANTIQUES", -"TYPE_APPAREL", -"TYPE_GIFTS", -"TYPE_JEWELRY", -"TYPE_SPORTING_GOODS", -"TYPE_VEHICLE", -"TYPE_SHOPPING_CENTER", -"TYPE_SUPERMARKET", -"TYPE_FAIRGROUNDS", -"TYPE_MARKET", -"TYPE_PRODUCE_MARKET", -"TYPE_FARMERS_MARKET", -"TYPE_LIQUOR_STORE", -"TYPE_SOUVENIR_SHOP", -"TYPE_INDUSTRIAL", -"TYPE_CONSTRUCTION", -"TYPE_BUILDING_MATERIAL", -"TYPE_SECURITY_PRODUCTS", -"TYPE_MECHANICAL", -"TYPE_TEXTILE", -"TYPE_CHEMICAL", -"TYPE_METAL", -"TYPE_TRANSPORTATION", -"TYPE_FREIGHT", -"TYPE_AVIATION", -"TYPE_COURIER", -"TYPE_MOVING", -"TYPE_PACKAGING", -"TYPE_RAIL", -"TYPE_PUBLIC_TRANSIT", -"TYPE_WAREHOUSE", -"TYPE_DEFENSE", -"TYPE_AGRICULTURE", -"TYPE_PLANTATION", -"TYPE_PLANT_NURSERY", -"TYPE_DESIGN", -"TYPE_UTILITIES", -"TYPE_POWER_PLANT", -"TYPE_SEWAGE_TREATMENT_PLANT", -"TYPE_WATER_TREATMENT_PLANT", -"TYPE_SUBSTATION", -"TYPE_MANUFACTURING", -"TYPE_BIOTECH", -"TYPE_MATERIALS", -"TYPE_MINING", -"TYPE_QUARRY", -"TYPE_TANNERY", -"TYPE_OIL_REFINERY", -"TYPE_ADVERTISING_MARKETING", -"TYPE_LEGAL", -"TYPE_FAMILY_LAW", -"TYPE_IP_LAW", -"TYPE_LABOR_LAW", -"TYPE_PERSONAL_INJURY_LAW", -"TYPE_CRIMINAL_LAW", -"TYPE_PERSONAL_FINANCE", -"TYPE_LIFE_INSURANCE", -"TYPE_LENDING", -"TYPE_ACCOUNTING", -"TYPE_INVESTING", -"TYPE_BANKING", -"TYPE_HEALTH", -"TYPE_HEALTH_EQUIPMENT", -"TYPE_NURSING", -"TYPE_HEALTH_INSURANCE", -"TYPE_HEALTH_FACILITY", -"TYPE_DIAGNOSTIC_CENTER", -"TYPE_HEALTH_RESOURCES", -"TYPE_NUTRITION", -"TYPE_VISION", 
-"TYPE_COUNSELING", -"TYPE_MASSAGE", -"TYPE_BLOOD_BANK", -"TYPE_HEARING", -"TYPE_HOME_GARDEN", -"TYPE_INTERIOR_DECOR", -"TYPE_DOMESTIC_SERVICES", -"TYPE_APPLIANCES", -"TYPE_PEST_CONTROL", -"TYPE_GARDENING", -"TYPE_ELECTRONICS", -"TYPE_COMPUTER", -"TYPE_COMPUTER_HARDWARE", -"TYPE_COMPUTER_SECURITY", -"TYPE_COMPUTER_SOFTWARE", -"TYPE_COMPUTER_SUPPORT", -"TYPE_AUDIO_DEVICES", -"TYPE_VIDEO_DEVICES", -"TYPE_REAL_ESTATE", -"TYPE_OFFICE_SERVICES", -"TYPE_ENTERTAINMENT", -"TYPE_GAMES", -"TYPE_CASINO", -"TYPE_LOTTO", -"TYPE_VIDEO", -"TYPE_CLUBS", -"TYPE_DISCOTHEQUE", -"TYPE_ANIMATION", -"TYPE_MODELING", -"TYPE_HUMOR", -"TYPE_MOVIES", -"TYPE_MOVIE_RENTAL", -"TYPE_MOVIE_THEATER", -"TYPE_MUSIC", -"TYPE_RADIO", -"TYPE_TV", -"TYPE_BAR", -"TYPE_PRINT_MEDIA", -"TYPE_ADULT", -"TYPE_SEXUAL_SERVICES", -"TYPE_ANIMALS", -"TYPE_PETS", -"TYPE_FISHERY", -"TYPE_ARTS", -"TYPE_BOOKS", -"TYPE_PERFORMING_ARTS", -"TYPE_GALLERY", -"TYPE_AUTOMOTIVE", -"TYPE_PARTS", -"TYPE_AUTO_FINANCE", -"TYPE_AUTO_INSURANCE", -"TYPE_RV", -"TYPE_MOTORCYCLES", -"TYPE_CARS", -"TYPE_TRUCKS_SUVS", -"TYPE_LICENSING", -"TYPE_MAINTENANCE", -"TYPE_PERSONAL_CARE", -"TYPE_BODY_ART", -"TYPE_COSMETICS", -"TYPE_FITNESS", -"TYPE_YOGA_CENTER", -"TYPE_GYM", -"TYPE_HAIR_CARE", -"TYPE_SPA", -"TYPE_BEAUTY_SALON", -"TYPE_CORPORATE_EVENTS", -"TYPE_HUMAN_RESOURCES", -"TYPE_FOOD_AND_DRINK", -"TYPE_BEVERAGE", -"TYPE_RECIPES", -"TYPE_COOKWARE", -"TYPE_CULINARY", -"TYPE_RETAIL", -"TYPE_RESTAURANT", -"TYPE_COFFEE", -"TYPE_BAKERY", -"TYPE_FOOD_CENTER", -"TYPE_TEA", -"TYPE_CAFE", -"TYPE_WINERY", -"TYPE_BREWERY", -"TYPE_FAST_FOOD", -"TYPE_FOOD_DELIVERY", -"TYPE_INTERNET", -"TYPE_WEB_DESIGN", -"TYPE_WEB_HOSTING", -"TYPE_WEB_SERVICES", -"TYPE_LIFESTYLE", -"TYPE_PHOTO_VIDEO", -"TYPE_ACTIVITIES", -"TYPE_BOATING", -"TYPE_CYCLING", -"TYPE_EQUESTRIAN", -"TYPE_FISHING", -"TYPE_HIKING", -"TYPE_HUNTING", -"TYPE_SWIMMING", -"TYPE_GOLF", -"TYPE_GOLF_COURSE", -"TYPE_BASEBALL", -"TYPE_BASKETBALL", -"TYPE_HOCKEY", -"TYPE_MOTOR_SPORTS", 
-"TYPE_WINTER_SPORTS", -"TYPE_FOOTBALL", -"TYPE_SOCCER", -"TYPE_ICE_SKATING", -"TYPE_BOXING", -"TYPE_CRICKET", -"TYPE_ROWING", -"TYPE_RUGBY", -"TYPE_RACQUET_SPORTS", -"TYPE_ROCK_CLIMBING", -"TYPE_REFERENCES", -"TYPE_MAPS", -"TYPE_TIME", -"TYPE_SCIENTIFIC_EQUIPMENT", -"TYPE_TELECOMMUNICATIONS", -"TYPE_EVENT_VENUE", -"TYPE_BANQUET_HALL", -"TYPE_CONFERENCE_HALL", -"TYPE_WEDDING_HALL", -"TYPE_EXHIBITION_HALL", -"TYPE_COMMUNITY_CENTER", -"TYPE_AUDITORIUM", -"TYPE_FUNCTION_HALL", -"TYPE_CONCERT_HALL", -"TYPE_AMPHITHEATER", -"TYPE_LAUNDRY", -"TYPE_LAUNDROMAT", -"TYPE_DRY_CLEANER", -"TYPE_MORTUARY", -"TYPE_REPAIR_AND_MAINTENANCE", -"TYPE_GOVERNMENT", -"TYPE_BORDER_CROSSING", -"TYPE_CITY_HALL", -"TYPE_COURTHOUSE", -"TYPE_EMBASSY", -"TYPE_LIBRARY", -"TYPE_PRISON", -"TYPE_TAX_OFFICE", -"TYPE_PROSECUTORS_OFFICE", -"TYPE_CONGRESS", -"TYPE_TOWN_COUNCIL", -"TYPE_CAPITOL_BUILDING", -"TYPE_VOTING_FACILITY", -"TYPE_CHECKPOINT", -"TYPE_SCHOOL", -"TYPE_UNIVERSITY", -"TYPE_ORPHANAGE", -"TYPE_KINDERGARTEN", -"TYPE_DAYCARE", -"TYPE_ACADEMY", -"TYPE_TRADE_SCHOOL", -"TYPE_SKILL_INSTRUCTION", -"TYPE_EMERGENCY", -"TYPE_HOSPITAL", -"TYPE_PHARMACY", -"TYPE_POLICE", -"TYPE_FIRE", -"TYPE_DOCTOR", -"TYPE_DENTIST", -"TYPE_VETERINARIAN", -"TYPE_FIRST_AID", -"TYPE_CIVIL_DEFENSE", -"TYPE_TOURIST_DESTINATION", -"TYPE_ECO_TOURIST_DESTINATION", -"TYPE_BIRD_WATCHING", -"TYPE_NATURE_RESERVE", -"TYPE_MUSEUM", -"TYPE_VISITOR_CENTER", -"TYPE_OBSERVATION_DECK", -"TYPE_OBSERVATORY", -"TYPE_SCENIC_POINT", -"TYPE_ZOO", -"TYPE_AQUARIUM", -"TYPE_AMUSEMENT_PARK", -"TYPE_MONUMENT", -"TYPE_PALACE", -"TYPE_FORT", -"TYPE_TOWER", -"TYPE_LIGHTHOUSE", -"TYPE_TEMPLE", -"TYPE_CHURCH", -"TYPE_GURUDWARA", -"TYPE_HINDU_TEMPLE", -"TYPE_MOSQUE", -"TYPE_SYNAGOGUE", -"TYPE_BUDDHIST_TEMPLE", -"TYPE_JAIN_TEMPLE", -"TYPE_BAHAI_TEMPLE", -"TYPE_SHINTO_TEMPLE", -"TYPE_MORMON_TEMPLE", -"TYPE_SPORTS_COMPLEX", -"TYPE_STADIUM", -"TYPE_BOWLING", -"TYPE_BADMINTON", -"TYPE_TENNIS", -"TYPE_TABLE_TENNIS", -"TYPE_PARK", -"TYPE_LOCAL_PARK", 
-"TYPE_NATIONAL_PARK", -"TYPE_US_NATIONAL_PARK", -"TYPE_US_NATIONAL_MONUMENT", -"TYPE_NATIONAL_FOREST", -"TYPE_NATIONAL_GRASSLAND", -"TYPE_NATIONAL_PRESERVE", -"TYPE_NATIONAL_RECREATION_AREA", -"TYPE_NATIONAL_MONUMENT", -"TYPE_NATIONAL_HISTORIC_AREA", -"TYPE_NATIONAL_SCENIC_AREA", -"TYPE_NATIONAL_SCENIC_ROADWAY_AREA", -"TYPE_NATIONAL_SCENIC_RIVER_AREA", -"TYPE_PROVINCIAL_PARK", -"TYPE_PROVINCIAL_FOREST", -"TYPE_CAMPGROUNDS", -"TYPE_WILDERNESS_AREA", -"TYPE_WILDLIFE_AREA", -"TYPE_BOTANICAL_GARDEN", -"TYPE_GARDEN", -"TYPE_ARBORETUM", -"TYPE_MARINE_PROTECTED_AREA", -"TYPE_AIRPORT", -"TYPE_TARMAC", -"TYPE_HELIPORT", -"TYPE_SEAPLANE_BASE", -"TYPE_MILITARY_AIRBASE", -"TYPE_CEMETERY", -"TYPE_MILITARY", -"TYPE_ENCLOSED_TRAFFIC_AREA", -"TYPE_PARKING", -"TYPE_OFF_ROAD_AREA", -"TYPE_POST_OFFICE", -"TYPE_HOUSING_DEVELOPMENT", -"TYPE_BRIDGE", -"TYPE_ARCHAEOLOGICAL", -"TYPE_HISTORICAL", -"TYPE_RUINS", -"TYPE_TUNNEL", -"TYPE_RESIDENTIAL_DWELLING", -"TYPE_DETACHED_DWELLING", -"TYPE_ATTACHED_DWELLING", -"TYPE_APARTMENT", -"TYPE_GATED_COMMUNITY", -"TYPE_RETIREMENT_HOME", -"TYPE_TOLL_BOOTH", -"TYPE_CULTURAL", -"TYPE_CULTURAL_CENTER", -"TYPE_OVERPASS", -"TYPE_REST_ROOM", -"TYPE_PUBLIC_PHONE", -"TYPE_PHONE_BOOTH", -"TYPE_MANNED_PCO", -"TYPE_RESEARCH_INSTITUTE", -"TYPE_NON_GOVERNMENTAL_ORGANIZATION", -"TYPE_OFFICE_PARK", -"TYPE_MEDITATION_CENTER", -"TYPE_RELIGIOUS", -"TYPE_MONASTERY", -"TYPE_ASHRAM", -"TYPE_PAGODA", -"TYPE_MISSION", -"TYPE_PILGRIM_DESTINATION", -"TYPE_SOCIAL_SERVICE", -"TYPE_RANGER_STATION", -"TYPE_TRANSIT_STATION", -"TYPE_BUS_STATION", -"TYPE_TRAMWAY_STATION", -"TYPE_TRAIN_STATION", -"TYPE_SUBWAY_STATION", -"TYPE_FERRY_TERMINAL", -"TYPE_CABLE_CAR_STATION", -"TYPE_GONDOLA_LIFT_STATION", -"TYPE_FUNICULAR_STATION", -"TYPE_HORSE_CARRIAGE_STATION", -"TYPE_MONORAIL_STATION", -"TYPE_SEAPORT", -"TYPE_NATURAL_FEATURE", -"TYPE_ELEVATED", -"TYPE_PEAK" -], -"enumDescriptions": [ -"ABSTRACT", -"Can be used for features where no establishment information is available. 
For example, if an importer generates a TYPE_ESTABLISHMENT_BUILDING feature, but source data does not have any information on the actual purpose of the building, the importer should use TYPE_UNDEFINED for establishment type.", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Note: 0xF213F is reserved to extend TYPE_INDUSTRIAL", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"0xF6F is reserved and any new types should start with 0xF6F1", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"State parks, county parks, city parks, wilderness areas. This also includes underwater preserves, wild and scenic rivers, etc. 
Should we have things like baseball diamonds?", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Wildlife management area. This includes preservation areas for fish, game, and birds.", -"", -"", -"", -"", -"", -"An airport is a location where aircraft such as fixed-wing aircraft, helicopters, and blimps takeoff and land. NOTE: TYPE_TARMAC establishment type has been deprecated in favor of TYPE_TARMAC feature type. Once all data is modeled using the new feature type, the entry below should be removed.", -"DEPRECATED", -"A heliport is a small airport suitable only for use by helicopters.", -"A seaplane base is a special airport suitable for seaplanes.", -"An airbase (sometimes referred to correctly as an airfield, military airport, or Royal Air Force Station or Air Force Station) is a military airfield that provides basing and support of military aircraft.", -"", -"", -"The term \"enclosed traffic area\" is used extensively in Europe. It generally refers to a parking lot or other similar area through which traffic passes but are not generally considered to be a road. Another example is a piazza in an Italian town.", -"Establishment TYPE_PARKING is used for parking lots or garages. The actual feature type can be TYPE_ESTABLISHMENT_GROUNDS or TYPE_ESTABLISHMENT_BUILDING to distinguish between those.", -"", -"", -"In some country (like China), there will have a small area of housing development. Normally covers one or more city blocks.", -"In the Australian data we get bridges as POIs effectively. We'd like to include them so we can mapsearch for \"Sydney Harbour Bridge\" and others. 
Many other data sets have bridges modeled directly as segments, and so don't require this.", -"", -"In some countries historical objects are well known and important.", -"", -"Feature must be TYPE_ESTABLISHMENT_GROUNDS.", -"These types, when used with appropriate feature type (TYPE_ESTABLISHMENT_BUILDING, TYPE_ESTABLISHMENT_GROUNDS) can refer to a single building or a residential area (respectively).", -"", -"", -"All multi-unit housing such as duplex, condominium, apartment belongs to this.", -"", -"", -"In Korea most toll booths have their own names, and are often used as reference points. If an importer generates TYPE_TOLL_BOOTH establishment features with exact positions and names, then mapsearch will index them. Also this can be used to render them.", -"", -"", -"An overpass, or flyover is a structure that passes over another road or railway.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Hierarchy of transit station types. See go/transit-schema for more details.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreExceptionalHoursProto": { -"description": "An ExceptionalHoursProto holds information about exceptional (non-regular) hours for a business, such as holiday hours.", -"id": "GeostoreExceptionalHoursProto", -"properties": { -"hours": { -"$ref": "GeostoreBusinessHoursProto", -"description": "The weekly schedule to be applied for the dates that fall within the range. The schedule may contain hours only for days of the week that occur during the date range specified in the range field." -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this exception." -}, -"range": { -"$ref": "GeostoreTimeIntervalProto", -"description": "The dates for which this exception applies, expressed as a half open interval. 
For example, an exception that applies for the entire month of December 2015 should have a range December 1, 2015 to January 1, 2016. Any regular hours that start on days in this range are ignored and replaced by the exceptional hours for that day. The TimeIntervalProto for the range must be a fully specified, non-empty, and non-inverted range of dates. Concretely, the requirements are: * the range must be a TYPE_RANGE interval * the interval may not be inverted * the endpoints of the interval must specify a year, month, and day * the day_type of each endpoint must be type DAY_OF_MONTH * the endpoints may not specify hour, minute, second, week, or week_type * the begin endpoint must predate the end endpoint" -} -}, -"type": "object" -}, -"GeostoreExistenceProto": { -"id": "GeostoreExistenceProto", -"properties": { -"closeReason": { -"description": "Structured reason for the permanent closure (if any).", -"enum": [ -"CLOSED", -"MOVED", -"REBRANDED" -], -"enumDescriptions": [ -"The establishment is permanently closed.", -"The establishment moved to another location. There may be a new feature representing the \"same\" establishment at its new location and if so, this feature should have a RELATION_BUSINESS_MOVED relation to that other feature.", -"The establishment was rebranded. There may be a new feature representing the \"same\" establishment with its new branding and if so, this feature should have a RELATION_BUSINESS_REBRANDED relation to that other feature. WARNING: New edits no longer use REBRANDED or RELATION_BUSINESS_REBRANDED. For more context, see go/geo-schema-reference:operational-lifecycle#rebranded-schema-status." -], -"type": "string" -}, -"closed": { -"description": "Indicates whether the place is closed (permanently or temporarily), i.e., not operational in the present, but was in the past. 
WARNING: New code should prefer go/geo-schema-reference:operational-status.", -"type": "boolean" -}, -"endAsOfDate": { -"$ref": "GeostoreDateTimeProto", -"description": "The earliest known date of this feature's \"end\" (e.g. an establishment's permanent closure), if the actual date is unknown; i.e. the best known upper bound of the day end_date would represent. end_as_of_date must be in local (civil) time and of PRECISION_DAY. WARNING: Not necessarily consistent yet with `closed` or the ClosureAttachmentProto. If you believe you need to use this field directly, please go/contact-geo-schema. " -}, -"endDate": { -"$ref": "GeostoreDateTimeProto" -}, -"featureBirthTimestampSeconds": { -"deprecated": true, -"description": "** DEPRECATED ** This field is now deprecated (see b/22878252). Please use the Geo Schema GetFeatureBirthTimestamp() API to extract the birth timestamp of a feature. The timestamp in seconds since the UNIX epoch (January 1, 1970) when this feature becomes live in the Geo repository. This was the birth date of the place's representation in Google whereas start_date is the birth date of the place's operations in the physical world.", -"format": "int64", -"type": "string" -}, -"removed": { -"description": "Indicates whether the feature is marked as removed in the Geo repository. Removed features are still present in the Geo repository but are considered to be in an inactive state (not valid for lint purposes, not retrievable except explicitly by feature ID, etc.). NOTE: If you have access to a complete FeatureProto, do NOT read this bit directly to find out whether a feature is removed. Instead, rely on the IsFeatureRemoved() API, available in C++ (geostore/base/public/feature.h) and Java (geostore/base/Feature.java).", -"type": "boolean" -}, -"removedReason": { -"description": "Structured reason why the feature is marked as removed. 
Relevant only when removed == true.", -"enum": [ -"UNKNOWN", -"BOGUS", -"PRIVATE", -"PRIVATE_MUST_PURGE", -"SPAM", -"UNSUPPORTED", -"PENDING", -"DUPLICATE", -"OLD_SCHEMA", -"REPLACED", -"ROLLED_BACK" -], -"enumDescriptions": [ -"Reason unknown or not captured by one of the other values.", -"Bogus data - doesn't represent a real world geographic place or is just largely bad/incorrect data.", -"Considered \"private\" data. We do not want to display this data to users, but we allow it to remain in MapFacts indefinitely.", -"Considered \"private\" data, which has been or will soon be fully removed from MapFacts and related systems (other than feature id and existence metadata).", -"Bad data that was intentionally/maliciously contributed.", -"Not enough data to support the existence of the feature. Different from the BOGUS case in that there is no positive evidence that place is/was incorrect. NOTE: This removal reason can only be set/unset by PROVIDER_GOOGLE_LOCAL_ALCHEMY and PROVIDER_GOOGLE_OYSTER_AUTO_EDITS.", -"Indicates that the existence of the place is yet to be justified.", -"Place has been identified as duplicate of and merged to another place. NOTE: This removal reason cannot be set for TYPE_SEGMENT. NOTE: If you have access to a complete FeatureProto, do NOT read this bit directly to find out whether a feature is a duplicate. Instead, rely on the IsDuplicateFeature() API, available in C++ (geostore/base/public/feature.h) and Java (geostore/base/Feature.java).", -"The feature uses an old version of the Geo Schema (e.g. it has a deprecated feature type).", -"Feature has been replaced by one or more other features. E.g. a segment got split and is now replaced by 2 segments. This is different than DUPLICATE because the feature is not a duplicate (in the identity sense) of the feature(s) that replace it. NOTE: this removal reason is currently allowed to be set only on TYPE_SEGMENT features.", -"The edit that created the feature has been rolled back." 
-], -"type": "string" -}, -"startDate": { -"$ref": "GeostoreDateTimeProto", -"description": "(Initial) start and (permanent) end dates of operations, such that start_date is the first day operational and end_date is the first day when operations have permanently ended. The only allowed precisions are PRECISION_DAY, PRECISION_MONTH, PRECISION_YEAR. DateTimeProto.seconds should have the lowest legal value for the desired date/time and precision. E.g. for PRECISION_MONTH, 2019-02-15 21:10:30 is not valid, it should be 2019-02-01 00:00:00 instead. NOTE: The start_date and end_date are stored in UTC but should be interpreted as being in the local timezone. So clients should convert the DateTimeProto to local (civil) time using UTC+0, and then treat the result as local to the feature." -} -}, -"type": "object" -}, -"GeostoreFeatureFieldMetadataProto": { -"description": "Provenance information for sub-fields of this feature - go/subfield-provenance.", -"id": "GeostoreFeatureFieldMetadataProto", -"properties": { -"fieldProvenance": { -"items": { -"$ref": "GeostoreFeatureFieldMetadataProtoFieldProvenance" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFeatureFieldMetadataProtoFieldProvenance": { -"id": "GeostoreFeatureFieldMetadataProtoFieldProvenance", -"properties": { -"fieldPath": { -"description": "Represents all fields for which this SourceInfo is valid. NOTE: Field paths are rooted at FeatureProto level.", -"items": { -"$ref": "GeostoreStableFieldPathProto" -}, -"type": "array" -}, -"provenance": { -"$ref": "GeostoreProvenanceProto" -} -}, -"type": "object" -}, -"GeostoreFeatureHistoryMetadataProto": { -"description": "Metadata related to the history of a given feature in the Geo repository.", -"id": "GeostoreFeatureHistoryMetadataProto", -"properties": { -"featureBirthTimestampUs": { -"description": "The timestamp (in microseconds since the UNIX epoch) when this feature first went live in the Geo repository. 
Note that this has no relation to the birth data of that geographical entity in the real world.", -"format": "int64", -"type": "string" -}, -"lastModificationTimestampUs": { -"description": "The timestamp (in microseconds since the UNIX epoch) of the last modification to the feature. Note this includes attachment modifications. The feature's initial creation is also considered as a modification. This is useful for those that consume features via both listening to notifications and reading from repository snapshots. This timestamp can be used to decide whether a feature in the snapshot was already seen in a more recent state through the notifications.", -"format": "int64", -"type": "string" -}, -"removalTimestampUs": { -"description": "The timestamp (in microseconds since the UNIX epoch) of the deletion time of the feature. If the feature is currently removed, this field gets populated with the timestamp the feature first became removed after being live (or being removed from beginning). This field won't be set if the feature is live.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreFeatureIdForwardingsProto": { -"description": "Feature ID forwardings. There are many different types of ID forwardings, some of which are attached to live features, others to removed features. This information is available in multiple forms (with different completeness guarantees): (1) in RPC responses to read requests to the live Geo repository; (2) on disk, as part of the metadata section of features found in the (inactive) features snapshots; (3) on disk, as part of a separate feature_id_forwardings side table.", -"id": "GeostoreFeatureIdForwardingsProto", -"properties": { -"duplicateOf": { -"$ref": "GeostoreFeatureIdProto", -"description": "If the feature has been marked as a DUPLICATE of another feature, this is the feature ID of that other feature. Note that the other feature may itself be removed. This field is always set." 
-}, -"forwardedId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The feature ID of the forwarded feature. This field is only set in case (3)." -}, -"inactiveDuplicate": { -"description": "If other features have been marked as DUPLICATE of this feature, this is the set of all such feature IDs. All feature IDs in this set should be for removed (aka inactive) features. Note that in the context of historical read requests against MapFacts (when ReadRequest.version_selection.timestamp is set), this field won't be set.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"replacedBy": { -"$ref": "GeostoreFeatureIdListProto", -"deprecated": true, -"description": "DEPRECATED - Use feature.metadata.feature_replacement_info instead. This field was never populated." -}, -"transitivelyDuplicateOf": { -"$ref": "GeostoreFeatureIdProto", -"description": "If the feature has been transitively marked as a DUPLICATE of another feature (via a chain of size >= 1), this is the feature ID of that other feature which is the end of the chain. The field is always set even if the chain is of size 1. Note that the other feature may itself be removed. This field is only set in case (3)." -} -}, -"type": "object" -}, -"GeostoreFeatureIdListProto": { -"description": "A simple list of feature IDs.", -"id": "GeostoreFeatureIdListProto", -"properties": { -"id": { -"description": "The list of feature IDs. While the exact semantics of these IDs are usage-dependent, the list should never be empty or contain duplicates.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFeatureIdProto": { -"description": "A globally unique identifier associated with each feature. We use 128-bit identifiers so that we have lots of bits available to distinguish between features. 
The feature id currently consists of a 64-bit \"cell id\" that **sometimes** corresponds to the approximate centroid of the feature, plus a 64-bit fingerprint of other identifying information. See more on each respective field in its comments. Feature ids are first assigned when the data is created in MapFacts. After initial creation of the feature, they are immutable. This means that the only properties that you should rely on are that they are unique, and that cell_ids often - but not always - preserve spatial locality. The degree of locality varies as the feature undergoes geometry changes, and should not in general be considered a firm guarantee of the location of any particular feature. In fact, some locationless features have randomized cell IDs! Consumers of FeatureProtos from Mapfacts are guaranteed that fprints in the id field of features will be globally unique. Using the fprint allows consumers who don't need the spatial benefit of cell ids to uniquely identify features in a 64-bit address space. This property is not guaranteed for other sources of FeatureProtos.", -"id": "GeostoreFeatureIdProto", -"properties": { -"cellId": { -"description": "The S2CellId corresponding to the approximate location of this feature as of when it was first created. This can be of variable accuracy, ranging from the exact centroid of the feature at creation, a very large S2 Cell, or even being completely randomized for locationless features. Cell ids have the nice property that they follow a space-filling curve over the surface of the earth. (See s2cellid.h for details.) WARNING: Clients should only use cell IDs to perform spatial locality optimizations. There is no strict guarantee that the cell ID of a feature is related to the current geometry of the feature in any way.", -"format": "uint64", -"type": "string" -}, -"fprint": { -"description": "A 64-bit fingerprint used to identify features. Most clients should rely on MapFacts or OneRing to choose fingerprints. 
If creating new fprints, the strategy should be chosen so that the chance of collision is remote or non-existent, and the distribution should be reasonably uniform. For example, if the source data assigns unique ids to features, then a fingerprint of the provider name, version, and source id is sufficient.", -"format": "uint64", -"type": "string" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a feature ID. Never set in MapFacts." -} -}, -"type": "object" -}, -"GeostoreFeatureMetadataProto": { -"description": "General metadata related to a given feature in the Geo repository.", -"id": "GeostoreFeatureMetadataProto", -"properties": { -"bulkUpdatable": { -"description": "This field indicates whether the feature is subject to bulk updates. Caution must be exercised while editing such features since the changes made by the edits will be overwritten by the bulk update (if the feature is bulk updated). See go/mapfacts-abu for more information.", -"enum": [ -"NOT_BULK_UPDATABLE", -"BULK_UPDATABLE" -], -"enumDescriptions": [ -"The feature will not be bulk updated.", -"The feature has been bulk updated, and may be bulk updated again in the future. Note that this refers specifically to the atomic bulk-upload process, not methods of applying large sets of (independent) edits (e.g. with BEST)." -], -"type": "string" -}, -"coreVersionToken": { -"description": "core_version_token is an opaque token representing the version of the core fields of the feature. This field is not updated when attachments are changed.", -"format": "byte", -"type": "string" -}, -"featureReplacementInfo": { -"$ref": "GeostoreFeatureReplacementInfoProto", -"description": "Metadata for tracking when a feature is derived from or replaced by another feature or set of features." 
-}, -"fieldMetadata": { -"$ref": "GeostoreFeatureFieldMetadataProto", -"description": "Metadata about certain repeated fields and their subfields, for which field type is not granular enough." -}, -"forwardings": { -"$ref": "GeostoreFeatureIdForwardingsProto", -"description": "Feature ID forwardings, if applicable." -}, -"history": { -"$ref": "GeostoreFeatureHistoryMetadataProto", -"description": "Metadata related to the history." -}, -"versionToken": { -"description": "version_token is an opaque token representing the version of this feature. It can be used as a concurrency token when sending edits.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreFeaturePropertyIdProto": { -"description": "Message to represent a \"feature property\" as an abstract construct. Most feature properties are mapped one to one with the EditProto field types. However in some cases the EditProto field type granularity is too coarse to support use-cases that rely on feature properties (such as per-value rights tracking). 
When that is the case, the feature property is augmented with a secondary field.", -"id": "GeostoreFeaturePropertyIdProto", -"properties": { -"attachmentTypeId": { -"description": "Required when field_type == ATTACHMENT.", -"format": "uint64", -"type": "string" -}, -"attributeId": { -"description": "Required when field_type == FEATURE_ATTRIBUTE.", -"type": "string" -}, -"fieldType": { -"enum": [ -"NONE", -"ACCESS_POINT", -"ADDRESS", -"ANCHORED_GEOMETRY_GEOMETRY_ID", -"ASSOCIATED_EV_CHARGING_STATION", -"ATTACHMENT", -"BIZBUILDER_REFERENCE", -"BORDER_FEATURE_ID_LEFT", -"BORDER_FEATURE_ID_RIGHT", -"BORDER_OVERRIDE_STATUS", -"BORDER_STATUS", -"BORDER_TYPE", -"BORDER_LOGICAL_BORDER", -"BOUND", -"BUILDING_BASE_HEIGHT_METERS_AGL", -"BUILDING_DEFAULT_DISPLAY_LEVEL", -"BUILDING_FLOORS", -"BUILDING_HEIGHT_METERS", -"BUILDING_LEVEL", -"BUILDING_STRUCTURE", -"BUSINESS_CHAIN_CANONICAL_GCONCEPT", -"BUSINESS_HOURS", -"DATA_SOURCE", -"DETAILED_3D_MODEL", -"DISPLAY_DATA", -"DISPUTED_AREA_ADMINISTERED_BY", -"DISPUTED_AREA_CLAIMANT", -"ENTRANCE_ALLOWANCE", -"ESTABLISHMENT_OPENING_HOURS_EXCEPTION", -"ESTABLISHMENT_OPENING_HOURS_REGULAR_HOURS", -"ESTABLISHMENT_PRICE_INFO", -"ESTABLISHMENT_SERVICE_AREA_SERVED_FEATURE", -"EXISTENCE_STATUS", -"FEATURE_AVERAGE_ELEVATION", -"FEATURE_BEST_LOCALE", -"FEATURE_CELL_COVERING", -"FEATURE_CENTER", -"FEATURE_CHILD", -"FEATURE_EXEMPT_REGULATED_AREA", -"FEATURE_INTERIOR_CELL_COVERING", -"FEATURE_NAME", -"FEATURE_PARENT", -"FEATURE_POSE", -"FEATURE_PREFERRED_VIEWPORT", -"FEATURE_TRACK", -"FEATURE_TYPE", -"FEATURE_WEBSITE", -"FIELD_RIGHTS", -"FUTURE_GEOMETRY", -"FUTURE_GEOMETRY_FOR", -"GCONCEPT", -"GEOMETRY_3D", -"GEOMETRY_PRECISION_METERS", -"GEOPOLITICAL_CONVEYS_ATTRIBUTION_TO", -"GEOPOLITICAL_REGION_SPECIFIC_NAME", -"GEOPOLITICAL_REGIONAL_POLYGON_ADJUSTMENT", -"GEOPOLITICAL_REGIONAL_POLYGON_COMPOSING_CLAIMS", -"GEOPOLITICAL_GEOMETRY_REST_OF_WORLD_POLYGON", -"GEOPOLITICAL_GEOMETRY_SELF_POLYGON", -"INFERRED_GEOMETRY_GEOMETRY_COMPOSITION", 
-"INFERRED_GEOMETRY_DEFINES_GEOMETRY_FOR", -"INTERSECTION", -"INTERSECTION_GROUP", -"INTERSECTION_GROUP_CHILD_GROUP", -"INTERSECTION_GROUP_GROUP_TYPE", -"INTERSECTION_GROUP_PARENT_GROUP", -"INTERSECTION_IN_GROUP", -"INTERSECTION_TOLL_CLUSTER", -"IN_SEGMENT", -"KNOWLEDGE_GRAPH_PROPERTY", -"LABEL_BACKGROUND_COLOR", -"LABEL_TEXT_COLOR", -"LANE_MARKER_BARRIER_MATERIALS", -"LANE_MARKER_CROSSING_PATTERN", -"LANE_MARKER_LINEAR_PATTERN", -"LEVEL_BUILDING", -"LEVEL_NUMBER", -"LOCALE_LANGUAGE", -"LOCALE_LOCALIZATION_POLICY_ID", -"LOGICAL_BORDER_BORDER_SEGMENT", -"LOGICAL_BORDER_STATUS", -"OPERATIONS_TEMPORARY_CLOSURE", -"PARKING_ALLOWANCE", -"PARKING_AVAILABLE", -"PARKING_OPENING_HOURS_EXCEPTION", -"PARKING_OPENING_HOURS_REGULAR_HOURS", -"PARKING_PROVIDER_FEATURE", -"PARKING_RESTRICTION", -"PEAK_PROMINENCE", -"PHONE_NUMBER", -"POINT", -"POLITICAL_CLAIM", -"POLYGON", -"POLYGON_FOR_DISPLAY", -"POLYLINE", -"RANK", -"RANK_SIGNAL", -"REGULATED_AREA_RESTRICTION", -"RELATED_BORDER", -"RELATED_ENTRANCE", -"RELATED_FEATURE", -"RELATED_TERMINAL_POINT", -"RELATED_TIMEZONE", -"RESTRICTION_GROUP_SEGMENT", -"ROAD_MONITOR_MONITORED_ROAD", -"ROUTE_CHILD_TYPE", -"SCHOOL_DISTRICT_TYPE", -"SEGMENT_ADVISORY_MAXIMUM_SPEED", -"SEGMENT_AVERAGE_SPEED", -"SEGMENT_BARRIER", -"SEGMENT_BICYCLE_FACILITY", -"SEGMENT_BICYCLE_SAFETY", -"SEGMENT_CONDITION", -"SEGMENT_CONSTRUCTION_BEGIN_DATE", -"SEGMENT_CONSTRUCTION_END_DATE", -"SEGMENT_CONSTRUCTION_STATUS", -"SEGMENT_COVERED", -"SEGMENT_DISTANCE_TO_EDGE", -"SEGMENT_EDGE_FOLLOWS_SEGMENT_BEGIN_FRACTION", -"SEGMENT_EDGE_FOLLOWS_SEGMENT_END_FRACTION", -"SEGMENT_ELEVATION", -"SEGMENT_ENDPOINT", -"SEGMENT_GRADE_LEVEL_LIST", -"SEGMENT_INTERNAL_DISALLOWED_CONNECTIONS", -"SEGMENT_INTERNAL_DISALLOWED_PRIMARY_CONNECTION", -"SEGMENT_INTERNAL_TRAVEL_ALLOWANCE", -"SEGMENT_INTERPOLATION_OFFSET_METERS", -"SEGMENT_IS_MAX_PERMITTED_SPEED_DERIVED", -"SEGMENT_LANE", -"SEGMENT_LEGAL_MAXIMUM_SPEED", -"SEGMENT_LEGAL_MINIMUM_SPEED", -"SEGMENT_MAX_SPEED", -"SEGMENT_ON_RIGHT", 
-"SEGMENT_PATH", -"SEGMENT_PEDESTRIAN_CROSSING", -"SEGMENT_PEDESTRIAN_FACILITY", -"SEGMENT_PEDESTRIAN_GRADE", -"SEGMENT_PRIORITY", -"SEGMENT_RAMP_MAX_CONNECTED_PRIORITY", -"SEGMENT_RESTRICTION", -"SEGMENT_ROAD_CAMERA", -"SEGMENT_ROAD_SIGN", -"SEGMENT_ROUTE", -"SEGMENT_ROUTE_ASSOCIATION", -"SEGMENT_SEPARATED_ROADWAYS", -"SEGMENT_SLOPE", -"SEGMENT_SURFACE", -"SEGMENT_SWEEP", -"SEGMENT_TOLL_ROAD", -"SEGMENT_USAGE", -"SEGMENT_VISIBLE_LANDMARK", -"SIGN_COMPONENT", -"SOCIAL_REFERENCE_CLAIMED_GAIA_ID", -"SOURCE_INFO", -"STATUS_CLOSED", -"STATUS_CLOSE_REASON", -"STATUS_END_AS_OF_DATE", -"STATUS_END_DATE", -"STATUS_REMOVED", -"STATUS_REMOVED_REASON", -"STATUS_START_DATE", -"STOREFRONT_GEOMETRY", -"STOREFRONT_GEOMETRY_MODEL", -"SYNTHETIC_GEOMETRY", -"THREE_DIMENSIONAL_MODEL", -"TOLL_CLUSTER_INTERSECTION", -"TOLL_PATH_TOLL_CLUSTER_SEQUENCE", -"TRANSIT_LINE_AGENCY", -"TRANSIT_LINE_STATION", -"TRANSIT_LINE_VARIANT_LINE_CONCEPT", -"TRANSIT_LINE_VARIANT_STOP", -"TRANSIT_LINE_VEHICLE_TYPE", -"TRANSIT_STATION_AGENCY_ASSOCIATION", -"VERTICAL_ORDERING_LEVEL", -"VISUALIZATION_AREA", -"WATER_REMOVED_POLYGON", -"DEPRECATED_DO_NOT_USE_EMAIL_ADDRESS", -"DEPRECATED_DO_NOT_USE_RANK_GEOMETRY", -"DEPRECATED_DO_NOT_USE_SEGMENT_INFO", -"DEPRECATED_DO_NOT_USE_SEGMENT_LANE_LIST", -"DEPRECATED_DO_NOT_USE_SEGMENT_WRONG_WAY", -"DEPRECATED_DO_NOT_USE_WEBSITE", -"FEATURE_ATTRIBUTE", -"SOCIAL_REFERENCE", -"CATEGORY", -"DEPRECATED_DO_NOT_USE_CAPITAL", -"DEPRECATED_DO_NOT_USE_DESCRIPTION", -"DEPRECATED_DO_NOT_USE_DISTINCT", -"DEPRECATED_DO_NOT_USE_DUPLICATE", -"EDIT_PRECEDENCE", -"DEPRECATED_DO_NOT_USE_EDIT_PRECEDENCE", -"DEPRECATED_DO_NOT_USE_ENTRANCE_TARGET", -"DEPRECATED_DO_NOT_USE_ESTABLISHMENT_PLACE_ACTION_PAGE", -"DEPRECATED_DO_NOT_USE_ESTABLISHMENT_TYPE", -"DEPRECATED_DO_NOT_USE_EVENT", -"DEPRECATED_DO_NOT_USE_GEOMETRIC_ACCURACY", -"DEPRECATED_DO_NOT_USE_HIGHEST_GRADE", -"DEPRECATED_DO_NOT_USE_ID_TO_OVERRIDE", -"DEPRECATED_DO_NOT_USE_ISSUE_HISTORY", -"DEPRECATED_DO_NOT_USE_ISSUE_METADATA", 
-"DEPRECATED_DO_NOT_USE_KNOWLEDGE_GRAPH_ID", -"DEPRECATED_DO_NOT_USE_LOWEST_GRADE", -"DEPRECATED_DO_NOT_USE_PAYMENT_TYPES", -"DEPRECATED_DO_NOT_USE_PHOTO", -"DEPRECATED_DO_NOT_USE_PHOTO_URL", -"DEPRECATED_DO_NOT_USE_PLACE_CLOSED", -"DEPRECATED_DO_NOT_USE_POPULATION", -"DEPRECATED_DO_NOT_USE_RANK_USER", -"DEPRECATED_DO_NOT_USE_REMOVE_DUPLICATE", -"DEPRECATED_DO_NOT_USE_REMOVE_PLACE", -"DEPRECATED_DO_NOT_USE_SCHOOL_TYPE", -"DEPRECATED_DO_NOT_USE_SEGMENT_ELEVATION_BEGIN", -"DEPRECATED_DO_NOT_USE_SEGMENT_ELEVATION_END", -"DEPRECATED_DO_NOT_USE_SEGMENT_ELEVATION_MIDDLE", -"DEPRECATED_DO_NOT_USE_SYLLABUS", -"DEPRECATED_DO_NOT_USE_TRACK_CLASS", -"DEPRECATED_DO_NOT_USE_VIEWCODE_INFO", -"DEPRECATED_DO_NOT_USE_WORKAREA", -"DEPRECATED_DO_NOT_USE_INFERRED_GEOMETRY_INCLUDES_GEOMETRY_OF", -"DEPRECATED_DO_NOT_USE_INFERRED_GEOMETRY_EXCLUDES_GEOMETRY_OF", -"DEPRECATED_DO_NOT_USE_TRANSIT_STATION_AGENCY" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"kgPropertyId": { -"description": "Required when field_type == KNOWLEDGE_GRAPH_PROPERTY.", -"type": "string" -}, -"nameLanguage": { -"description": "RESERVED", -"type": "string" -} -}, 
-"type": "object" -}, -"GeostoreFeatureProto": { -"description": "Every entry in the GeoStore database is called a \"feature\". A feature is represented as a discriminated union of all the different feature types, where the actual feature type is specified by the \"type\" field. There are also various fields that are meaningful for most or all feature types, such as bounding regions and names. Every feature has a globally unique id that can be used to refer to it from other features.", -"id": "GeostoreFeatureProto", -"properties": { -"accessPoint": { -"description": "Optional access point information. Access points hold detailed information about routing endpoints. For example, the main Google office is at \"1600 Amphitheatre Parkway\". The feature representing that office has a polygon, a center, and an address with components for the street number, route, locality, etc. The access point information, on the other hand, identifies the specific segment, the latitude/longitude of the driveway, and so forth.", -"items": { -"$ref": "GeostoreAccessPointProto" -}, -"type": "array" -}, -"address": { -"description": "Address for this feature. A Geo Schema address is designed to model a mailing address, so only features that have mailing addresses in the real world may have addresses. Each feature should have only one address. If you want to describe the geographic location of a feature which does not have a mailing address with respect to other well-known features, some other schema constructs should be used. Note that the field is defined as repeated though features that use this field with its intended semantics are constrained to have a single address even if they may have multiple mailing addresses in the real world. The \u201csingle address\u201d rule is enforced by lint. Current exceptions to the single address rule and mailing address rule are described in the g3doc. Bear note that the schema team is actively working on eliminating these exceptions. 
http://go/geo-addresses Note the following conventions: - Addresses follow the postal hierarchy, not the political hierarchy. Addresses may have components that refer to political entities when those entities also appear in the postal hierarchy. - As stated previously, but it bears repeating, addresses on features are mailing addresses. In many cases the physical address and the mailing address are the same but the address stored on a feature represents the mailing address of the feature. An example of a non-physical mailing address would be a PO Box. - These addresses are commonly defined and verifiable by a governmental authority (e.g. the United States Postal Service in the United States, Royal Mail in the United Kingdom, Correios in Brazil, etc.) and should follow conventions and rules defined by those authorities.", -"items": { -"$ref": "GeostoreAddressProto" -}, -"type": "array" -}, -"anchoredGeometry": { -"$ref": "GeostoreAnchoredGeometryProto", -"description": "Represents information about the feature\u2019s anchored geometry." -}, -"associatedEvChargingStation": { -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"attachment": { -"description": "The collection of attachments for this feature. Documentation: http://go/geo-attachments", -"items": { -"$ref": "GeostoreAttachmentsAttachmentProto" -}, -"type": "array" -}, -"attribute": { -"deprecated": true, -"description": "** DEPRECATED ** A list of attributes that describe defined aspects of this feature. An attribute must be a concrete, high quality, and editable piece of information about a feature, and must be used on some general consumer facing Google property. The data types used for attributes must be primitive types or reusable in a generic manner.", -"items": { -"$ref": "GeostoreAttributeProto" -}, -"type": "array" -}, -"bestLocale": { -"$ref": "GeostoreBestLocaleProto", -"description": "Describes the best-match locale for this feature." 
-}, -"border": { -"$ref": "GeostoreBorderProto" -}, -"bound": { -"$ref": "GeostoreRectProto", -"description": "A latitude-longitude rectangle used by bucketing MapReduces. See the documentation on bucketing MapReduce for details. This field can be a source of confusion. Because it is called \"bound\", it is often assumed that it is a tight bound on the geometry but it can be (and often is) much larger. If a tight bound is needed then use the standard GetFeatureGeometryBound() function instead. To be more explicit, if you are using this field for *anything* else than a bucketing MapReduce, you are doing the wrong thing. Not all features are required to have bounding boxes. See geostore::IsBoundRequiredForFeatureType() for the list of feature types required to have a bounding box. This bound field will be updated when a feature changes in MapFacts to include its geometry. Also, a GeoSchema pipeline, go/geo-schema-pipelines-docs#expand-bounds runs periodically to update the field for strong references from other features. Therefore, most editors don't need to edit this field explicitly. See go/geo-changes:no-edit-for-feature-bound for the details." -}, -"building": { -"$ref": "GeostoreBuildingProto" -}, -"businessChain": { -"$ref": "GeostoreBusinessChainProto", -"description": "Data specific to business chain features, e.g., Canonical GConcepts." -}, -"center": { -"$ref": "GeostorePointProto", -"description": "The conceptual center of the feature, used for routing. For cities, this would be the center of the downtown, or maybe the location of city hall. For states and countries it might be the capital city. Most feature types will not have a conceptual center - by default, routing will use the centroid of the feature's geometry. If you need a feature center point consider using GetFeatureGeometryCenter() function from geostore/base/public/feature.h rather than reading from this field directly." 
-}, -"child": { -"description": "Features can define themselves as a collection of other features. For example, a route is a collection of road segments, and a feature for the \"Great Lakes\" could be defined as lakes Superior, Michigan, Huron, Erie, and Ontario. It is not recommended to design a multi level tree using the child field to build up a feature because it requires fetching many features to see the details of the feature. In practice this is used to model archipelago, route, transit (agencies, lines, trips, departures), and river features. The geometry of a feature is implicitly defined by its children, so if a feature has children then it should not have any points, polylines, or polygons. In general, this field should not be used to represent political or postal hierarchies. For example, a county would not list its cities as children, because the county is not defined in terms of its cities (it also contains unincorporated areas, etc.).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"covering": { -"$ref": "GeostoreCellCoveringProto", -"description": "S2 cell coverings for this feature. See util/geometry/s2cell_union.h for more information about S2 cells. Coverings are useful for quick containment or intersection tests. S2 covering that consists of cells that intersect with the feature." -}, -"dataSource": { -"$ref": "GeostoreDataSourceProto" -}, -"detailed3dModel": { -"$ref": "GeostoreGeometryStoreReferenceProto", -"description": "Geometry Store ID and materialized geometry representing the feature's physical presence in the world. Like geometry_3d above, but with additional semantic or renderable details, e.g. labeled surfaces (\"door\", \"window\"), material specifications, etc. IMPORTANT: Clients may not write to this field directly; see go/geometry-store-reading-and-writing#writing-to-geometry-store." -}, -"displayData": { -"$ref": "GeostoreDisplayDataProto", -"description": "Data used to render this feature on a map." 
-}, -"disputedArea": { -"$ref": "GeostoreDisputedAreaProto", -"description": "Represents information about disputed areas. Only TYPE_DISPUTED_AREA features should have the field set." -}, -"doodle": { -"$ref": "GeostoreDoodleProto", -"deprecated": true, -"description": "** DEPRECATED **" -}, -"elevation": { -"$ref": "GeostoreElevationProto" -}, -"elevationModel": { -"$ref": "GeostoreElevationModelProto", -"description": "Captures elevation data used on TYPE_DIGITAL_ELEVATION_MODEL features." -}, -"entrance": { -"$ref": "GeostoreEntranceProto" -}, -"establishment": { -"$ref": "GeostoreEstablishmentProto", -"description": "Also allowed on TYPE_BUSINESS_CHAIN and TYPE_TRANSIT_AGENCY features, to model the feature's phone number(s). Other fields within EstablishmentProto are not permitted on non-TYPE_ESTABLISHMENT features." -}, -"exemptRegulatedArea": { -"description": "A list of feature ids of polygon based restrictions that do not apply to this feature. This may only include features of TYPE_REGULATED_AREA that also have a feature.regulated_area.restriction field defined. Setting this field opts the feature out of all restrictions set on that regulated area.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"futureGeometry": { -"$ref": "GeostoreFeatureIdProto", -"description": "Specifies the TYPE_FUTURE_GEOMETRY whose geometry will replace this feature's geometry. If this field is populated, the referenced future geometry must have a future_geometry_for referencing this feature." -}, -"futureGeometryFor": { -"$ref": "GeostoreFeatureIdProto", -"description": "Specifies the feature that this feature's geometry will replace. If this field is populated, the referenced feature must have a future_geometry reference back to this feature. This field is only allowed (and required) for TYPE_FUTURE_GEOMETRY features." 
-}, -"geometry3d": { -"$ref": "GeostoreGeometryStoreReferenceProto", -"description": "Geometry Store ID and materialized geometry representing the feature's physical shape in the world. IMPORTANT: Clients may not write to this field directly; see go/geometry-store-reading-and-writing#writing-to-geometry-store." -}, -"geometryPrecisionMeters": { -"description": "If set, the feature's actual location can be assumed to be somewhere within a circle of this radius, centered on the feature's location. More information on this field at go/gpm-definition-update. NOTE: Only applicable to features with 'point' geometry. Please contact geo-schema-team@ if you have non-point use cases for which this field would be useful.", -"format": "double", -"type": "number" -}, -"geopolitical": { -"$ref": "GeostoreGeopoliticalProto" -}, -"geopoliticalGeometry": { -"$ref": "GeostoreGeopoliticalGeometryProto", -"description": "Geopolitical (unsimplified) polygons for a feature for different geopolitical use cases." -}, -"htmlText": { -"deprecated": true, -"description": "** DEPRECATED ** Features can have zero or more HTML texts associated with them. These might be HTML balloons used by Google Earth, for example.", -"items": { -"$ref": "GeostoreHtmlTextProto" -}, -"type": "array" -}, -"id": { -"$ref": "GeostoreFeatureIdProto", -"description": "The globally unique id for this feature." -}, -"inferredGeometry": { -"$ref": "GeostoreInferredGeometryProto" -}, -"interiorCovering": { -"$ref": "GeostoreCellCoveringProto", -"description": "S2 interior covering that consists of cells completely enclosed within the feature's geometry (for features with polygonal geometry)." -}, -"internal": { -"$ref": "GeostoreInternalFeatureProto", -"description": "Data for this feature that is less about the feature and more about other data in the feature. For example, it might be data about the rights we have to other data in this FeatureProto, or MapFacts-internal shape IDs for this feature's polygons." 
-}, -"intersection": { -"$ref": "GeostoreIntersectionProto" -}, -"intersectionGroup": { -"$ref": "GeostoreIntersectionGroupProto" -}, -"kgProperty": { -"description": "Properties that apply to this feature whose schema is defined in the Knowledge Graph schema (see https://hume.google.com/graph/schema). Not all properties that exist in the KG schema can be asserted via this mechanism. The set of properties that are allowed to be set on a feature depends on the feature's GConcepts (and feature type). For instance, only gcid:country features may have the /geo/type/country/president property (made up example, since that property doesn't actually exist in the KG schema). GConcept hierarchy is taken into account for deciding the set of allowed properties. Additionally, the specific properties allowed are further constrained by the list specified at go/kg-property-allowlist. NOTE: not all types of properties are allowed to appear in the Geo Schema. For now, we limit ourselves to properties whose value type is TYPE_BOOL, TYPE_COMPOUND, TYPE_DATETIME, TYPE_FLOAT, TYPE_ID, TYPE_INT, TYPE_NESTED_STRUCT, TYPE_TEXT, or TYPE_URI. NOTE(b/35039936): We are in the process of changing how a KG property with multiple values is stored in this field. Currently, such a KG property is stored in a single instance of the kg_property field. However, we will be changing this so that each value will be stored in its own instance of kg_property. Any client that wants to read from this field should be prepared to read data represented in either format. See b/35039936 or the announcement at http://g/geo-schema-announce/7IXR3Fex8to/7yFyT5UoAwAJ for an example and more details. The mechanism to assert that a KG property has no value is via the property_value_status field below. 
freebase.PropertyValue.value_status is not allowed be set here for consistency reason.", -"items": { -"$ref": "FreebasePropertyValue" -}, -"type": "array" -}, -"knowledgeGraphReference": { -"$ref": "GeostoreKnowledgeGraphReferenceProto", -"description": "RESERVED" -}, -"laneMarker": { -"$ref": "GeostoreLaneMarkerProto" -}, -"level": { -"$ref": "GeostoreLevelProto", -"description": "Represents information about TYPE_LEVEL features." -}, -"locale": { -"$ref": "GeostoreLocaleProto" -}, -"logicalBorder": { -"$ref": "GeostoreLogicalBorderProto" -}, -"metadata": { -"$ref": "GeostoreFeatureMetadataProto", -"description": "Metadata about this particular feature. Metadata is managed internally by the Geo Data Infrastructure and in general should not be set by clients. Features that don't ultimately come from the Geo repository (MapFacts) won't have any metadata set." -}, -"name": { -"description": "The name(s) of this feature. A feature may have different names in different languages, colloquial or \"vanity\" names, etc.", -"items": { -"$ref": "GeostoreNameProto" -}, -"type": "array" -}, -"operations": { -"$ref": "GeostoreOperationsProto", -"description": "Information about this feature's operations, e.g. when this feature is temporarily closed. NOTE: for legacy reasons, some closure-specifc information (e.g. permanent closure reason) lives in ExistenceProto instead. In the future, such information should move here in OperationsProto." -}, -"originalId": { -"$ref": "GeostoreFeatureIdProto", -"description": "This field is used internally by the pipeline for id stability. It should not be set by individual importers, nor should it be read by consumer clients. In particular, this field will not be present in features read or snapshotted from the Mapfacts Repository." -}, -"parent": { -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"parking": { -"$ref": "GeostoreParkingProto", -"description": "Describes parking details for the feature." 
-}, -"point": { -"description": "Defines the geometry of the feature. The geometry may be specified as an arbitrary union of points, poses, polylines, tracks, and polygons. Points, poses, polylines, and tracks are assumed to represent regions of unspecified size or width rather than regions of zero area. Most features should have some sort of geometry. Geometry may be synthesized if none is available (e.g., polygons for postal codes). The synthetic_geometry flag should be set in that case. Point is currently enforced as a non-repeating field for all feature types, though it is defined as repeating in case future modeling requires multiple points. The number of allowed polylines, tracks, or polygons vary based on feature type. A feature can have at most one pose (it is an optional field).", -"items": { -"$ref": "GeostorePointProto" -}, -"type": "array" -}, -"political": { -"$ref": "GeostorePoliticalProto" -}, -"polygon": { -"items": { -"$ref": "GeostorePolygonProto" -}, -"type": "array" -}, -"polygonForDisplay": { -"$ref": "GeostorePolygonProto", -"description": "Provide version of the geometry suitable for display. This has been subject to water removal and (possibly) moderate simplification." -}, -"polyline": { -"items": { -"$ref": "GeostorePolyLineProto" -}, -"type": "array" -}, -"pose": { -"$ref": "GeostorePoseProto", -"description": "Defines the geometry of a feature as a 6D pose, including lat, lng, altitude, roll, pitch, and yaw along the WGS-84 ellipsoid. Only the lat and lng are strictly required." -}, -"preferredViewport": { -"$ref": "GeostoreRectProto", -"description": "The preferred viewport for this feature. If present, this latitude-longitude rectangle holds the preferred viewport for the feature. For example, it might hold the bounds of the \"central\" portion of a large city. There are no aspect ratio requirements. This is an optional field: if no viewport is supplied, interested clients can use heuristics to determine a viewport. 
Calling the standard GetFeatureGeometryBound() function would be a good way to start but note that it can return an empty bounding box (e.g., if the feature has no geometry). The preferred viewport is not necessarily fully contained by the above bounding box." -}, -"propertyValueStatus": { -"description": "The value status of properties on this feature. For example, this specifies whether the feature is known to have no name (this is the value status of the 'FEATURE_NAME' property). Only property IDs which have no specific value are allowed to have a value status. Note: not all field types will be supported, please contact geo schema team if you want to enable this field for a field type that is not currently supported.", -"items": { -"$ref": "GeostorePropertyValueStatusProto" -}, -"type": "array" -}, -"rank": { -"description": "WARNING: Please do NOT introduce new uses of this field; treat it as if it were deprecated. For appropriate ranking contacts, see g3doc/company/teams/gdeng/geo-schema-reference/home/feature-properties/rank.md. A floating-point number between 0.0 and 1.0 indicating how \"important\" we think this feature is. This can be used to decide which features to render on maps, and how to rank results when the user does a search. The rank can depend on any number of factors such as the number of references to this feature in web pages, geographic size, population, number of referring geographic entities, \"priority\" information encoded in the source data, etc.", -"format": "float", -"type": "number" -}, -"rankDetails": { -"$ref": "GeostoreRankDetailsProto", -"description": "The rank field is computed as a weighted sum of several signals. This field contains a protocol buffer whose fields give those signals and their weights. Clients should try very hard not to depend on these individual signals and use the single rank field instead. At some point in the future, this field will not be exposed anymore." 
-}, -"rawGconceptInstanceContainer": { -"$ref": "GeostoreOntologyRawGConceptInstanceContainerProto", -"description": "Geo Ontology GConcept Instances - Design doc linked off http://go/geo-ontology - In order to shield clients from changes in GConcept representation we provide an accessor library: geostore/base/public/gconcept_instance.h" -}, -"regulatedArea": { -"$ref": "GeostoreRegulatedAreaProto" -}, -"relatedBorder": { -"description": "For TYPE_COUNTRY or TYPE_ADMINISTRATIVE_AREA1 features, this field defines the associated TYPE_BORDERs which reference this feature. The linked TYPE_BORDERs must have the feature.border set, pointing to this feature. TYPE_COUNTRY or TYPE_ADMINISTRATIVE_AREA1 features must have this field set for each TYPE_BORDER referencing them.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"relatedEntrance": { -"description": "Logical relationship to other features that are entrances or exits to this feature.", -"items": { -"$ref": "GeostoreEntranceReferenceProto" -}, -"type": "array" -}, -"relatedFeature": { -"description": "Geographic or logical relationships to other features. Importers don't need to fill a geographic relationship in - it is handled by related feature processing by a standalone pipeline. Adding \"contained by\" country relations is however encouraged (and required for TYPE_ROUTE features). WARNING: Updates to this field handled by standalone pipelines are NOT atomic with regard to updates to the features being referenced; we do not guarantee that a given MapFacts snapshot will be consistent between this field and the related features.", -"items": { -"$ref": "GeostoreRelationProto" -}, -"type": "array" -}, -"relatedTerminalPoint": { -"description": "Terminal points associated with this feature. 
For instance, an airport terminal may have specifically designated pickup and drop-off points.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"relatedTimezone": { -"description": "Contains time zones known to be associated with a feature. Most features are associated with the single time zone that contains them. However, some larger features (countries, continents, etc.) are associated with all of the time zones they contain. Most features can have any number of related time zones, but TYPE_SEGMENT and TYPE_ESTABLISHMENT_POI features can have at most 1.", -"items": { -"$ref": "GeostoreTimezoneProto" -}, -"type": "array" -}, -"restrictionGroup": { -"$ref": "GeostoreRestrictionGroupProto" -}, -"roadMonitor": { -"$ref": "GeostoreRoadMonitorProto" -}, -"route": { -"$ref": "GeostoreRouteProto", -"description": "Additional details on the feature types below can be found in the individual protocol buffer definitions. These extensions capture data that is specific to a set of feature types and which makes no sense for other feature types." -}, -"schoolDistrict": { -"$ref": "GeostoreSchoolDistrictProto" -}, -"segment": { -"$ref": "GeostoreSegmentProto" -}, -"segmentPath": { -"$ref": "GeostoreSegmentPathProto" -}, -"sign": { -"$ref": "GeostoreRoadSignProto" -}, -"skiBoundary": { -"$ref": "GeostoreSkiBoundaryProto" -}, -"skiLift": { -"$ref": "GeostoreSkiLiftProto" -}, -"skiTrail": { -"$ref": "GeostoreSkiTrailProto" -}, -"socialReference": { -"$ref": "GeostoreSocialReferenceProto", -"description": "All establishments must have a social reference. WARNING: Aside from creating new establishments, please do NOT introduce new uses; treat social references as if they were deprecated. For alternatives and more, see g3doc/company/teams/gdeng/geo-schema-reference/home/feature-types/establishments/social-reference.md." 
-}, -"sourceInfo": { -"description": "A list of the data sources that were used to construct this feature, together with optional \"raw data\" in the provider's format. Raw data should not be used by production clients but may be useful for exploring data that is not currently converted to a canonical form.", -"items": { -"$ref": "GeostoreSourceInfoProto" -}, -"type": "array" -}, -"status": { -"$ref": "GeostoreExistenceProto", -"description": "All features can have \"existence\" information associated with them." -}, -"storefrontGeometry": { -"description": "Represents information about the store front geometry. Only TYPE_ESTABLISHMENT_POI should have this field set.", -"items": { -"$ref": "GeostoreAnchoredGeometryProto" -}, -"type": "array" -}, -"storefrontGeometryModel": { -"$ref": "GeostoreGeometryStoreReferenceProto", -"description": "Geometry Store ID and materialized geometry for a POI feature's storefront(s). IMPORTANT: Clients may not write to this field directly; see go/geometry-store-reading-and-writing#writing-to-geometry-store." -}, -"syntheticGeometry": { -"description": "We prefer features that have geometry over those that do not. In some cases we synthesize geometry (e.g., polygons for postal codes). This flag is set to indicate features that have such synthetic geometry.", -"type": "boolean" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a feature. Never set in MapFacts." -}, -"threeDimModel": { -"$ref": "GeostoreThreeDimensionalModelProto", -"description": "Captures full model representing the feature's 3D geometry. Should only be found on TYPE_COMPOUND_BUILDING features for now, but not part of the BuildingProto extension for possible future extensions." -}, -"tollCluster": { -"$ref": "GeostoreTollClusterProto", -"description": "Represents information about TYPE_TOLL_CLUSTER features." 
-}, -"tollPath": { -"$ref": "GeostoreTollPathProto", -"description": "Represents information about /geo/type/toll_path features." -}, -"track": { -"description": "Defines the geometry of a feature as a sequence of 6D poses, including lat, lng, altitude, roll, pitch, and yaw. Only lat and lng are typically required. Each track has an index so that they can be viewed in a stable order.", -"items": { -"$ref": "GeostoreTrackProto" -}, -"type": "array" -}, -"transitLine": { -"$ref": "GeostoreTransitLineProto" -}, -"transitLineVariant": { -"$ref": "GeostoreTransitLineVariantProto", -"description": "RESERVED" -}, -"transitStation": { -"$ref": "GeostoreTransitStationProto" -}, -"type": { -"description": "The type of this feature -- see comments above.", -"enum": [ -"TYPE_ANY", -"TYPE_TRANSPORTATION", -"TYPE_ROUTE", -"TYPE_DEPRECATED_HIGHWAY_DO_NOT_USE", -"TYPE_HIGHWAY", -"TYPE_HIGHWAY_1", -"TYPE_HIGHWAY_2", -"TYPE_HIGHWAY_3", -"TYPE_HIGHWAY_4", -"TYPE_HIGHWAY_5", -"TYPE_HIGHWAY_6", -"TYPE_HIGHWAY_7", -"TYPE_HIGHWAY_8", -"TYPE_HIGHWAY_9", -"TYPE_BICYCLE_ROUTE", -"TYPE_TRAIL", -"TYPE_SEGMENT", -"TYPE_ROAD", -"TYPE_RAILWAY", -"TYPE_STANDARD_TRACK", -"TYPE_JR_TRACK", -"TYPE_NARROW_TRACK", -"TYPE_MONORAIL_TRACK", -"TYPE_SUBWAY_TRACK", -"TYPE_LIGHT_RAIL_TRACK", -"TYPE_BROAD_TRACK", -"TYPE_HIGH_SPEED_RAIL", -"TYPE_TROLLEY_TRACK", -"TYPE_FERRY", -"TYPE_FERRY_BOAT", -"TYPE_FERRY_TRAIN", -"TYPE_VIRTUAL_SEGMENT", -"TYPE_INTERSECTION", -"TYPE_TRANSIT", -"TYPE_TRANSIT_STATION", -"TYPE_BUS_STATION", -"TYPE_TRAMWAY_STATION", -"TYPE_TRAIN_STATION", -"TYPE_SUBWAY_STATION", -"TYPE_FERRY_TERMINAL", -"TYPE_AIRPORT", -"TYPE_AIRPORT_CIVIL", -"TYPE_AIRPORT_MILITARY", -"TYPE_AIRPORT_MIXED", -"TYPE_HELIPORT", -"TYPE_SEAPLANE_BASE", -"TYPE_AIRSTRIP", -"TYPE_CABLE_CAR_STATION", -"TYPE_GONDOLA_LIFT_STATION", -"TYPE_FUNICULAR_STATION", -"TYPE_SPECIAL_STATION", -"TYPE_HORSE_CARRIAGE_STATION", -"TYPE_MONORAIL_STATION", -"TYPE_SEAPORT", -"TYPE_TRANSIT_STOP", -"TYPE_TRANSIT_TRIP", 
-"TYPE_TRANSIT_DEPARTURE", -"TYPE_TRANSIT_LEG", -"TYPE_TRANSIT_LINE", -"TYPE_TRANSIT_AGENCY_DEPRECATED_VALUE", -"TYPE_TRANSIT_TRANSFER", -"TYPE_SEGMENT_PATH", -"TYPE_ROAD_SIGN", -"TYPE_INTERSECTION_GROUP", -"TYPE_PATHWAY", -"TYPE_RESTRICTION_GROUP", -"TYPE_TOLL_CLUSTER", -"TYPE_POLITICAL", -"TYPE_COUNTRY", -"TYPE_ADMINISTRATIVE_AREA", -"TYPE_ADMINISTRATIVE_AREA1", -"TYPE_US_STATE", -"TYPE_GB_COUNTRY", -"TYPE_JP_TODOUFUKEN", -"TYPE_ADMINISTRATIVE_AREA2", -"TYPE_GB_FORMER_POSTAL_COUNTY", -"TYPE_GB_TRADITIONAL_COUNTY", -"TYPE_ADMINISTRATIVE_AREA3", -"TYPE_ADMINISTRATIVE_AREA4", -"TYPE_ADMINISTRATIVE_AREA5", -"TYPE_ADMINISTRATIVE_AREA6", -"TYPE_ADMINISTRATIVE_AREA7", -"TYPE_ADMINISTRATIVE_AREA8", -"TYPE_ADMINISTRATIVE_AREA9", -"TYPE_COLLOQUIAL_AREA", -"TYPE_RESERVATION", -"TYPE_LOCALITY", -"TYPE_GB_POST_TOWN", -"TYPE_JP_GUN", -"TYPE_JP_SHIKUCHOUSON", -"TYPE_JP_SUB_SHIKUCHOUSON", -"TYPE_COLLOQUIAL_CITY", -"TYPE_SUBLOCALITY", -"TYPE_US_BOROUGH", -"TYPE_GB_DEPENDENT_LOCALITY", -"TYPE_JP_OOAZA", -"TYPE_JP_KOAZA", -"TYPE_JP_GAIKU", -"TYPE_GB_DOUBLE_DEPENDENT_LOCALITY", -"TYPE_JP_CHIBAN", -"TYPE_JP_EDABAN", -"TYPE_SUBLOCALITY1", -"TYPE_SUBLOCALITY2", -"TYPE_SUBLOCALITY3", -"TYPE_SUBLOCALITY4", -"TYPE_SUBLOCALITY5", -"TYPE_NEIGHBORHOOD", -"TYPE_CONSTITUENCY", -"TYPE_DESIGNATED_MARKET_AREA", -"TYPE_SCHOOL_DISTRICT", -"TYPE_LAND_PARCEL", -"TYPE_DISPUTED_AREA", -"TYPE_POLICE_JURISDICTION", -"TYPE_STATISTICAL_AREA", -"TYPE_CONSTITUENCY_FUTURE", -"TYPE_PARK", -"TYPE_GOLF_COURSE", -"TYPE_LOCAL_PARK", -"TYPE_NATIONAL_PARK", -"TYPE_US_NATIONAL_PARK", -"TYPE_US_NATIONAL_MONUMENT", -"TYPE_NATIONAL_FOREST", -"TYPE_PROVINCIAL_PARK", -"TYPE_PROVINCIAL_FOREST", -"TYPE_CAMPGROUNDS", -"TYPE_HIKING_AREA", -"TYPE_BUSINESS", -"TYPE_GOVERNMENT", -"TYPE_BORDER_CROSSING", -"TYPE_CITY_HALL", -"TYPE_COURTHOUSE", -"TYPE_EMBASSY", -"TYPE_LIBRARY", -"TYPE_SCHOOL", -"TYPE_UNIVERSITY", -"TYPE_EMERGENCY", -"TYPE_HOSPITAL", -"TYPE_PHARMACY", -"TYPE_POLICE", -"TYPE_FIRE", -"TYPE_DOCTOR", -"TYPE_DENTIST", 
-"TYPE_VETERINARIAN", -"TYPE_TRAVEL_SERVICE", -"TYPE_LODGING", -"TYPE_RESTAURANT", -"TYPE_GAS_STATION", -"TYPE_PARKING", -"TYPE_POST_OFFICE", -"TYPE_REST_AREA", -"TYPE_CASH_MACHINE", -"TYPE_CAR_RENTAL", -"TYPE_CAR_REPAIR", -"TYPE_SHOPPING", -"TYPE_GROCERY", -"TYPE_TOURIST_DESTINATION", -"TYPE_ECO_TOURIST_DESTINATION", -"TYPE_BIRD_WATCHING", -"TYPE_FISHING", -"TYPE_HUNTING", -"TYPE_NATURE_RESERVE", -"TYPE_TEMPLE", -"TYPE_CHURCH", -"TYPE_GURUDWARA", -"TYPE_HINDU_TEMPLE", -"TYPE_MOSQUE", -"TYPE_SYNAGOGUE", -"TYPE_STADIUM", -"TYPE_BAR", -"TYPE_MOVIE_RENTAL", -"TYPE_COFFEE", -"TYPE_GOLF", -"TYPE_BANK", -"TYPE_DOODLE", -"TYPE_GROUNDS", -"TYPE_AIRPORT_GROUNDS", -"TYPE_BUILDING_GROUNDS", -"TYPE_CEMETERY", -"TYPE_HOSPITAL_GROUNDS", -"TYPE_INDUSTRIAL", -"TYPE_MILITARY", -"TYPE_SHOPPING_CENTER", -"TYPE_SPORTS_COMPLEX", -"TYPE_UNIVERSITY_GROUNDS", -"TYPE_DEPRECATED_TARMAC", -"TYPE_ENCLOSED_TRAFFIC_AREA", -"TYPE_PARKING_LOT", -"TYPE_PARKING_GARAGE", -"TYPE_OFF_ROAD_AREA", -"TYPE_BORDER", -"TYPE_BUILDING", -"TYPE_GEOCODED_ADDRESS", -"TYPE_NATURAL_FEATURE", -"TYPE_TERRAIN", -"TYPE_SAND", -"TYPE_BEACH", -"TYPE_DUNE", -"TYPE_ROCKY", -"TYPE_ICE", -"TYPE_GLACIER", -"TYPE_BUILT_UP_AREA", -"TYPE_VEGETATION", -"TYPE_SHRUBBERY", -"TYPE_WOODS", -"TYPE_AGRICULTURAL", -"TYPE_GRASSLAND", -"TYPE_TUNDRA", -"TYPE_DESERT", -"TYPE_SALT_FLAT", -"TYPE_WATER", -"TYPE_OCEAN", -"TYPE_BAY", -"TYPE_BIGHT", -"TYPE_LAGOON", -"TYPE_SEA", -"TYPE_STRAIT", -"TYPE_INLET", -"TYPE_FJORD", -"TYPE_LAKE", -"TYPE_SEASONAL_LAKE", -"TYPE_RESERVOIR", -"TYPE_POND", -"TYPE_RIVER", -"TYPE_RAPIDS", -"TYPE_DISTRIBUTARY", -"TYPE_CONFLUENCE", -"TYPE_WATERFALL", -"TYPE_SPRING", -"TYPE_GEYSER", -"TYPE_HOT_SPRING", -"TYPE_SEASONAL_RIVER", -"TYPE_WADI", -"TYPE_ESTUARY", -"TYPE_WETLAND", -"TYPE_WATER_NAVIGATION", -"TYPE_FORD", -"TYPE_CANAL", -"TYPE_HARBOR", -"TYPE_CHANNEL", -"TYPE_REEF", -"TYPE_REEF_FLAT", -"TYPE_REEF_GROWTH", -"TYPE_REEF_EXTENT", -"TYPE_REEF_ROCK_SUBMERGED", -"TYPE_IRRIGATION", -"TYPE_DAM", 
-"TYPE_DRINKING_WATER", -"TYPE_CURRENT", -"TYPE_WATERING_HOLE", -"TYPE_TECTONIC", -"TYPE_WATERING_HOLE_DEPRECATED", -"TYPE_VOLCANO", -"TYPE_LAVA_FIELD", -"TYPE_FISSURE", -"TYPE_FAULT", -"TYPE_LAND_MASS", -"TYPE_CONTINENT", -"TYPE_ISLAND", -"TYPE_ATOLL", -"TYPE_OCEAN_ROCK_EXPOSED", -"TYPE_CAY", -"TYPE_PENINSULA", -"TYPE_ISTHMUS", -"TYPE_ELEVATED", -"TYPE_PEAK", -"TYPE_NUNATAK", -"TYPE_SPUR", -"TYPE_PASS", -"TYPE_PLATEAU", -"TYPE_RIDGE", -"TYPE_RAVINE", -"TYPE_CRATER", -"TYPE_KARST", -"TYPE_CLIFF", -"TYPE_VISTA", -"TYPE_DIGITAL_ELEVATION_MODEL", -"TYPE_UPLAND", -"TYPE_TERRACE", -"TYPE_SLOPE", -"TYPE_CONTOUR_LINE", -"TYPE_PAN", -"TYPE_UNSTABLE_HILLSIDE", -"TYPE_MOUNTAIN_RANGE", -"TYPE_UNDERSEA", -"TYPE_SUBMARINE_SEAMOUNT", -"TYPE_SUBMARINE_RIDGE", -"TYPE_SUBMARINE_GAP", -"TYPE_SUBMARINE_PLATEAU", -"TYPE_SUBMARINE_DEEP", -"TYPE_SUBMARINE_VALLEY", -"TYPE_SUBMARINE_BASIN", -"TYPE_SUBMARINE_SLOPE", -"TYPE_SUBMARINE_CLIFF", -"TYPE_SUBMARINE_PLAIN", -"TYPE_SUBMARINE_FRACTURE_ZONE", -"TYPE_CAVE", -"TYPE_ROCK", -"TYPE_ARCHIPELAGO", -"TYPE_POSTAL", -"TYPE_POSTAL_CODE", -"TYPE_POSTAL_CODE_PREFIX", -"TYPE_PREMISE", -"TYPE_SUB_PREMISE", -"TYPE_SUITE", -"TYPE_POST_TOWN", -"TYPE_POSTAL_ROUND", -"TYPE_META_FEATURE", -"TYPE_DATA_SOURCE", -"TYPE_LOCALE", -"TYPE_TIMEZONE", -"TYPE_BUSINESS_CHAIN", -"TYPE_PHONE_NUMBER_PREFIX", -"TYPE_PHONE_NUMBER_AREA_CODE", -"TYPE_BUSINESS_CORRIDOR", -"TYPE_ADDRESS_TEMPLATE", -"TYPE_TRANSIT_AGENCY", -"TYPE_FUTURE_GEOMETRY", -"TYPE_EVENT", -"TYPE_EARTHQUAKE", -"TYPE_HURRICANE", -"TYPE_WEATHER_CONDITION", -"TYPE_TRANSIENT", -"TYPE_ENTRANCE", -"TYPE_CARTOGRAPHIC", -"TYPE_HIGH_TENSION", -"TYPE_SKI_TRAIL", -"TYPE_SKI_LIFT", -"TYPE_SKI_BOUNDARY", -"TYPE_WATERSHED_BOUNDARY", -"TYPE_TARMAC", -"TYPE_WALL", -"TYPE_PICNIC_AREA", -"TYPE_PLAY_GROUND", -"TYPE_TRAIL_HEAD", -"TYPE_GOLF_TEEING_GROUND", -"TYPE_GOLF_PUTTING_GREEN", -"TYPE_GOLF_ROUGH", -"TYPE_GOLF_SAND_BUNKER", -"TYPE_GOLF_FAIRWAY", -"TYPE_GOLF_HOLE", -"TYPE_DEPRECATED_GOLF_SHOP", -"TYPE_CAMPING_SITE", 
-"TYPE_DESIGNATED_BARBECUE_PIT", -"TYPE_DESIGNATED_COOKING_AREA", -"TYPE_CAMPFIRE_PIT", -"TYPE_WATER_FOUNTAIN", -"TYPE_LITTER_RECEPTACLE", -"TYPE_LOCKER_AREA", -"TYPE_ANIMAL_ENCLOSURE", -"TYPE_CARTOGRAPHIC_LINE", -"TYPE_ESTABLISHMENT", -"TYPE_ESTABLISHMENT_GROUNDS", -"TYPE_ESTABLISHMENT_BUILDING", -"TYPE_ESTABLISHMENT_POI", -"TYPE_ESTABLISHMENT_SERVICE", -"TYPE_CELESTIAL", -"TYPE_ROAD_MONITOR", -"TYPE_PUBLIC_SPACES_AND_MONUMENTS", -"TYPE_STATUE", -"TYPE_TOWN_SQUARE", -"TYPE_LEVEL", -"TYPE_COMPOUND", -"TYPE_COMPOUND_GROUNDS", -"TYPE_COMPOUND_BUILDING", -"TYPE_COMPOUND_SECTION", -"TYPE_TERMINAL_POINT", -"TYPE_REGULATED_AREA", -"TYPE_LOGICAL_BORDER", -"TYPE_GCONCEPT_ONLY", -"TYPE_DO_NOT_USE_RESERVED_TO_CATCH_GENERATED_FILES", -"TYPE_UNKNOWN" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -true, -true, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, 
-true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -true, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false -], -"enumDescriptions": [ -"ABSTRACT", -"ABSTRACT", -"A route is any section of road (or rails, etc.) that has a name. This includes city streets as well as highways. Road segments can belong to multiple routes (e.g. 
El Camino, CA-82).", -"DEPRECATED", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"A designated bicycle route, whose segments may consist of any combination of bicycle paths, bicycle lanes, or city streets.", -"A designated trail, which may consist of paved walkways, dirt paths, fire road, streets or highways, etc.", -"ABSTRACT", -"", -"Railroads use several different incompatible track types.", -"", -"", -"", -"", -"", -"", -"", -"", -"Tracks for streetcars, cable-cars, etc. Ferries are services that are part of the road network but are not roads. They typically involve fares and scheduled departure times.", -"ABSTRACT", -"The vast majority of ferries are ferry boats.", -"Also called a \"car transport\", a ferry train is a rail service that carries passengers and their vehicles across undrivable terrain. The Channel Tunnel (\"Chunnel\") is the most famous example, but they are also common in the Alps where they connect neighboring valleys otherwise separated by impassable mountains.", -"Any plausible 1-dimensional path through a 2+ dimensional space, for the purposes of making graph-search-based routing possible. Such segments can be used to model paths through parking lots, squares, floors of buildings and other areas.", -"An intersection consists of a collection of segments that terminate at the same location. This is topological definition: it may not match what a typical user would think of as an \"intersection\". See TYPE_INTERSECTION_GROUP, below, for more information. 
Each segment terminating at an intersection has an \"endpoint type\" that specifies how that segment is terminated: stop sign, yield sign, three-way light, etc.", -"ABSTRACT", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"A transit line is a collection of transit legs, associated with some invariant properties of the trips that run over the legs. See also transitline.proto", -"TYPE_TRANSIT_AGENCY was moved to 0xC91. This deprecated enum value still exists for debugging purposes only.", -"DEPRECATED", -"ABSTRACT", -"Road sign features have names, point geometry, etc. They also have segment_path data (see below) which lists the segments that refer to the sign. See segment.proto for the reference from the segment to the road sign.", -"Our TYPE_INTERSECTION feature, above, models the point where one or more segments terminate. This is topological definition: it may not match what a typical user would think of as an \"intersection\". Consider the intersections where Hayes, Market, Larkin, and 9th Street meet near (37.77765, -122.41638) in San Francisco. Most people would probably consider this a single feature, even though we model it as four separate TYPE_INTERSECTION features. This TYPE_INTERSECTION_GROUP is used to model the user's concept of a complex intersection.", -"RESERVED", -"A restriction group describes a set of segment restrictions that belong together and have a name or an associated event. See also restriction_group.proto", -"DEPRECATED", -"ABSTRACT", -"", -"ABSTRACT", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"e.g. 
Silicon Valley", -"A reservation is a region collectively held or governed by indigenous people and officially recognized by the country\u2019s government at the federal or state level. A reservation may be fully contained within an administrative feature or partially contained within two or more. These regions are referred to by different categorical names depending on country and even by state, including but not limited to: \u201cIndian Reservations\u201d, \u201cIndian Reserves\u201d, \u201cLand Claim Settlement Lands\u201d, \u201cIndian Lands\u201d, \u201cTreaty Lands\u201d, \u201cIndigenous Territories\u201d, etc. A reservation is not a historic indigenous territory boundary or a region which has applied for land rights but has not yet received official recognition.", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"An entity widely considered to be a city, that may itself be made up of smaller political entities, some of which are cities/towns/villages themselves. For example, the colloquial view of Sydney, Australia actually comprises many smaller cities, but is regarded as a city itself. This type is not suitable for modeling official metro-/micropolitan or other statistical areas.", -"ABSTRACT", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"Designated Market Areas (or DMAs) are used by marketing and ratings companies (such as the Nielsen Media Research company) to describe geographical regions (such as the greater New York metropolitan area) that are covered by a set of television stations. (See http://www.schooldata.com/pdfs/DMA.pdf) In the United States, DMAs should have a DMA numeric ID name, tagged with the FLAG_DESIGNATED_MARKET_AREA_ID flag.", -"", -"", -"Eventually we'll have more data for disputed areas (e.g., who makes claims on the area, who has de facto control, etc.). 
For the moment, we just define a type so we can simply mark areas as disputed.", -"Boundaries representing the jurisdiction of a particular police station.", -"An area used for aggregating statistical data, eg, a census region. Note that TYPE_STATISTICAL_AREA has a third nibble so we can add an abstract parent above it later if need be at 0x2E1 (and rename TYPE_STATISTICAL_AREA as TYPE_STATISTICAL_AREA1).", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"A line representing the boundary between two features. See border.proto for details.", -"DEPRECATED", -"An association of a point with an address, with no other information.", -"ABSTRACT", -"Expanses of land that share common surface attributes. 
These areas would look more or less uniform from a high altitude.", -"", -"", -"", -"", -"", -"", -"Terrain that looks populated.", -"Terrain that is covered in vegetation.", -"", -"", -"", -"", -"", -"", -"A flat expanse of salt left by the evaporation of a body of salt water.", -"Features can be TYPE_WATER if we don't have enough information to properly type the body of water. TYPE_WATER is also used as the type for child features that compose a TYPE_RIVER feature.", -"One of the large salt-water bodies that covers most of the globe.", -"An ocean subdivision formed by a coastal indentation. Includes coves and gulfs.", -"An open body of water formed by a slight coastal indentation.", -"", -"An ocean subdivision more or less confined by land and islands.", -"A long narrow ocean subdivision. Includes sounds.", -"", -"", -"An inland body of standing water.", -"A lake that dries up part of the year.", -"An artificial body of water, possibly created by a dam, often used for irrigation or house use.", -"", -"An inland body of moving water, or parts associated with it in which there is little or no current (backwater).", -"", -"A branch which flows away from the main river. Includes deltas.", -"A place where two or more rivers join.", -"", -"A place where ground water flows naturally out of the ground.", -"", -"", -"A river that dries up part of the year.", -"A dry riverbed that occasionally receives flashfloods.", -"A place at the end of a river where fresh and salt water mix. Includes tidal creeks and limans.", -"Land that is usually flooded. Includes bogs, marshes, flats, moors, and swamps.", -"", -"A shallow place where water may be waded through.", -"A narrow passage used by boats. Normally artificial.", -"A deep place near a shore where ships commonly drop anchor.", -"A deep part in a body of water that is suitable for navigation. Includes narrows.", -"Rocks, coral, sandbars, or other features beneath the surface of the water that pose a hazard to passing ships. 
Includes shoals.", -"A relatively shallow zone of the back reef located closest to the shore, that may be exposed at low tide.", -"A small section of rocks, coral, sandbars, or other features beneath the surface of the water that forms part of a reef.", -"The full extent of the reef complex.", -"A submerged rock in the water.", -"Man-made (and sometimes natural) channels used to move water. This type was used for both dam structures and water that is hold back by dams. We should use TYPE_COMPOUND_BUILDING for dam structures and TYPE_RESERVOIR for water.", -"DEPRECATED", -"", -"Includes overfalls.", -"A natural depression filled with water where animals come to drink.", -"This type is incorrectly under TYPE_TECTONIC instead of TYPE_WATER. This was a mistake and is now fixed. See TYPE_WATERING_HOLE for the replacement.", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"", -"An exposed rock in the water.", -"A small, low-elevation, sandy island formed on the surface of coral reefs", -"A stretch of land projecting into water. Includes capes and spits.", -"A strip of land connecting two larger land masses, such as continents.", -"Features that are notable for being high (or low), or for having sudden changes in elevation. These features might have an \"elevation\" extension to specify the actual elevation. See ElevationProto for more information.", -"Elevations that have a distinctive peak.", -"A peak or ridge of a mountain that extends through a glacier.", -"A subsidiary peak of a mountain.", -"A route over an otherwise difficult to traverse feature. Includes saddle.", -"Elevations that are flat on top. Includes mesas and buttes.", -"A ridge is a geographical feature consisting of a chain of mountains or hills that form a continuous elevated crest with a single ridgeline for some distance.", -"Steep declines usually carved by erosion. 
Includes valleys, canyons, ditches, and gorges.", -"Depressions causes by impact, explosion, and sometimes sink-holes.", -"Topography formed on limestone and gypsum by dissolution with sinkholes, caves, etc.", -"A vertical or nearly vertical slope. Includes escarpments.", -"An elevated place that is notable for having a good view. Raster digital elevation data. This is not a type to be used by providers or consumed by clients.", -"RESERVED", -"Land along streams higher than the alluvial plain or stream terrace.", -"", -"Land not so steep as a cliff, but changing elevation. Includes slides.", -"All the points on the polygon are at the same elevation.", -"A near-level shallow, natural depression or basin, usually containing an intermittent lake, pond, or pool.", -"", -"A series of mountains or hills ranged in a line and connected by high ground. Mountain ranges usually consist of many smaller ridges. For example, the Himalayas, the Andes. the Alps, etc.", -"Features that are notable for being high (or low), or for having sudden changes in elevation. These features might have an \"elevation\" extension to specify the actual elevation. See ElevationProto for more information.", -"includes peaks, ranges, and spurs", -"", -"includes saddles", -"", -"", -"includes trenches and troughs", -"", -"", -"", -"", -"", -"Don't use 0xA7. Use 8 bits for additional types under TYPE_NATURAL_FEATURE, so we don't run out of space. The following are miscellaneous natural features that don't fit any of the categories above.", -"", -"A feature representing a group or chain of islands. ", -"ABSTRACT", -"This is the type for postal codes which are complete and independent enough that there should be a feature for them (e.g. US 5-digit ZIP codes). For even more detailed suffixes that further subdivide a postal code (such as the +4 component in US ZIP codes), store the information in a TYPE_POSTAL_CODE_SUFFIX address component. 
When a range or set of postal codes share the same geographical area, e.g. because a precise subdivision does not exist or this subdivision is unknown, this type is used for each individual postal code.", -"A prefix portion of a postal code which does not meet the requirements for TYPE_POSTAL_CODE, but which is useful to search for, for example UK outcodes.", -"DEPRECATED", -"DEPRECATED This is deprecated and we want to use TYPE_COMPOUND_SECTION instead.", -"DEPRECATED", -"The term \"post town\" is used for a locality-like-entity that is only used for postal addresses.", -"DEPRECATED", -"ABSTRACT", -"Every data source used in constructing a data repository has a corresponding feature that provides more information about that data source. The extra information is stored in the optional data_source field below.", -"A locale feature provides region specific conventions such as preferred language and formatting details for time, date, and currency values. Locales aren't necessary defined by physical geographic features, so they are classified as meta-features.", -"A timezone feature is used to specify the region covering an international timezone. When a point is covered by multiple timezone features, the most specific one can be used to compute the local time at this point. Most specific implies a much smaller region or the one that is closer to the center. A feature's timezone can be specified in the repeated related_timezone field.", -"A business chain feature is used to represent a chain, e.g. Starbucks, McDonald's, etc. Other features representing specific stores/franchises of this chain may refer to one such feature via RELATION_MEMBER_OF_CHAIN. This is not strictly reserved to commercial chains but can also be used to model organizations such as the Red Cross or the United Nations.", -"A phone number prefix feature is used to specify the region where phone numbers (typically fixed-line numbers) must begin with a certain prefix. 
Any phone number prefix down to any level of granularity could be represented by this type.", -"A phone number area code is a prefix which also coincides with the area code, or national destination code, of a particular region.", -"A Business Corridor is a dense cluster of semantically similar establishments. TYPE_BUSINESS_CORRIDOR features are distinguished from TYPE_COLLOQUIAL_AREA features because the corridors are not under the political hierarchy, are allowed to be nameless, and may not correspond to well-known real world locations. For more details, see go/geo-corridors-schema.", -"An address template feature provides region-specific conventions for structuring addresses. These features aren't necessarily defined by physical geographic features, so they are classified as meta-features.", -"A transit agency operates a number of lines, typically all in the same city, region or country. See also transitagency.proto", -"A feature whose geometry is planned to replace the geometry on another feature.", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"RESERVED", -"A portal of entry or exit to another feature. Examples: - Subway station entrance. - Parking lot entrance.", -"Cartographic features are used to capture real-world objects for which there is no current desire to model any specific attributes. 
These are only useful to make the map tiles look pretty.", -"DEPRECATED", -"Also see skitrail.proto", -"Also see skilift.proto", -"Also see skiboundary.proto", -"", -"Starting with TYPE_TARMAC, we use longer IDs, so that we can expand the number of feature types under TYPE_CARTOGRAPHIC.", -"Use TYPE_COMPOUND_GROUND and appropriate gcids for the next two.", -"DEPRECATED", -"DEPRECATED", -"", -"Sub-types within a golf course.", -"", -"", -"", -"", -"Use TYPE_ESTABLISHMENT_POI and gcid:golf_shop for golf shops instead.", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"Subtype within a zoo - a cage or fenced-off or otherwise delineated area containing animals.", -"A line for a cartographic detail. For example the international date line. Such features should have polyline geometry.", -"ABSTRACT This type is being replaced by TYPE_COMPOUND_GROUNDS. For further details, see go/compounds-v2", -"DEPRECATED This type has been replaced by TYPE_COMPOUND_BUILDING. For further details, see go/oyster-compounds", -"DEPRECATED", -"An establishment that has a physical location. Note that it *may* also have a service area (e.g. a restaurant that offers both dine-in and delivery). This type of business is also known as a \"hybrid\" Service Area Business. Establishment POIs can be referenced by TYPE_COMPOUND features using the RELATION_PRIMARILY_OCCUPIED_BY. This is the reciprocal relation of the RELATION_OCCUPIES.", -"NOTE(tcain): Using value 0xD441, since we could find ourselves with a need to differentiate service areas from online-only at this level in the future, but still benefit from being able to group those under a common parent, disjoint from TYPE_ESTABLISHMENT_POI.", -"The root of types of features that are in the sky, rather than on the earth. There will eventually be a hierarchy of types here.", -"Features responsible for monitoring traffic on roads (usually for speed). Includes cameras at particular points as well as monitors that cover larger spans. 
Features of this type should have a corresponding gcid that specifies the correct subtype (e.g. gcid:road_camera or gcid:speed_camera_zone). This type was originally named as TYPE_ROAD_CAMERA.", -"ABSTRACT", -"Note that this type does not distinguish the nature of the statue (religious, historical, memorial, tourist, ...).", -"Open space used for events, gathering, or as market-place.", -"A feature used to represent a logical level, e.g. floor.", -"ABSTRACT", -"e.g. campus, compound, parcel.", -"e.g. single family dwelling, office building.", -"e.g. suite, room, hallway, cubicle.", -"A terminal point represents a good location for a user to meet a taxi, ridesharing vehicle, or general driver.", -"An area controlled in some way by an authoritative source, such as a government-designated COVID containment zone or an area under government sanctions. Features of this type should have one or more gcids corresponding to their specific regulation, and client handling of these features may vary based on the type of regulation.", -"A grouping of TYPE_BORDER features (\"border segments\"), which together represent a border between two features of the same type.", -"A generic feature type for any geo-type that cannot be modeled under existing feature types. No new feature type should be created within feature proto after this type.", -"DEPRECATED", -"A feature of completely unknown type. This should only be used when absolutely necessary. One example in which this type is useful is in the Chinese importer, which must heuristically segment addresses into components - it often does not know what types to make those components. Please note that the Oyster address formatter does not currently support address components of TYPE_UNKNOWN well." -], -"type": "string" -}, -"verticalOrdering": { -"$ref": "GeostoreVerticalOrderingProto", -"description": "Represents vertical ordering for this feature relative to other geometrically-overlaping features. 
See go/aboutgrades for more information about distinction among different levels." -}, -"visualizationArea": { -"$ref": "GeostoreGeometryStoreReferenceProto", -"description": "Geometry Store ID and materialized geometry representing a bounding box of the feature's geometry (or the building/grounds a POI occupies) for stylized rendering, UX experiences, etc. IMPORTANT: Clients may not write to this field directly; see go/geometry-store-reading-and-writing#writing-to-geometry-store." -}, -"waterRemovedPolygon": { -"$ref": "GeostorePolygonProto", -"description": "A version of the geometry which has water removed but is not simplified (thus having equal or more vertices than polygon_for_display)." -}, -"website": { -"description": "The official website of this feature. Stored as a repeated field to allow for multilingual official websites (see comments in url.proto).", -"items": { -"$ref": "GeostoreUrlProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFeatureReplacementInfoProto": { -"description": "Metadata to track feature derivations and replacements. This is used to track feature provenance (particularly for road segments).", -"id": "GeostoreFeatureReplacementInfoProto", -"properties": { -"derivedFrom": { -"description": "This feature was created to replace other features that are referenced by this field.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"replacedBy": { -"description": "This feature was replaced by other features that are referenced by this this field.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFieldMetadataProto": { -"description": "Internal field metadata. 
This part is not exposed to downstream consumers of the repository (read-only clients) but is available to upstream providers to the repository (read-write clients).", -"id": "GeostoreFieldMetadataProto", -"properties": { -"internal": { -"$ref": "GeostoreInternalFieldMetadataProto" -} -}, -"type": "object" -}, -"GeostoreFieldWithRightsProto": { -"description": "Proto used to represent rights for a feature property id. See go/geo-rights for more details. NOTE: Use google3/geostore/provenance/public/rights.h or google3/java/com/google/geostore/provenance/rights/Rights.java instead of accessing this proto directly.", -"id": "GeostoreFieldWithRightsProto", -"properties": { -"attributeId": { -"deprecated": true, -"description": "**DEPRECATED** Never set. ", -"type": "string" -}, -"featurePropertyId": { -"$ref": "GeostoreFeaturePropertyIdProto", -"description": "The feature property id for which this entry tracks rights." -}, -"fieldType": { -"description": "**DEPRECATED** The field type for which this entry tracks rights. There may be multiple entries for the same field type - prefer feature_property_id to uniquely identify a particular entry. ", -"format": "int32", -"type": "integer" -}, -"minRightsLevel": { -"description": "The minimum rights level among all current values for this feature property id.", -"enum": [ -"UNKNOWN_RIGHTS", -"GT_RIGHTS", -"FULL_RIGHTS" -], -"enumDescriptions": [ -"Indicates that the rights level is unknown. This should always be the default value of any RightsLevel field in any proto.", -"GT-rights or GT-level rights is a level of data ownership that Google has retained as defined by the Ground Truth project. 
GT rights include but may not be limited to: - Data may be used perpetually in any Google product or service - No requirement for any on-screen attribution requirements (although we may have softer attribution requirements, like display on a legal notices page) - No display, distribution, or storage restrictions - other than that the content may not be redistributed on a standalone basis, independent of a Google product or service.", -"Full rights refers to data for which Google has no usage restrictions. We can do whatever we want with it." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreFlowLineProto": { -"description": "Wrapper to hold data related to a lane\u2019s track, extendable for future data.", -"id": "GeostoreFlowLineProto", -"properties": { -"curvature": { -"$ref": "GeostoreCurvatureProto", -"description": "RESERVED" -}, -"track": { -"$ref": "GeostoreTrackProto" -} -}, -"type": "object" -}, -"GeostoreFoodMenuItemOptionProto": { -"description": "Example: the choice of chicken, beef, or tofu for the Thai Basil dish. Each option would have its own name, price, allergen info, etc. 
Note: This proto stores both food and service items despite the name.", -"id": "GeostoreFoodMenuItemOptionProto", -"properties": { -"allergenAbsent": { -"items": { -"enum": [ -"ALLERGEN_DAIRY", -"ALLERGEN_EGG", -"ALLERGEN_FISH", -"ALLERGEN_PEANUT", -"ALLERGEN_SHELLFISH", -"ALLERGEN_SOY", -"ALLERGEN_TREE_NUT", -"ALLERGEN_WHEAT" -], -"enumDescriptions": [ -"A list of types of allergens for food and drinks.", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"allergenPresent": { -"items": { -"enum": [ -"ALLERGEN_DAIRY", -"ALLERGEN_EGG", -"ALLERGEN_FISH", -"ALLERGEN_PEANUT", -"ALLERGEN_SHELLFISH", -"ALLERGEN_SOY", -"ALLERGEN_TREE_NUT", -"ALLERGEN_WHEAT" -], -"enumDescriptions": [ -"A list of types of allergens for food and drinks.", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"calories": { -"format": "int32", -"type": "integer" -}, -"ingredients": { -"description": "Ingredients of the food dish option.", -"items": { -"$ref": "GeostoreFoodMenuItemOptionProtoIngredient" -}, -"type": "array" -}, -"media": { -"description": "Photos of the food dish option.", -"items": { -"$ref": "GeostoreMediaItemProto" -}, -"type": "array" -}, -"nameInfo": { -"description": "The repeated name_info field here is for item options with names or descriptions listed in multiple languages. When an item option has no names or descriptions, the size of the repeated field name_info may be 0. For example, when a food menu item does not have multiple options, the item option proto is used only to specify price and nutritional information, so it will not have a name_info field. There should be at most one name_info for any given language.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -}, -"nutritionFacts": { -"$ref": "GeostorePriceInfoFoodNutritionFacts", -"description": "Nutrition facts of the food dish option. Note that it also includes calories information with a finer defined unit information." 
-}, -"portionSize": { -"$ref": "GeostoreFoodMenuItemOptionProtoPortionSize", -"description": "Size of the order, represented in units of items. (e.g. 4 \"skewers\u201d, 6 \"pieces\u201d)" -}, -"preparationMethods": { -"description": "Methods on how the food dish option is prepared.", -"items": { -"enum": [ -"METHOD_UNDEFINED", -"BAKED", -"BOILED", -"BLANCHED", -"BRAISED", -"CODDLED", -"PRESSURE_COOKED", -"SIMMERED", -"STEAMED", -"STEEPED", -"GRILLED", -"FRIED", -"PAN_FRIED", -"STIR_FRIED", -"SAUTEED", -"ROASTED", -"BARBECUED", -"SEARED", -"SMOKED", -"FERMENTED", -"MARINATED", -"PICKLED", -"BASTED", -"KNEADED", -"OTHER_METHOD" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"price": { -"$ref": "GeostorePriceRangeProto", -"description": "We use PriceRangeProto here but we expect the lower_price and upper_price to be both set to equal numbers because an option should have a single price. This field is not required because food item prices may be variable depending on season." 
-}, -"restriction": { -"items": { -"enum": [ -"DIET_HALAL", -"DIET_KOSHER", -"DIET_ORGANIC", -"DIET_VEGAN", -"DIET_VEGETARIAN" -], -"enumDescriptions": [ -"A list of types of restrictions adhered to during food preparation.", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"servesNumPeople": { -"description": "Number of people can be served by this food dish option.", -"format": "int32", -"type": "integer" -}, -"spiciness": { -"enum": [ -"SPICINESS_NONE", -"SPICINESS_MILD", -"SPICINESS_MEDIUM", -"SPICINESS_HOT" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreFoodMenuItemOptionProtoIngredient": { -"description": "This message denotes an ingredient information of a food dish.", -"id": "GeostoreFoodMenuItemOptionProtoIngredient", -"properties": { -"nameInfo": { -"description": "The repeated name_info field is for the ingredient in multiple languages.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFoodMenuItemOptionProtoPortionSize": { -"description": "This message denotes the serving portion size of a food dish.", -"id": "GeostoreFoodMenuItemOptionProtoPortionSize", -"properties": { -"quantity": { -"description": "Required.", -"format": "int32", -"type": "integer" -}, -"unit": { -"description": "Required. The repeated name_info field is for the unit in multiple languages.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreFoodMenuItemProto": { -"description": "A food menu item must have a name although it can have multiple names in different languages. Example: Thai Basil. Price for this item is specified in the item_option field. Since the price of an item may be unknown, e.g. 
seasonal price, there is nothing that requires an item_option to be present in the FoodMenuItemProto.", -"id": "GeostoreFoodMenuItemProto", -"properties": { -"itemOption": { -"items": { -"$ref": "GeostoreFoodMenuItemOptionProto" -}, -"type": "array" -}, -"nameInfo": { -"description": "The repeated name_info field is for items listed in multiple languages.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreGConceptInstanceProto": { -"description": "A GConceptInstanceProto contains a GConceptID (which is the unique identifier of a GConcept, a category in the Geo Ontology).", -"id": "GeostoreGConceptInstanceProto", -"properties": { -"gconceptId": { -"description": "The unique identifier of a GConcept (e.g. \"gcid:railway\").", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this GConcept." -}, -"prominence": { -"description": "The relative prominence of this category to this feature according to the data provider, as one of the values from the enum above. Prominence is a measure of how well the given GConcept describes the feature. An example is a gas station with convenience store and ATM. All three GConcepts are very relevant, but the gas_station GConcept is the most prominent. If the prominence of this GConcept is unknown, this field should not be set.", -"enum": [ -"NON_PRIMARY", -"PRIMARY" -], -"enumDescriptions": [ -"not a prominent GConcept", -"the most prominent GConcept" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreGeometryComposition": { -"description": "Encapsulates all the features which, together, define the geometry of a feature. This happens by: 1. taking the union of all polygons of features referenced in includes_geometry_of 2. 
subtracting the polygons of all the features referenced in excludes_geometry_of", -"id": "GeostoreGeometryComposition", -"properties": { -"excludesGeometryOf": { -"description": "Features whose geometry to exclude while composing the geometry of this feature.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"includesGeometryOf": { -"description": "Features whose geometry to include while composing the geometry of this feature.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreGeometryStoreReferenceProto": { -"description": "A Geometry Store ID and, in some contexts, geometry materialized from the record associated with that ID.", -"id": "GeostoreGeometryStoreReferenceProto", -"properties": { -"footprint": { -"description": "Geometry (in STGeography format) materialized from the footprint field of the Geometry Store record associated with the geometry_id referenced above.", -"format": "byte", -"type": "string" -}, -"geometry": { -"$ref": "GeostoreCityJsonProto", -"description": "Geometry materialized from the full_fidelity_proto field of the Geometry Store record associated with the geometry_id referenced above." -}, -"geometryId": { -"description": "The ID of a record in the Geometry Store.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreGeopoliticalGeometryProto": { -"description": "Geopolitical (unsimplified) polygons for a feature for different geopolitical use cases. See go/unsimplified-poly.", -"id": "GeostoreGeopoliticalGeometryProto", -"properties": { -"restOfWorldPolygon": { -"$ref": "GeostorePolygonProto", -"description": "The unsimplified, water-subtracted polygon representing the feature's geometry as viewed by the rest of the world, which may differ from its default polygon, for example by excluding certain regions." 
-}, -"selfPolygon": { -"$ref": "GeostorePolygonProto", -"description": "The unsimplified, water-subtracted polygon representing the feature's geometry as viewed by the country that administers it, which may differ from its default polygon, for example by including disputed areas." -} -}, -"type": "object" -}, -"GeostoreGeopoliticalProto": { -"description": "This protocol buffer is used to store geopolitical information about the feature that override the base state of the feature. For example, the name of the feature from different regions' POV. This protocol buffer can be used on any feature involved in a geopolitical situation and is not limited to TYPE_POLITICAL features.", -"id": "GeostoreGeopoliticalProto", -"properties": { -"conveysAttributionTo": { -"description": "If present, some aspect of this feature (usually name or geometry) can signify ownership of the area by the region specified. The string, if present, will be a region code. E.g., suppose a TYPE_POSTAL_CODE feature has a Russian format in Crimea (which is disputed by Ukraine), then this field would be set to \"RU\".", -"type": "string" -}, -"regionSpecificName": { -"description": "Any specific handling of this feature's name from different regions' POVs. This field can only contain up to one name per region/language combination (each of which would be a separate RegionSpecificNameProto).", -"items": { -"$ref": "GeostoreRegionSpecificNameProto" -}, -"type": "array" -}, -"regionalPolygonAdjustment": { -"description": "Adjustments to the base polygon of this feature to construct a region-specific view. In general, this field is edited only by go/mf-triggers based on the data written to regional_polygon_composing_claims. 
This field should only be directly edited if there are region-specific adjustments that cannot be represented using the geometry of this feature's claims, but note that anything written to this field will be overwritten by go/mf-triggers if regional_polygon_composing_claims is ever edited. See go/geopolitical-geometry-schema for more information.", -"items": { -"$ref": "GeostoreGeopoliticalProtoRegionalPolygonAdjustmentProto" -}, -"type": "array" -}, -"regionalPolygonComposingClaims": { -"description": "Feature's claim(s) which compose the region-specific polygon. This composition will be used by go/mf-triggers to compose the specified region's view of this feature's geometry. The difference between that polygon and this feature's base polygon will be stored in regional_polygon_adjustment below. Edits to this field will overwrite anything already stored in regional_polygon_adjustment. See go/geopolitical-geometry-schema for more information.", -"items": { -"$ref": "GeostoreGeopoliticalProtoRegionalPolygonComposingClaimsProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreGeopoliticalProtoRegionalPolygonAdjustmentProto": { -"description": "Raw polygon adjustments to apply to this feature's base polygon to construct a specific region's view of this feature's polygon.", -"id": "GeostoreGeopoliticalProtoRegionalPolygonAdjustmentProto", -"properties": { -"polygonToAdd": { -"$ref": "GeostorePolygonProto", -"description": "Polygon to add to the feature's base polygon to get the view for the given region. Applied after polygon_to_subtract, such that any region contained in both polygons WILL be part of the final result." -}, -"polygonToSubtract": { -"$ref": "GeostorePolygonProto", -"description": "Polygon to subtract from the feature's base polygon to get the view for the given region." 
-}, -"regionCode": { -"description": "Region code (or other identifier) for the region.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreGeopoliticalProtoRegionalPolygonComposingClaimsProto": { -"description": "The polygon composition recipe for a specific region's view of this feature's geometry, based on disputed area claims asserted by this country. Note that the included / excluded claims listed for a given region's view of this feature may not match up exactly with which claims the region recognizes / does not recognize for this feature. Claims may only be included or excluded for regions that themselves are a party to the dispute, i.e. a region's view of itself will include its own claims, and a region's view of another feature will exclude its own claims. See current Geopolitical policy at go/geopolitical-policy-primer.", -"id": "GeostoreGeopoliticalProtoRegionalPolygonComposingClaimsProto", -"properties": { -"regionCode": { -"description": "Region code (or other identifier) for the region.", -"type": "string" -}, -"regionExcludedClaims": { -"description": "TYPE_DISPUTED_AREAs claimed by this feature excluded from this region's view of the feature's geometry. (The TDAs must have polygonal geometry and be referenced in this feature's political.claim field.) The polygons of these claims will be subtracted to this feature's base polygon when this feature is viewed from the specified region.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"regionIncludedClaims": { -"description": "TYPE_DISPUTED_AREAs claimed by this feature and included in this region's view of the feature's geometry. (The TDAs must have polygonal geometry and be referenced in this feature's political.claim field.) 
The polygons of these claims will be added to this feature's base polygon when this feature is viewed from the specified region.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreGradeLevelProto": { -"description": "The grade level of a segment represents the relative altitude of the segment at a particular point along the segment. This level is in relation to other segments at the same point. For example, you might have a freeway at level = 0 and an overpass at level = 2. Vertical segments are represented by a polyline containing only 1 vertex and exactly two grade_level in segment.proto whose indices are 0. grade_level(0) represents the relative height at the start point of the segments, and grade_level(1) represents the one at the end point.", -"id": "GeostoreGradeLevelProto", -"properties": { -"index": { -"description": "The index of the point along the segment, where 0 is the starting point. This means that the index of a point along a segment and its sibling will be different.", -"format": "int32", -"type": "integer" -}, -"level": { -"description": "The grade level of the indexed point. The grade level can be thought of as a relative vertical ordering with respect to other segments at the same point, where larger/more positive numbers are \"higher\". Negative grade level values are allowed and are typically used for points below grade level (0 is a common choice to represent the level of points at the ground level). For vertical segments, the height, i.e. the vertical length, is represented by difference of levels in millimeters. 
For example, feature.segment().grade_level(0).level() == 0 and feature.segment().grade_level(1).level() == 5000, then the length of the vertical segment feature is 5000 millimeters (5 meters).", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreHtmlTextProto": { -"description": "Represents HTML text associated with a feature.", -"id": "GeostoreHtmlTextProto", -"properties": { -"text": { -"description": "Zero or more texts of the specified type, in various languages. If this is a HTML_DESCRIPTION blob then these texts would hold the description in English, German, and so forth. The text is an HTML fragment, not a full page. The fragment should be suitable for including in a DIV. It must have balanced HTML tags. It may use HTML's \"class\" attributes to assign classes to HTML elements. This allows the HTML to be formatted by an (external) style sheet. The HTML should not have embedded style sheet definitions, nor should it have embedded JavaScript.", -"items": { -"$ref": "GeostoreLanguageTaggedTextProto" -}, -"type": "array" -}, -"type": { -"enum": [ -"HTML_DESCRIPTION" -], -"enumDescriptions": [ -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreInferredGeometryProto": { -"description": "Inferred geometry defines the geometry of a feature as the union or exclusion of the geometry of other features. For instance, the geometry of a timezone can be specified as the union of all the countries it applies to. In this scenario, the timezone will can be considered a \"composite feature\", while the countries are its \"composing features\". A composite feature must have a bidirectional reference between itself and all its composing features. A composite feature refers to its composing features via `geometry_composition`, while the composing features must refer back to the composing feature via `defines_geometry_for`. 
See: go/inferred-geometry and go/geo-schema:composite-geometry-editor for more details.", -"id": "GeostoreInferredGeometryProto", -"properties": { -"definesGeometryFor": { -"description": "Features whose geometry depends on this feature's geometry.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"geometryComposition": { -"$ref": "GeostoreGeometryComposition", -"description": "Features whose geometry defines the geometry of this feature (i.e. \"composing features\")." -} -}, -"type": "object" -}, -"GeostoreInternalFeatureProto": { -"description": "InternalFeatureProto represents fields for data that are more about other data within the FeatureProto than about the feature itself. This could be data that's set by editors directly(ish) (e.g. trust or rights data), or data set internally by MapFacts based on other data (e.g. polygon shape IDs).", -"id": "GeostoreInternalFeatureProto", -"properties": { -"polygonShapeId": { -"description": "A unique identifier for this feature's polygon data which is being held externally in Shapestore (see go/shapestore).", -"type": "string" -}, -"restOfWorldPolygonShapeId": { -"description": "A unique identifier for this feature's rest-of-world view polygon data which is being held externally in Shapestore (see go/shapestore). This is part of the feature's geopolitical geometry.", -"type": "string" -}, -"rightsStatus": { -"$ref": "GeostoreRightsStatusProto", -"description": "Per-field rights for this feature. See http://g3doc/geostore/g3doc/developers-guide/inputs/rights-tracking for more information." -}, -"selfPolygonShapeId": { -"description": "A unique identifier for this feature's self view polygon data which is being held externally in Shapestore (see go/shapestore). This is part of the feature's geopolitical geometry.", -"type": "string" -}, -"trust": { -"$ref": "GeostoreTrustSignalsProto", -"description": "Trust signals/annotations for the feature. 
In an input feature, these signals are computed at the beginning of the pipeline and are immutable during the processing. In output features, this proto may define the rules/criteria that a newer edit should meet, in order to be applied." -}, -"waterRemovedPolygonShapeId": { -"description": "A unique identifier for this feature's water-removed polygon data which is being held externally in Shapestore (see go/shapestore).", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreInternalFieldMetadataProto": { -"id": "GeostoreInternalFieldMetadataProto", -"properties": { -"isAuto": { -"description": "Whether or not the piece of data has been generated automatically (i.e., by a bot/automated process based on heuristics/algorithms rather than coming as a fact set by some human user or data provider based on their knowledge). Note that this does NOT imply that the value was set as a result of a bot operation on the repository, since it is conceivable to use a bot/automated process simply as a way of convenience to ingest large amount of canonical/ground truth data.", -"type": "boolean" -}, -"sourceSummary": { -"$ref": "GeostoreInternalSourceSummaryProto", -"description": "Information about the source providing the piece of data this metadata is attached to." -} -}, -"type": "object" -}, -"GeostoreInternalSegmentProto": { -"description": "Internal-only proto used to express additional information about segments. This is intended for communicating extra information between editing clients and the repository, and should not be used by or visible to clients. WARNING - if you add new fields to InternalSegmentProto, make sure that geostore/tools/internal/mr-mergesegments_test.cc is aware of them.", -"id": "GeostoreInternalSegmentProto", -"properties": { -"disallowedConnections": { -"description": "A list of lane connections that are explicitly not allowed to be added. 
NOTE: This should never reference existing lane connections on this segment.", -"items": { -"$ref": "GeostoreInternalSegmentProtoLaneConnectionReference" -}, -"type": "array" -}, -"disallowedPrimaryConnection": { -"description": "A list of lane connections which cannot have the primary_connection bit set.", -"items": { -"$ref": "GeostoreInternalSegmentProtoLaneConnectionReference" -}, -"type": "array" -}, -"travelAllowance": { -"description": "The set of restrictions that apply to this segment; these are actually *POSITIVE* restrictions, i.e. they are known to be allowed.", -"items": { -"$ref": "GeostoreRestrictionProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreInternalSegmentProtoLaneConnectionReference": { -"description": "Specifies a single outgoing lane connection.", -"id": "GeostoreInternalSegmentProtoLaneConnectionReference", -"properties": { -"fromLaneNumber": { -"description": "The lane number on this segment.", -"format": "int32", -"type": "integer" -}, -"segment": { -"$ref": "GeostoreFeatureIdProto", -"description": "This reference to the other segment is weak, since strong would blow up bounds of all segments." -}, -"toLaneNumber": { -"description": "This is the lane number on the target segment.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreInternalSourceSummaryProto": { -"id": "GeostoreInternalSourceSummaryProto", -"properties": { -"dataset": { -"description": "Within the above provider, the dataset from which this piece of data was generated. For fields that are auto-generated the \"dataset\" is likely to be some algorithm's or program's name. Similar to SourceInfoProto.dataset but with the difference that it is required to always be set. Providers that don't have a concept of dataset may use \"default\".", -"type": "string" -}, -"provider": { -"description": "The data provider from which this piece of data was generated. 
Equivalent to SourceInfoProto.provider in the public schema.", -"enum": [ -"PROVIDER_ANY", -"PROVIDER_UNKNOWN", -"PROVIDER_NAVTEQ", -"PROVIDER_TELE_ATLAS", -"PROVIDER_TELE_ATLAS_MULTINET", -"PROVIDER_TELE_ATLAS_CODEPOINT", -"PROVIDER_TELE_ATLAS_GEOPOST", -"PROVIDER_TELE_ATLAS_DATAGEO", -"PROVIDER_TELE_ATLAS_ADDRESS_POINTS", -"PROVIDER_TELCONTAR", -"PROVIDER_EUROPA", -"PROVIDER_ROYAL_MAIL", -"PROVIDER_GOOGLE", -"PROVIDER_GOOGLE_HAND_EDIT", -"PROVIDER_GOOGLE_BORDERS", -"PROVIDER_GOOGLE_SUBRANGE", -"PROVIDER_GOOGLE_GT_FUSION", -"PROVIDER_GOOGLE_ZAGAT_CMS", -"PROVIDER_GOOGLE_PLACE_NAVBOOST", -"PROVIDER_GOOGLE_FOOTPRINT", -"PROVIDER_GOOGLE_PRODUCT_TERMS", -"PROVIDER_GOOGLE_POINTCARDS", -"PROVIDER_GOOGLE_BUSINESS_CHAINS", -"PROVIDER_GOOGLE_LOCAL_SUMMARIZATION", -"PROVIDER_GOOGLE_PRONUNCIATIONS", -"PROVIDER_GOOGLE_DUMPLING", -"PROVIDER_GOOGLE_DISTILLERY", -"PROVIDER_GOOGLE_LOCAL_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_RELATION_MINER", -"PROVIDER_GOOGLE_MAPSPAM", -"PROVIDER_GOOGLE_ROSE", -"PROVIDER_GOOGLE_LOCAL_PLACE_RATINGS", -"PROVIDER_GOOGLE_WIPEOUT", -"PROVIDER_GOOGLE_KNOWLEDGE_GRAPH", -"PROVIDER_GOOGLE_BEEGEES", -"PROVIDER_GOOGLE_REVIEW_SUMMARIZATION", -"PROVIDER_GOOGLE_OFFLINE_NON_CORE_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_GEO_WORLDMAPS", -"PROVIDER_GOOGLE_GEO_MODERATION", -"PROVIDER_GOOGLE_OYSTER_AUTO_EDITS", -"PROVIDER_GOOGLE_LOCAL_ALCHEMY", -"PROVIDER_GOOGLE_KEROUAC", -"PROVIDER_GOOGLE_MOBRANK", -"PROVIDER_GOOGLE_RAPTURE", -"PROVIDER_GOOGLE_CULTURAL_INSTITUTE", -"PROVIDER_GOOGLE_GEOCODES_FROM_LOCAL_FEEDS", -"PROVIDER_GOOGLE_ATTRIBUTES_FROM_CRAWLED_CHAINS", -"PROVIDER_GOOGLE_TACTILE_MAPS", -"PROVIDER_GOOGLE_MAPS_FOR_MOBILE", -"PROVIDER_GOOGLE_GEO_REALTIME", -"PROVIDER_GOOGLE_PROMINENT_PLACES", -"PROVIDER_GOOGLE_PLACE_ACTIONS", -"PROVIDER_GOOGLE_GT_AUTO_EDITS", -"PROVIDER_GOOGLE_WAZE", -"PROVIDER_GOOGLE_ONTHEGO", -"PROVIDER_GOOGLE_GT_IMPORT", -"PROVIDER_GOOGLE_STRUCTURED_DATA", -"PROVIDER_GOOGLE_HELICOPTER", -"PROVIDER_GOOGLE_ROLLBACK", 
-"PROVIDER_GOOGLE_RIGHTS_REPAIR", -"PROVIDER_GOOGLE_PERFUME", -"PROVIDER_GOOGLE_MAPS_TRANSLATION", -"PROVIDER_GOOGLE_CALL_ME_MAYBE", -"PROVIDER_GOOGLE_LOCAL_UNIVERSAL", -"PROVIDER_GOOGLE_CROUPIER", -"PROVIDER_GOOGLE_SKYSMART", -"PROVIDER_GOOGLE_RIDDLER", -"PROVIDER_GOOGLE_ROADCLOSURES", -"PROVIDER_GOOGLE_SPORE", -"PROVIDER_GOOGLE_LOCALIZATION", -"PROVIDER_GOOGLE_CATTERMS", -"PROVIDER_GOOGLE_GT_FIELD_OPS", -"PROVIDER_GOOGLE_MATCHMAKER", -"PROVIDER_GOOGLE_ARBITRATION", -"PROVIDER_GOOGLE_BIZBUILDER_OPS", -"PROVIDER_GOOGLE_LOCAL_INVENTORY_ADS", -"PROVIDER_GOOGLE_GT_DRAFTY", -"PROVIDER_GOOGLE_HOTELADS_OPS", -"PROVIDER_GOOGLE_MARKERS", -"PROVIDER_GOOGLE_STATE_MACHINE", -"PROVIDER_GOOGLE_ATTRIBUTES_INFERENCE", -"PROVIDER_GOOGLE_BIKESHARE", -"PROVIDER_GOOGLE_GHOSTWRITER", -"PROVIDER_GOOGLE_EDIT_PLATFORM", -"PROVIDER_GOOGLE_BLUE_GINGER", -"PROVIDER_GOOGLE_GEO_TIGER", -"PROVIDER_GOOGLE_HYADES", -"PROVIDER_GOOGLE_WEBQUARRY", -"PROVIDER_GOOGLE_GEO_MADDEN", -"PROVIDER_GOOGLE_ANDROID_PAY", -"PROVIDER_GOOGLE_OPENING_HOURS_TEAM", -"PROVIDER_GOOGLE_LOCAL_DISCOVERY", -"PROVIDER_GOOGLE_LOCAL_HEALTH", -"PROVIDER_GOOGLE_UGC_MAPS", -"PROVIDER_GOOGLE_FIBER", -"PROVIDER_GOOGLE_REVGEO", -"PROVIDER_GOOGLE_HOTELADS_PARTNER_FRONT_END", -"PROVIDER_GOOGLE_GEO_UGC_TASKS", -"PROVIDER_GOOGLE_GEOCODING", -"PROVIDER_GOOGLE_SPYGLASS", -"PROVIDER_GOOGLE_PLUS_CODES_AS_ADDRESSES", -"PROVIDER_GOOGLE_GEO_CHANGES", -"PROVIDER_GOOGLE_HUME", -"PROVIDER_GOOGLE_MEGAMIND", -"PROVIDER_GOOGLE_GT_ROADSYNTH", -"PROVIDER_GOOGLE_FIREBOLT", -"PROVIDER_GOOGLE_LOCAL_PLACE_OFFERINGS", -"PROVIDER_GOOGLE_UGC_SERVICES", -"PROVIDER_GOOGLE_GEOALIGN", -"PROVIDER_GOOGLE_GT_COMPOUNDS", -"PROVIDER_GOOGLE_FOOD_ORDERING", -"PROVIDER_GOOGLE_HOTEL_KNOWLEDGE_OPS", -"PROVIDER_GOOGLE_URAW", -"PROVIDER_GOOGLE_FLYEYE", -"PROVIDER_GOOGLE_YOUKE", -"PROVIDER_GOOGLE_GT_ZEPHYR", -"PROVIDER_GOOGLE_USER_SAFETY", -"PROVIDER_GOOGLE_ADDRESS_MAKER", -"PROVIDER_GOOGLE_UGC_PHOTOS", -"PROVIDER_GOOGLE_GT_WINDCHIME", -"PROVIDER_GOOGLE_SNAG_FIXER", 
-"PROVIDER_GOOGLE_GEO_DEALS", -"PROVIDER_GOOGLE_LOCAL_PLACE_TOPICS", -"PROVIDER_GOOGLE_PROPERTY_INSIGHTS", -"PROVIDER_GOOGLE_GEO_CONSUMER_MERCHANT_EXPERIMENTS", -"PROVIDER_GOOGLE_GEO_PORTKEY", -"PROVIDER_GOOGLE_ROAD_MAPPER", -"PROVIDER_GOOGLE_LOCATION_PLATFORM", -"PROVIDER_GOOGLE_POSTTRIP", -"PROVIDER_GOOGLE_TRAVEL_DESTINATION", -"PROVIDER_GOOGLE_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_BIZBUILDER_CLEANUP", -"PROVIDER_GOOGLE_USER", -"PROVIDER_GOOGLE_STATION", -"PROVIDER_GOOGLE_GEO_FOOD", -"PROVIDER_GOOGLE_GEO_AR", -"PROVIDER_GOOGLE_GEO_TEMPORAL", -"PROVIDER_GOOGLE_SERVICES_MARKETPLACE", -"PROVIDER_GOOGLE_IMT_CLEANUP", -"PROVIDER_GOOGLE_GEO_FOOD_MENU", -"PROVIDER_GOOGLE_CARENAV", -"PROVIDER_GOOGLE_DRIVING_FEEDS", -"PROVIDER_GOOGLE_DRIVING_UGC", -"PROVIDER_GOOGLE_POLAR", -"PROVIDER_GOOGLE_TRIWILD", -"PROVIDER_GOOGLE_CROWD_COMPUTE_OPS", -"PROVIDER_GOOGLE_SA_FROM_WEB", -"PROVIDER_GOOGLE_POI_ALIGNMENT", -"PROVIDER_GOOGLE_SA_FROM_HULK", -"PROVIDER_GOOGLE_SERVICES_INTERACTIONS", -"PROVIDER_GOOGLE_ROADS_UGC_EDITOR", -"PROVIDER_GOOGLE_SA_FROM_NG_INFERENCE", -"PROVIDER_GOOGLE_GEO_DRIVING_VIZ", -"PROVIDER_GOOGLE_GEO_TASKING", -"PROVIDER_GOOGLE_CROWDTASK_DATACOMPUTE", -"PROVIDER_GOOGLE_CROWDTASK_TASKADS", -"PROVIDER_GOOGLE_CROWDTASK_TASKMATE", -"PROVIDER_GOOGLE_CROWDTASK_FURBALL", -"PROVIDER_GOOGLE_CROWDTASK_ADAP", -"PROVIDER_GOOGLE_GPAY", -"PROVIDER_GOOGLE_GEO_UGC_TRUSTED_USERS", -"PROVIDER_GOOGLE_THIRD_PARTY_DATA_PRODUCTION", -"PROVIDER_GOOGLE_GEOTRACKER", -"PROVIDER_GOOGLE_LOCAL_LANDMARK_INFERENCE", -"PROVIDER_GOOGLE_GEO_CLOSED_LOOP", -"PROVIDER_GOOGLE_SA_FROM_MERCHANT_POSTS", -"PROVIDER_GOOGLE_CORE_DATA_RIGHTS", -"PROVIDER_GOOGLE_SA_FROM_USER_REVIEWS", -"PROVIDER_GOOGLE_GEO_CONTENT_FIXER", -"PROVIDER_GOOGLE_POLYGON_REFINEMENT", -"PROVIDER_GOOGLE_HANASU", -"PROVIDER_GOOGLE_FULLRIGHTS_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_FULLRIGHTS_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_ATTRIBUTION_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_SA_FROM_FOOD_MENUS", -"PROVIDER_GOOGLE_GT_CONSISTENCY_EDITS", 
-"PROVIDER_GOOGLE_SA_QUALITY", -"PROVIDER_GOOGLE_GDCE_CLEANUP", -"PROVIDER_GOOGLE_UGC_QUALITY_CHAINS", -"PROVIDER_GOOGLE_ATTRIBUTES_DISCOVERY", -"PROVIDER_GOOGLE_GEO_LDE", -"PROVIDER_GOOGLE_GEO_SIGNAL_TRACKING", -"PROVIDER_GOOGLE_UGC_AGGREGATION", -"PROVIDER_GOOGLE_3D_BASEMAP", -"PROVIDER_GOOGLE_MAPFACTS_PRIVACY", -"PROVIDER_GOOGLE_GT_ALF", -"PROVIDER_GOOGLE_GT_OPERATOR_PROVENANCE", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS", -"PROVIDER_GOOGLE_GT_LANE_AUTOMATION", -"PROVIDER_GOOGLE_GEO_NG_LOCAL", -"PROVIDER_GOOGLE_MAPFACTS_CLEANUP", -"PROVIDER_GOOGLE_THIRD_PARTY_UGC", -"PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", -"PROVIDER_GOOGLE_VACATION_RENTAL_PARTNERS", -"PROVIDER_GOOGLE_FEED_PROCESSOR_ROAD_INCIDENTS", -"PROVIDER_GOOGLE_DYNAMIC_BASEMAP", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS_EMEA", -"PROVIDER_GOOGLE_LOCALSEARCH", -"PROVIDER_GOOGLE_TRANSIT", -"PROVIDER_GOOGLE_GEOWIKI", -"PROVIDER_GOOGLE_CHINA_LOCAL_TEAM", -"PROVIDER_GOOGLE_SYNTHESIZED", -"PROVIDER_GOOGLE_INTERNAL_TEST", -"PROVIDER_GOOGLE_DISPUTED_AREAS", -"PROVIDER_GOOGLE_3DWAREHOUSE", -"PROVIDER_GOOGLE_GROUNDS_BUILDER", -"PROVIDER_GOOGLE_SESAME", -"PROVIDER_GOOGLE_GT", -"PROVIDER_GOOGLE_GT_BASEMAP_UPLOAD", -"PROVIDER_GOOGLE_ADSDB", -"PROVIDER_GOOGLE_MACHINE_TRANSLITERATION", -"PROVIDER_GOOGLE_TRAVELSEARCH", -"PROVIDER_GOOGLE_PANORAMIO", -"PROVIDER_GOOGLE_YOUTUBE", -"PROVIDER_GOOGLE_OLD", -"PROVIDER_GOOGLE_STREETVIEW", -"PROVIDER_GOOGLE_STREETVIEW_BIZVIEW", -"PROVIDER_GOOGLE_ZIPIT", -"PROVIDER_GOOGLE_OYSTER_CONNECT_ROUTES", -"PROVIDER_GOOGLE_GOLDEN", -"PROVIDER_GOOGLE_INNERSPACE", -"PROVIDER_GOOGLE_MAPSEARCH", -"PROVIDER_GOOGLE_CATEGORIES_TEAM", -"PROVIDER_GOOGLE_CROWDSENSUS", -"PROVIDER_GOOGLE_LOCAL_ALGORITHMIC_IDENTITY", -"PROVIDER_GOOGLE_FREEBASE", -"PROVIDER_GOOGLE_HOTELADS", -"PROVIDER_GOOGLE_AUTHORITY_PAGES", -"PROVIDER_GOOGLE_PLACES_API", -"PROVIDER_GOOGLE_NAMEHEATMAP", -"PROVIDER_GOOGLE_MAPMAKER", -"PROVIDER_GOOGLE_MAPMAKER_MOBILE", -"PROVIDER_GOOGLE_MAPMAKER_PANCAKE", -"PROVIDER_GOOGLE_MAPMAKER_V2", 
-"PROVIDER_GOOGLE_LOCAL_CLUSTERING_OPERATOR_OVERRIDE", -"PROVIDER_GOOGLE_SERVED_ON_MAPMAKER", -"PROVIDER_GOOGLE_GT_LOCAL", -"PROVIDER_GOOGLE_GT_LOCAL_WITH_RIGHTS", -"PROVIDER_GOOGLE_LOGS_RANKING_SIGNALS", -"PROVIDER_GOOGLE_ENTITY_NAVBOOST", -"PROVIDER_GOOGLE_RELATED_PLACES", -"PROVIDER_GOOGLE_KNOWN_FOR_TERMS", -"PROVIDER_GOOGLE_SYNTHETIC_AREAS", -"PROVIDER_GOOGLE_AUTHORITY_PAGE_PHOTOS", -"PROVIDER_GOOGLE_CROSS_STREETS", -"PROVIDER_GOOGLE_CORRIDORS", -"PROVIDER_GOOGLE_BICYCLE_RENTAL", -"PROVIDER_GOOGLE_CONCRETE_URLS", -"PROVIDER_GOOGLE_LEANBACK", -"PROVIDER_GOOGLE_LOCKED_LISTINGS", -"PROVIDER_GOOGLE_MONITORING", -"PROVIDER_GOOGLE_SPROUT", -"PROVIDER_GOOGLE_LOCAL_SEARCH_QUALITY", -"PROVIDER_GOOGLE_GOBY", -"PROVIDER_GOOGLE_PROBLEM_REPORT", -"PROVIDER_GOOGLE_CANDID", -"PROVIDER_GOOGLE_BIZBUILDER", -"PROVIDER_AUTOMOTIVE_NAVIGATION_DATA", -"PROVIDER_MAPDATA_SCIENCES", -"PROVIDER_MAPONICS", -"PROVIDER_SKI_RESORTS", -"PROVIDER_ZENRIN", -"PROVIDER_SANBORN", -"PROVIDER_URBAN_MAPPING", -"PROVIDER_US_GOVERNMENT", -"PROVIDER_US_CENSUS", -"PROVIDER_US_POSTAL_SERVICE", -"PROVIDER_US_GEOLOGICAL_SURVEY", -"PROVIDER_US_GNIS", -"PROVIDER_US_LANDSAT", -"PROVIDER_US_NATIONAL_GEOSPATIAL_INTELLIGENCE_AGENCY", -"PROVIDER_US_NGA_GNS", -"PROVIDER_US_SSIBL", -"PROVIDER_US_BUREAU_OF_TRANSPORTATION_STATISTICS", -"PROVIDER_US_NATIONAL_OCEANIC_AND_ATMOSPHERIC_ADMINISTRATION", -"PROVIDER_US_POLAR_GEOSPATIAL_CENTER", -"PROVIDER_US_DEPARTMENT_OF_AGRICULTURE", -"PROVIDER_US_NPI_REGISTRY", -"PROVIDER_US_BUREAU_OF_INDIAN_AFFAIRS", -"PROVIDER_DMTI_SPATIAL", -"PROVIDER_INTERNATIONAL_HYDROGRAPHIC_ORGANIZATION", -"PROVIDER_MAPLINK", -"PROVIDER_KINGWAY", -"PROVIDER_GEOCENTRE", -"PROVIDER_CN_NATIONAL_FOUNDAMENTAL_GIS", -"PROVIDER_CN_MAPABC", -"PROVIDER_SMITHSONIAN_INSTITUTE", -"PROVIDER_TRACKS_FOR_AFRICA", -"PROVIDER_PPWK", -"PROVIDER_LEADDOG", -"PROVIDER_CENTRE_DONNEES_ASTRONOMIQUES_STRASBOURG", -"PROVIDER_GISRAEL", -"PROVIDER_BASARSOFT", -"PROVIDER_MAPINFO", -"PROVIDER_MAPIT", -"PROVIDER_GEOBASE", 
-"PROVIDER_ORION", -"PROVIDER_CENTRAL_EUROPEAN_DATA_AGENCY", -"PROVIDER_ANASAT", -"PROVIDER_MINED_POSTCODES", -"PROVIDER_DMAPAS", -"PROVIDER_COMMON_LOCALE_DATA_REPOSITORY", -"PROVIDER_CH_SBB", -"PROVIDER_SKENERGY", -"PROVIDER_GBRMPA", -"PROVIDER_KOREA_POST", -"PROVIDER_CN_AUTONAVI", -"PROVIDER_MINED_POI", -"PROVIDER_ML_INFOMAP", -"PROVIDER_SNOOPER", -"PROVIDER_GEOSISTEMAS", -"PROVIDER_AFRIGIS", -"PROVIDER_TRANSNAVICOM", -"PROVIDER_EASYCONNECT", -"PROVIDER_LANTMATERIET", -"PROVIDER_LOGICA", -"PROVIDER_MAPKING", -"PROVIDER_DIANPING", -"PROVIDER_GEONAV", -"PROVIDER_HEIBONSHA", -"PROVIDER_DEUTSCHE_TELEKOM", -"PROVIDER_LINGUISTIC_DATA_CONSORTIUM", -"PROVIDER_ACXIOM", -"PROVIDER_DUN_AND_BRADSTREET", -"PROVIDER_FEDERAL_AVIATION_ADMINISTRATION", -"PROVIDER_INFOUSA", -"PROVIDER_INFOUSA_NIXIE", -"PROVIDER_THOMSON_LOCAL", -"PROVIDER_TELEFONICA_PUBLICIDAD_E_INFORMACION", -"PROVIDER_WIKIPEDIA", -"PROVIDER_INFOBEL", -"PROVIDER_MX_GOVERNMENT", -"PROVIDER_MX_NATIONAL_INSTITUTE_STATISTICS_GEOGRAPHY", -"PROVIDER_MX_SERVICIO_POSTAL_MEXICANO", -"PROVIDER_TELEGATE", -"PROVIDER_TELELISTAS", -"PROVIDER_MAPCITY", -"PROVIDER_EXPLAINER_DC", -"PROVIDER_DAIKEI", -"PROVIDER_NL_CHAMBER_OF_COMMERCE", -"PROVIDER_KOREA_INFO_SERVICE", -"PROVIDER_WIKITRAVEL", -"PROVIDER_FLICKR", -"PROVIDER_DIANCO", -"PROVIDER_VOLT_DELTA", -"PROVIDER_SG_GOVERNMENT", -"PROVIDER_SG_LAND_TRANSPORT_AUTHORITY", -"PROVIDER_MAPBAR", -"PROVIDER_LONGTU", -"PROVIDER_SA_GOVERNMENT", -"PROVIDER_SA_SAUDI_POST", -"PROVIDER_PEAKLIST", -"PROVIDER_LOCAL_BUSINESS_CENTER", -"PROVIDER_LOCAL_FEED_XML", -"PROVIDER_WEB", -"PROVIDER_RAILS_TO_TRAILS", -"PROVIDER_INDIACOM", -"PROVIDER_INFOMEDIA", -"PROVIDER_PICASA", -"PROVIDER_AT_GOVERNMENT", -"PROVIDER_AT_BUNDESAMT_FUR_EICH_UND_VERMESSUNGSWESEN", -"PROVIDER_AT_NATIONAL_TOURIST_OFFICE", -"PROVIDER_AT_AUSTRIA_POST", -"PROVIDER_NO_GOVERNMENT", -"PROVIDER_NO_NORSK_EIENDOMSINFORMASJON", -"PROVIDER_NO_POSTEN_NORGE_AS", -"PROVIDER_CH_GOVERNMENT", -"PROVIDER_CH_SWISS_POST", -"PROVIDER_CH_SWISSTOPO", 
-"PROVIDER_CH_SWISS_NATIONAL_PARK", -"PROVIDER_NAVIT", -"PROVIDER_GEOSEARCH", -"PROVIDER_DE_GOVERNMENT", -"PROVIDER_BUNDESAMT_KARTOGRAPHIE_UND_GEODASIE", -"PROVIDER_BUNDESNETZAGENTUR", -"PROVIDER_SCHOBER_GROUP", -"PROVIDER_MIREO", -"PROVIDER_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WEBSTER_TEXAS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_AMHERST_MASSACHUSETTS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BLOOMINGTON_INDIANA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PASADENA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_CHULA_VISTA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_TEMPE_ARIZONA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_COLUMBUS_OHIO", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PORTAGE_MICHIGAN", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GEORGETOWN_KENTUCKY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GREENVILLE_SOUTH_CAROLINA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_NASHVILLE_TENNESSEE", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WASHINGTON_DISTRICT_OF_COLUMBIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BOULDER_COLORADO", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY_ENVIRONMENT_BAY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY_BIELSKO_BIALA", -"PROVIDER_DE_PUBLIC_MUNICIPALITY", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_FRANKFURT", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_HAMBURG", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_KARLSRUHE", -"PROVIDER_PT_PUBLIC_MUNICIPALITY", -"PROVIDER_PT_PUBLIC_MUNICIPALITY_SANTA_CRUZ", -"PROVIDER_AT_PUBLIC_MUNICIPALITY", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_KLAGENFURT", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_LINZ", -"PROVIDER_ES_PUBLIC_MUNICIPALITY", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_AZKOITIA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_BEASAIN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_GIRONA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_SAN_SEBASTIAN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_CATALUNYA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_HONDARRIBIA", -"PROVIDER_AU_PUBLIC_MUNICIPALITY", -"PROVIDER_AU_PUBLIC_MUNICIPALITY_LAUNCESTON_TASMANIA", 
-"PROVIDER_IS_PUBLIC_MUNICIPALITY", -"PROVIDER_IS_PUBLIC_MUNICIPALITY_REYKJAVIK", -"PROVIDER_NL_PUBLIC_MUNICIPALITY", -"PROVIDER_NL_PUBLIC_MUNICIPALITY_AMELSTEVEEN", -"PROVIDER_BE_PUBLIC_MUNICIPALITY", -"PROVIDER_BE_PUBLIC_MUNICIPALITY_ANTWERPEN", -"PROVIDER_CA_PUBLIC_MUNICIPALITY", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_FREDERICTON_NEW_BRUNSWICK", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_KAMLOOPS_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_NANAIMO_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_BANFF_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_CALGARY_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_TORONTO_ONTARIO", -"PROVIDER_SE_PUBLIC_MUNICIPALITY", -"PROVIDER_SE_PUBLIC_MUNICIPALITY_UMEA", -"PROVIDER_UA_PUBLIC_MUNICIPALITY", -"PROVIDER_UA_PUBLIC_MUNICIPALITY_KHARKIV", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY_AQUA_CALIENTE_CAHUILLA_INDIANS", -"PROVIDER_FR_PUBLIC_MUNICIPALITY", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_PONT_AUDEMER", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_BORDEAUX", -"PROVIDER_SG_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY_RIO_DE_JANEIRO", -"PROVIDER_MAPCUBE", -"PROVIDER_3D_REALITYMAPS", -"PROVIDER_DEUTSCHES_ZENTRUM_FUR_LUFT_UND_RAUMFAHRT", -"PROVIDER_3D_CITIES_SOCIEDADE_ANONIMA", -"PROVIDER_DISNEY", -"PROVIDER_CYBERCITY", -"PROVIDER_PRECISION_LIGHTWORKS_MODELWORKS", -"PROVIDER_VIRTUAL_HUNGARY_LIMITED", -"PROVIDER_VIRTUEL_CITY", -"PROVIDER_SCREAMPOINT_INTERNATIONAL", -"PROVIDER_AGENTSCHAP_VOOR_GEOGRAFISCHE_INFORMATIE_VLAANDEREN", -"PROVIDER_FR_GOVERNMENT", -"PROVIDER_FR_INSTITUT_GEOGRAPHIQUE_NATIONAL", -"PROVIDER_FR_CADASTRE", -"PROVIDER_DIADIEM", -"PROVIDER_THE_WEATHER_CHANNEL", -"PROVIDER_COWI", -"PROVIDER_FALKPLAN_ANDES", -"PROVIDER_NL_GOVERNMENT", -"PROVIDER_NL_KADASTER", -"PROVIDER_NL_BOARD_OF_TOURISM_AND_CONVENTIONS", -"PROVIDER_DIGITAL_MAP_PRODUCTS", -"PROVIDER_SILICE_DIGITAL", -"PROVIDER_TYDAC", -"PROVIDER_ALBRECHT_GOLF", -"PROVIDER_HEALTH_CH", -"PROVIDER_VISITDENMARK", 
-"PROVIDER_FLYHERE", -"PROVIDER_DIGITAL_DATA_SERVICES", -"PROVIDER_MECOMO", -"PROVIDER_ZA_GOVERNMENT", -"PROVIDER_ZA_RURAL_DEVELOPMENT_LAND_REFORM", -"PROVIDER_SENSIS", -"PROVIDER_JJCONNECT", -"PROVIDER_OPPLYSNINGEN", -"PROVIDER_TELLUS", -"PROVIDER_IQONIA", -"PROVIDER_BE_GOVERNMENT", -"PROVIDER_BE_NATIONAAL_GEOGRAFISCH_INSTITUUT", -"PROVIDER_BE_BRUSSELS_MOBILITY", -"PROVIDER_YELLOWMAP_AG", -"PROVIDER_STIFTUNG_GESUNDHEIT", -"PROVIDER_GIATA", -"PROVIDER_SANPARKS", -"PROVIDER_CENTRE_DINFORMATIQUE_POUR_LA_REGION_BRUXELLOISE", -"PROVIDER_INFOPORTUGAL", -"PROVIDER_NEGOCIOS_DE_TELECOMUNICACOES_E_SISTEMAS_DE_INFORMACAO", -"PROVIDER_COLLINS_BARTHOLOMEW", -"PROVIDER_PROTECT_PLANET_OCEAN", -"PROVIDER_KARTTAKESKUS", -"PROVIDER_FI_GOVERNMENT", -"PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION", -"PROVIDER_FI_NATIONAL_LAND_SURVEY", -"PROVIDER_FI_STATISTICS_FINLAND", -"PROVIDER_GB_GOVERNMENT", -"PROVIDER_GB_ORDNANCE_SURVEY", -"PROVIDER_NATURAL_ENGLAND", -"PROVIDER_WELSH_GOVERNMENT", -"PROVIDER_GB_OFFICE_FOR_NATIONAL_STATISTICS", -"PROVIDER_EPSILON", -"PROVIDER_PARTNER_FRONT_END", -"PROVIDER_CARTESIA", -"PROVIDER_SE_GOVERNMENT", -"PROVIDER_SE_TRAFIKVERKET", -"PROVIDER_SE_NATURVARDSVERKET", -"PROVIDER_IE_GOVERNMENT", -"PROVIDER_IE_ORDNANCE_SURVEY_IRELAND", -"PROVIDER_LU_GOVERNMENT", -"PROVIDER_LU_P_AND_T_LUXEMBOURG", -"PROVIDER_LU_ADMINISTRATION_DU_CADASTRE_ET_DE_LA_TOPOGRAPHIE", -"PROVIDER_LU_NATIONAL_TOURIST_OFFICE", -"PROVIDER_MAPFLOW", -"PROVIDER_TKARTOR", -"PROVIDER_JUMPSTART", -"PROVIDER_EPTISA", -"PROVIDER_MC_GOVERNMENT", -"PROVIDER_MC_PRINCIPAUTE_DE_MONACO", -"PROVIDER_MONOLIT", -"PROVIDER_ENVIRONMENTAL_SYSTEMS_RESEARCH_INSTITUTE", -"PROVIDER_MODIS", -"PROVIDER_GEOX", -"PROVIDER_GEODIRECTORY", -"PROVIDER_GEOPLAN", -"PROVIDER_INFODIREKT", -"PROVIDER_GEOGLOBAL", -"PROVIDER_DEUTSCHE_POST", -"PROVIDER_TRACASA", -"PROVIDER_CORREOS", -"PROVIDER_ES_GOVERNMENT", -"PROVIDER_ES_CENTRO_NACIONAL_DE_INFORMACION_GEOGRAFICA", -"PROVIDER_EDIMAP", -"PROVIDER_VERIZON", 
-"PROVIDER_NATIONAL_GEOGRAPHIC_MAPS", -"PROVIDER_PROMAPS", -"PROVIDER_CONSODATA", -"PROVIDER_DE_AGOSTINI", -"PROVIDER_FEDERPARCHI", -"PROVIDER_NAVIGO", -"PROVIDER_ITALIAMAPPE", -"PROVIDER_CZECOT", -"PROVIDER_NATURAL_EARTH", -"PROVIDER_REGIO", -"PROVIDER_SHIPWRECK_CENTRAL", -"PROVIDER_RUTGERS_STATE_UNIVERSITY", -"PROVIDER_TWINICE", -"PROVIDER_NORTHERN_IRELAND_TOURIST_BOARD", -"PROVIDER_INFOGROUP", -"PROVIDER_TNET", -"PROVIDER_CTT_CORREIOS_DE_PORTUGAL", -"PROVIDER_EUROPARC", -"PROVIDER_IUPPITER", -"PROVIDER_MICHAEL_BAUER_INTERNATIONAL", -"PROVIDER_LEPTON", -"PROVIDER_MAPPOINT", -"PROVIDER_GEODATA", -"PROVIDER_RU_GOVERNMENT", -"PROVIDER_RU_FNS_KLADR", -"PROVIDER_BR_GOVERNMENT", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DO_MEIO_AMBIENTE_E_DOS_RECURSOS_NATURAIS_RENOVAVEIS", -"PROVIDER_BR_MINISTERIO_DO_MEIO_AMBIENTE", -"PROVIDER_BR_AGENCIA_NACIONAL_DE_AGUAS", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DE_GEOGRAFIA_E_ESTATISTICA", -"PROVIDER_BR_FUNDACAO_NACIONAL_DO_INDIO", -"PROVIDER_BR_DEPARTAMENTO_NACIONAL_DE_INFRAESTRUTURA_DE_TRANSPORTES", -"PROVIDER_AZAVEA", -"PROVIDER_NORTHSTAR", -"PROVIDER_COMMEDI", -"PROVIDER_NEXUS_GEOGRAFICS", -"PROVIDER_INFOERA", -"PROVIDER_AD_GOVERNMENT", -"PROVIDER_AD_AREA_DE_CARTOGRAFIA", -"PROVIDER_MAXXIMA", -"PROVIDER_SI_GOVERNMENT", -"PROVIDER_SI_AGENCY_FOR_ENVIRONMENT", -"PROVIDER_TRANSPORT_HI_TECH_CONSULTANTS", -"PROVIDER_L1_TECHNOLOGIES", -"PROVIDER_TELEMEDIA", -"PROVIDER_CDCOM_PROGOROD", -"PROVIDER_MIT_CITYGUIDE", -"PROVIDER_SUNCART", -"PROVIDER_MICROMAPPER", -"PROVIDER_RICHI", -"PROVIDER_FORUM44", -"PROVIDER_SEAT", -"PROVIDER_VALASSIS", -"PROVIDER_NAVICOM", -"PROVIDER_COLTRACK", -"PROVIDER_PSMA_AUSTRALIA", -"PROVIDER_PT_DUTA_ASTAKONA_GIRINDA", -"PROVIDER_CA_GOVERNMENT", -"PROVIDER_STATISTICS_CANADA", -"PROVIDER_TOCTOC", -"PROVIDER_RMSI", -"PROVIDER_TRUE_TECHNOLOGY", -"PROVIDER_INCREMENT_P_CORPORATION", -"PROVIDER_GOJAVAS", -"PROVIDER_GEOINFORMATION_GROUP", -"PROVIDER_CYBERSOFT", -"PROVIDER_TSENTR_EFFEKTIVNYKH_TEKHNOLOGIY", -"PROVIDER_EE_GOVERNMENT", 
-"PROVIDER_EE_MAA_AMET", -"PROVIDER_GASBUDDY", -"PROVIDER_DK_GOVERNMENT", -"PROVIDER_DK_GEODATASTYRELSEN", -"PROVIDER_MURCIA_REGION_GOVERNMENT", -"PROVIDER_CORREIOS", -"PROVIDER_WEST_WORLD_MEDIA", -"PROVIDER_INTERNATIONAL_MAPPING_ASSOCIATION", -"PROVIDER_MEDICARE", -"PROVIDER_POLARIS", -"PROVIDER_TW_GOVERNMENT", -"PROVIDER_TW_MINISTRY_OF_THE_INTERIOR_SURVEYING_AND_MAPPING_CENTER", -"PROVIDER_NORDECA", -"PROVIDER_AFRIMAPPING", -"PROVIDER_OVERDRIVE", -"PROVIDER_PROVIDER_NETWORK_DIRECTORIES", -"PROVIDER_BR_MINISTERIO_DA_SAUDE", -"PROVIDER_DIGITAL_EGYPT", -"PROVIDER_INRIX", -"PROVIDER_ARPINDO", -"PROVIDER_IT_GOVERNMENT", -"PROVIDER_ISTITUTO_GEOGRAFICO_MILITARE", -"PROVIDER_EAST_END_GROUP", -"PROVIDER_INGEOLAN", -"PROVIDER_SEMACONNECT", -"PROVIDER_BLINK", -"PROVIDER_EVGO", -"PROVIDER_CHARGEPOINT", -"PROVIDER_TPL_TRAKKER", -"PROVIDER_OI", -"PROVIDER_MAPARADAR", -"PROVIDER_SINGAPORE_POST", -"PROVIDER_CHARGEMASTER", -"PROVIDER_TESLA", -"PROVIDER_VISICOM", -"PROVIDER_GEOLYSIS", -"PROVIDER_ZEPHEIRA", -"PROVIDER_HUBJECT", -"PROVIDER_PODPOINT", -"PROVIDER_CHARGEFOX", -"PROVIDER_KR_GOVERNMENT", -"PROVIDER_KR_MOLIT", -"PROVIDER_KR_MINISTRY_OF_THE_INTERIOR_AND_SAFETY", -"PROVIDER_CRITCHLOW", -"PROVIDER_EIFRIG", -"PROVIDER_GIREVE", -"PROVIDER_CN_NAVINFO", -"PROVIDER_JAPAN_CHARGE_NETWORK", -"PROVIDER_NOBIL", -"PROVIDER_INDIA_BANKS", -"PROVIDER_INDONESIA_ELECTION_KPU", -"PROVIDER_CAREERS360", -"PROVIDER_SOURCE_LONDON", -"PROVIDER_EVBOX", -"PROVIDER_JP_GOVERNMENT", -"PROVIDER_JP_MINISTRY_OF_THE_ENVIRONMENT", -"PROVIDER_YUMYUM", -"PROVIDER_HWW_AUSTRALIA", -"PROVIDER_CINERGY", -"PROVIDER_MTIME", -"PROVIDER_KULTUNAUT", -"PROVIDER_BLITZ", -"PROVIDER_PIA", -"PROVIDER_INTERPARK", -"PROVIDER_CINEMA_ONLINE", -"PROVIDER_BELBIOS", -"PROVIDER_MOVIESEER", -"PROVIDER_SODAMEDYA", -"PROVIDER_ATMOVIES", -"PROVIDER_HOTELBEDS", -"PROVIDER_VERICRED", -"PROVIDER_CIRRANTIC", -"PROVIDER_GOGO_LABS", -"PROVIDER_ELECTRIFY_AMERICA", -"PROVIDER_CMS_MPPUF", -"PROVIDER_DIGIROAD", -"PROVIDER_KONTEX_GEOMATICS", 
-"PROVIDER_NZ_GOVERNMENT", -"PROVIDER_NZ_LINZ", -"PROVIDER_NZ_DOC", -"PROVIDER_FASTNED", -"PROVIDER_DESTINY_CS", -"PROVIDER_IONITY", -"PROVIDER_EV_CONNECT", -"PROVIDER_PANPAGES", -"PROVIDER_ETECNIC", -"PROVIDER_VOLTA", -"PROVIDER_NISSAN_MEXICO", -"PROVIDER_BMW_GROUP_LATIN_AMERICA", -"PROVIDER_FEDERAL_ELECTRICITY_COMMISSION_MEXICO", -"PROVIDER_VOLVO_CARS_BRASIL", -"PROVIDER_CHARGE_AND_PARKING", -"PROVIDER_DEDUCE_TECHNOLOGIES", -"PROVIDER_SK_TELECOM", -"PROVIDER_ECO_MOVEMENT", -"PROVIDER_GOOGLE_GMS", -"PROVIDER_EASYWAY", -"PROVIDER_PHYSICIAN_COMPARE", -"PROVIDER_HOSPITAL_COMPARE", -"PROVIDER_ENDOLLA_BARCELONA", -"PROVIDER_BE_CHARGE", -"PROVIDER_ONE_NETWORK", -"PROVIDER_CARENAV_DUPLEX", -"PROVIDER_CARENAV_POI", -"PROVIDER_IN_GOVERNMENT", -"PROVIDER_SURVEY_OF_INDIA", -"PROVIDER_E_ON", -"PROVIDER_ELECTRIFY_CANADA", -"PROVIDER_GRIDCARS", -"PROVIDER_DRIVECO", -"PROVIDER_GREEN_ACTION_STUDIOS", -"PROVIDER_GREEN_ACTION_STUDIO", -"PROVIDER_EVINY", -"PROVIDER_MASTERCARD", -"PROVIDER_VATTENFALL", -"PROVIDER_VIETGIS", -"PROVIDER_UNITE", -"PROVIDER_NEOGY", -"PROVIDER_AMPUP", -"PROVIDER_LOOP", -"PROVIDER_ZEST", -"PROVIDER_EZVOLT", -"PROVIDER_JOLT", -"PROVIDER_CHARGESMITH", -"PROVIDER_PLUGO", -"PROVIDER_ELECTRIC_ERA", -"PROVIDER_FLO", -"PROVIDER_DIGITAL_CHARGING_SOLUTIONS", -"PROVIDER_ELECTRIC_PE" -], -"enumDeprecated": [ -false, -false, -true, -false, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -true, -false, -true, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -true, -true, -false, -true, -false, -true, -true, -false, -true, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -false, -false, -false, -true, -false, -true, -false, -true, -true, -true, -false, -true, -true, -true, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, 
-false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"ABSTRACT The root of all provider types. This should never be present on an actual feature, but can be useful when calling InCategory.", -"not actually a legal value, used as sentinel", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730C2", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This is an internal *only* provider meant for sending wipeout requests to mapfacts.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Never rely on source infos with this provider to identify whether or not a feature is a Prominent Place! 
Instead, use the proper API, localsearch_clustering::QualityTierHelper::IsProminentPlace().", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Shopping Attributes Discovery", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"", -"The next new \"Google\" provider entries should be placed above.", -"UMBRELLA", -"", -"", -"", -"This is a testing provider for teams that wish to integrate with components of the Geo Data Infrastructure that require a valid provider. No production data should ever be sent using this provider.", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"0x1117F must not be used, since its range extends the PROVIDER_GOOGLE hierarchy.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated in favor of PROVIDER_GOOGLE_STRUCTURED_DATA (for attributes) and PROVIDER_GOOGLE_GEO_TIGER (for categories).", -"", -"0x1117FF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. aka Local AI.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"No data is obtained from this provider. It is only used to identify features that must be served on MapMaker.", -"", -"", -"", -"", -"", -"", -"0x1117FFF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. 
Synthetically generated areas (sublocalities/neighborhoods/ postal codes/etc) based on dropped terms from approximate geocoding. More info on go/syntheticareas.", -"", -"", -"", -"Similar to Google Transit, a provider that aggregates positions of bicycle rental points that we have agreements with to show on maps", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This range has been depleted. For new Ids see PROVIDER_GOOGLE_SUBRANGE above.", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"Small Scale International Boundary Lines", -"", -"NOAA", -"PGC", -"USDA", -"National Provider Identifier Registry", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"old name for PROVIDER_NAVIGO", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"See b/33687395", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"ABSTRACT", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", 
-"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"IBAMA", -"MMA", -"ANA", -"IBGE", -"FUNAI", -"DNIT", -"", -"", -"", -"", -"", -"ABSTRACT", -"Department of Cartography", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"0x12 and 0x120 are not to be used. OOO CET", -"ABSTRACT", -"Estonian Land Board", -"", -"ABSTRACT", -"Danish Geodata Agency", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"Ministry of land, infrastructure and transport, \uad6d\ud1a0\uad50\ud1b5\ubd80, Guktogyotongbu", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Use PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION.", -"", -"ABSTRACT", -"Land Information New Zealand", -"NZ Department of Conservation", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Note: Next available value is 0x1275." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreIntersectionGroupProto": { -"description": "Our TYPE_INTERSECTION features model the point where one or more segments terminate. 
This is topological definition: it may not match what a typical user would think of as an \"intersection\". Consider the intersections where Hayes, Market, Larkin, and 9th Street meet near (37.77765, -122.41638) in San Francisco. Most people would probably consider this a single feature, even though we model it as four separate TYPE_INTERSECTION features. The TYPE_INTERSECTION_GROUP is used to model the user's concept of a real-world intersection, which also includes turn lanes or a whole roundabout (a logical intersection). For the purposes of modeling turn restrictions and lane connections, a smaller grouping is needed to model the \"core\" part of the intersection where there are no lane markings. This is called a core or artifact group. An intersection group must contain at least two intersections or add some information (e.g. a name or a polygon) compared to the intersection itself, or else must not exist. The standard feature properties are interpreted as follows: name - Can be used to specify any \"special\" names associated with this intersection (e.g. Reads Corner intersection, PEI, Canada). Intersections that are named according to their cross streets do not need to specify this field, since this can be determined by looking at the routes associated with each segment. address - This should always be empty. point - Specifies the center of the intersection. This is basically the point where the intersection name should be rendered. Can be omitted in favor of a polygon. polyline - This should always be empty. polygon - Specifies the two-dimensional extent of the intersection. This may substitute to the point field, though having a center set is desirable in this case. 
child - This should always be empty.", -"id": "GeostoreIntersectionGroupProto", -"properties": { -"childGroup": { -"description": "All artifact intersection groups that are in this logical group.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"groupType": { -"enum": [ -"GROUP_ARTIFACT", -"GROUP_LOGICAL" -], -"enumDescriptions": [ -"Groups intersections that bound segment artifacts of centerline digitization. These segment artifacts do not represent the geometry of actual vehicle travel through an intersection group. This grouping is typically in the core or shared area of an intersection where there are no lane markings (with the exception of raised reflective white dots demarkating multiple turn lanes through the real-world intersection). This is also the typical grouping used for modeling STYLE_IN_OUT turn restrictions.", -"Groups all intersections that form the logical connection of roads. Also known as a cross road intersection group. This group is usually larger than just the artifact grouping, since it includes external turn lanes and roundabout external bypasses." -], -"type": "string" -}, -"intersection": { -"description": "The list of TYPE_INTERSECTION features that form this intersection group, but are NOT in any of this group's child groups. This could be an empty list, though that is sub-optimal. Even an empty list would allow the paint team to draw a label for a named intersection, but a non-empty list would, for example, enable PathFinder to generate better directions. Each of the TYPE_INTERSECTION feature referred here must refer back to this feature in its IntersectionProto.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"parentGroup": { -"$ref": "GeostoreFeatureIdProto", -"description": "Parent logical intersection group. An artifact group that does not have an associated parent logical group is assumed to be both an artifact and logical group." 
-} -}, -"type": "object" -}, -"GeostoreIntersectionProto": { -"description": "A TYPE_INTERSECTION feature represents a common endpoint of one or more segments in a transportation network at which the segments are connected. An intersection in the real world may be more complicated than that (e.g., comprise multiple segment endpoints or have extra attributes), which can be modeled with an additional TYPE_INTERSECTION_GROUP feature, if needed (see intersectiongroup.proto). The standard feature properties are interpreted as follows: name - This should always be empty. Intersections that have a \"special\" name (e.g. Reads Corner intersection, PEI, Canada) should point to a separate TYPE_INTERSECTION_GROUP feature that captures it. Intersections which are named according to their cross streets do not need this requirement, since their name can be determined by looking at the routes associated with each segment. address - This should always be empty. point - Specifies the center of the intersection. This should be the last vertex of all the segments which terminate at this intersection. polyline - This should always be empty. polygon - This should always be empty. child - This should always be empty.", -"id": "GeostoreIntersectionProto", -"properties": { -"intersectionGroup": { -"$ref": "GeostoreFeatureIdProto", -"description": "The artifact or logical intersection group to which this intersection belongs. If present, the intersection group must also refer back to the intersection. If an intersection is within both the artifact and logical group, then this reference should be to the artifact group." -}, -"outSegment": { -"description": "RESERVED", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"segment": { -"description": "The list of segments that terminate at this intersection, in any order. Note that all segments are directed towards the intersection, i.e. their endpoints indicate what sort of intersection this is. 
This should not be empty because an intersection with no associated segment is meaningless.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"tollCluster": { -"$ref": "GeostoreFeatureIdProto", -"description": "The toll cluster to which this intersection belongs. If present, the toll cluster must also refer back to the intersection. To set this field, the intersection must be a gcid:toll_intersection feature." -} -}, -"type": "object" -}, -"GeostoreJobMetadata": { -"id": "GeostoreJobMetadata", -"properties": { -"duration": { -"description": "Describes how much time the service is going to take, e.g. how long it takes to do a haircut. Value of seconds must be from +60 (1 min) to +31,536,000 (365 days) inclusive. Value of nanos must be zero.", -"format": "google-duration", -"type": "string" -}, -"jobRelatedCategories": { -"description": "Represents the name of a potential grouping of items. For TYPE_JOB, this is the category names of the categories that a user picked this job type from at the time of input.", -"items": { -"$ref": "GeostoreJobRelatedCategory" -}, -"type": "array" -}, -"jobTypeId": { -"description": "Unique identifier for a job. This is required for standard jobs and blank for free-form jobs. Job type ids are prefixed with \"job_type_id:\". Notice this is a unique string representation of a job across languages. E.g., \u201cjob_type_id:air_duct_repair\u201d. The existence of a job_type_id means the job type is a standard one, and has a corresponding entry in the Standard Jobs Taxonomy.", -"type": "string" -}, -"jobTypeMid": { -"description": "Represents the MID corresponding to the job_category entity in the Knowledge Graph. For example, job_type_id=\"job_type_id:install_faucet\", job_type_mid=\"/g/11hzzxjv3f\". ", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreJobRelatedCategory": { -"description": "This is the category that a user picked this job type from at the time of input. 
The field serves two purposes: 1) The name is used in consumer surface similar to the heading name today (i.e., grouping jobs under the category. 2) The gcid is needed mainly for free-formed entries, for which GMB needs to map them to corresponding categories in the frontend, if applicable. Notice that the name and the id are both not expected to be in sync with gcid deprecation or location category change per product decision. In other words, they are not guaranteed to stay in sync, only guaranteed true at time of creation.", -"id": "GeostoreJobRelatedCategory", -"properties": { -"gcid": { -"type": "string" -}, -"language": { -"type": "string" -}, -"name": { -"description": "Category name in the primary language of the feature. Generally intended to be used as a fallback when we are unable to fetch the name in the user's language.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreKnowledgeGraphReferenceProto": { -"description": "The reference to an entity in the KnowledgeGraph. For details on the KnowledgeGraph see http://goto/kg.", -"id": "GeostoreKnowledgeGraphReferenceProto", -"properties": { -"id": { -"description": "KG Identifier (MID). For details, see http://go/ke-bg-knowledge-graph#mids.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreLandmarkReferenceProto": { -"description": "This protocol buffer represents the association between a segment and a landmark feature. Notes: - References to TYPE_SEGMENT features should always point to the even sibling. - Self-references are allowed but the referencing segment's sibling is required to have a self-reference as well (the above requirement to always reference the even sibling still applies).", -"id": "GeostoreLandmarkReferenceProto", -"properties": { -"featureType": { -"description": "The type of the landmark feature. Allowed types: - TYPE_CARTOGRAPHIC e.g. a putting green or water hazard - TYPE_COMPOUND e.g. 
- the Empire state building (TYPE_COMPOUND_BUILDING) - a park (TYPE_COMPOUND_GROUNDS) - a section of a retail store (TYPE_COMPOUND_SECTION) - TYPE_ESTABLISHMENT e.g. - the Eiffel Tower (TYPE_ESTABLISHMENT_BUILDING) - a sports field (TYPE_ESTABLISHMENT_GROUNDS) - Starbucks (TYPE_ESTABLISHMENT_POI) - TYPE_INTERSECTION_GROUP e.g. a major intersection - TYPE_NATURAL_FEATURE e.g. a river - TYPE_SEGMENT e.g. a bike trail or train tracks", -"format": "int32", -"type": "integer" -}, -"landmark": { -"$ref": "GeostoreFeatureIdProto", -"description": "The feature ID of the landmark feature." -}, -"travelMode": { -"description": "The mode(s) of travel for which this landmark is useful.", -"items": { -"enum": [ -"UNKNOWN", -"TRAVEL_MOTOR_VEHICLE", -"TRAVEL_AUTO", -"TRAVEL_TWO_WHEELER", -"TRAVEL_BICYCLE", -"TRAVEL_PEDESTRIAN" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreLaneMarkerProto": { -"description": "This proto contains attributes relevant to physical lane markers.", -"id": "GeostoreLaneMarkerProto", -"properties": { -"barrierMaterials": { -"$ref": "GeostoreBarrierLogicalMaterialProto", -"description": "If this is a physical barrier marker, represent materials found on the marker." -}, -"crossingPattern": { -"$ref": "GeostoreCrossingStripePatternProto", -"description": "Pattern border and color for crossing markers. These include crosswalks, stop, and yield lines." -}, -"linearPattern": { -"$ref": "GeostoreLinearStripePatternProto", -"description": "Stripe pattern, spacing, and color for longitudinal markers." -} -}, -"type": "object" -}, -"GeostoreLaneProto": { -"description": "Describes an individual road lane. Not only driving lanes, but also parking and biking lanes are covered by this. Note that we may eventually add curbs and walking to this schema. 
MOTIVATION/DESIGN DISCUSSION The intent of this schema is to model a schematic representation of the road for a bunch of use cases within GMM, navigation, map tiles. For rendering, we do not want to represent the geometry of each lane exactly, but do want to model types/width/gaps/lane markings so that a schematic rendering can be made. For navigation, we model lane connectivity and restrictions per lane, so that Pathfinder can potentially pick routes based on lanes, and definitely use the lanes to better describe the path to the driver. This schema is driven by the GT team, which is likely to be the only provider of this data. It is based on compromises that we are working out with other teams, based on what our operators can reasonably collect and what is useful. See docs here: https://docs.google.com/a/google.com/document/d/11XJ1WvqS5Sm7MxWXzzc3tnsk49VhrR3BYFjiRMAzYm0/edit?hl=en_US https://docs.google.com/a/google.com/document/d/1nzdupynTUKE8xY8JcfvQbU-KWtCJ6IwHiTaCxuq40EM/edit?hl=en_US Note: Some lane information (width, surface type, etc) may duplicate or contradict information stored at the segment level.", -"id": "GeostoreLaneProto", -"properties": { -"boundingMarker": { -"description": "References to any gcid:physical_lane_marker features that bound this lane.", -"items": { -"$ref": "GeostoreBoundingMarkerProto" -}, -"type": "array" -}, -"conjoinedCategory": { -"description": "If the current lane is part of a merge/split area, indicates the type (split or merge) and whether the current lane is on the left or right or in the middle of the merge/split area, as seen in the direction of traffic. 
See go/lane-split-merge-schema", -"enum": [ -"CONJOINED_NONE", -"CONJOINED_SPLIT_LEFT", -"CONJOINED_SPLIT_MIDDLE", -"CONJOINED_SPLIT_RIGHT", -"CONJOINED_MERGE_LEFT", -"CONJOINED_MERGE_MIDDLE", -"CONJOINED_MERGE_RIGHT" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"distanceToNextLane": { -"description": "Gap between this lane and the next in meters. This is relevant when the divider is physical, or a wide painted area. For regular painted single or double lines, there is no gap. This distance is duplicated between the innermost lanes for each side. Note that this is not used to describe smallish islands - this is only for long-running gaps. In particular, this models the median width, the gap between HOV lanes/regular lanes on freeways, and the road verge between a curb and sidewalk. Note on split roads: We can model any split road with a median as a single sibling pair with this distance set to the width of the median, or as two one-way sibling pairs.", -"format": "float", -"type": "number" -}, -"flow": { -"$ref": "GeostoreFlowLineProto", -"description": "The most logical path for the center of an object to travel along within the lane. Typically, this is the lane's center line, but doesn't have to be." -}, -"laneConnection": { -"description": "Connections to lanes of other segments at the end of this segment. These connections model the connectivity where you don't have to do a lane change maneuver. If any lane connection is present, assume that all others are forbidden. Also note that segment level restrictions do apply if present, and can disallow some turn even if the lanes are connected. For instance, this can happen with timed or vehicle type based restrictions on the segment. If lane connectivity implies a segment-level restriction (can't transition to some target segment), that restriction will also exist as a segment level restriction. 
In effect - PathFinder does not have to look at lane connectivity to figure out segment connectivity. Example: Typically, lanes are just connected to one other lane. Example: A splitting lane is connected to the two resulting lanes. Example: At an intersection, a lane is connected to crossing lanes according to how lanes are painted across the intersection. In the common case, the target segment will be connected to the same intersection as this segment. That will however NOT be true for complex intersections where there is an intersection group. The connections will be across the whole group, connecting to one of the outgoing segments from the group. This is analogous to how we do turn restrictions around intersection groups.", -"items": { -"$ref": "GeostoreLaneProtoLaneConnection" -}, -"type": "array" -}, -"laneDividerCrossing": { -"description": "clang-format on Whether the divider to the inside of this lane can be crossed. Note that we assume this is symmetric, and that this also describes whether someone in the next inside lane can cross to this one. The \"inside\" lane is the one with a lower lane_number. Note on lane markers: We do not model the painting, but only the resulting legality. There are many painted marker styles and colors that lead to the same legality. We expect Paint or Driveabout to render lanes stylized, with solid meaning \"can't cross\", and dashed meaning \"can cross\". 
Note on varying legality along segment: ALLOWED takes precedence - even if some small portion has a restriction (such as right before an intersection) , the lane change will be ALLOWED.", -"enum": [ -"CROSSING_ALLOWED", -"CROSSING_DISALLOWED", -"CROSSING_LEGALLY_DISALLOWED", -"CROSSING_PHYSICALLY_IMPOSSIBLE" -], -"enumDescriptions": [ -"Cross at will.", -"", -"This is usually some painted barrier", -"This is either a patch of dirt, or some barrier" -], -"type": "string" -}, -"laneFollowsSegmentBeginFraction": { -"description": "These indicate for what portion of the segment the lane's flowline exactly follows the segment, and the lane is of constant width. This will be set to not include the whole segment where there is a split/turn/merge at either end of the lane. The painting of the lane should completely synthesize the lane geometry outside of this portion, connecting it to neighboring lanes to make graphical nice.", -"format": "float", -"type": "number" -}, -"laneFollowsSegmentEndFraction": { -"format": "float", -"type": "number" -}, -"laneNumber": { -"description": "Lanes are numbered from inside of the road outward, i.e. the lane next to the center line is lane 0. The lanes then stack outwards, towards the side that one drives on this segment (right or left). NOTE: do NOT use the lane_number as index for lookup. Lane_number is not guaranteed to match the segment.lane repeated field index.", -"format": "int32", -"type": "integer" -}, -"laneToken": { -"description": "A token that can be used to identify the version of the data about this lane.", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this lane." -}, -"restriction": { -"description": "Restrictions that apply to this lane only. Examples include HOV lanes. If a lane restriction implies a segment-level restriction (can't route on the segment at all), that restriction will also exist as a segment level restriction. 
In effect - PathFinder does not have to look at lane restrictions to figure out segment restrictions.", -"items": { -"$ref": "GeostoreRestrictionProto" -}, -"type": "array" -}, -"shared": { -"description": "True if this lane is usable in both directions (left-turn lane, reversing lane, one-lane road, etc). To get the total number of lanes for a road, add up the lanes in each direction counting 0.5 for each shared lane.", -"type": "boolean" -}, -"stopLine": { -"description": "References to any gcid:physical_lane_marker features that intersect this lane, with the implication that a moving vehicle should stop there.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"surface": { -"description": "clang-format on LINT.ThenChange(//depot/google3/geostore/base/proto/segment.proto) Unlike the surface in SegmentProto, this field does not have a default value. This is because the lane-level surface overrides the segment-level surface. The lane's surface should be unset unless explicitly overriding the segment's surface.", -"enum": [ -"SURFACE_UNKNOWN", -"SURFACE_PAVED", -"SURFACE_ASPHALT", -"SURFACE_CONCRETE", -"SURFACE_CHIPSEAL", -"SURFACE_BRICK", -"SURFACE_SETT", -"SURFACE_COBBLESTONE", -"SURFACE_UNPAVED", -"SURFACE_GRAVEL", -"SURFACE_DIRT", -"SURFACE_SAND" -], -"enumDescriptions": [ -"RESERVED", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": { -"description": "clang-format on", -"enum": [ -"TYPE_UNKNOWN", -"TYPE_NORMAL", -"TYPE_PASSING", -"TYPE_LEFT_TURN", -"TYPE_LEFT_TURN_OFF", -"TYPE_LEFT_TURN_ON_OFF", -"TYPE_RIGHT_TURN", -"TYPE_RIGHT_TURN_OFF", -"TYPE_RIGHT_TURN_ON_OFF", -"TYPE_BICYCLE", -"TYPE_PARKING", -"TYPE_PARKING_IMPLIED", -"TYPE_PARKING_MARKED", -"TYPE_EXIT_ENTRANCE", -"TYPE_EXIT_LANE", -"TYPE_ENTRANCE_LANE", -"TYPE_PEDESTRIAN", -"TYPE_SIDEWALK_SHOULDER", -"TYPE_VEHICLE_SHOULDER", -"TYPE_OFFSET" -], -"enumDescriptions": [ -"", -"", -"These are usually shared lanes, usable for passing by either 
direction.", -"", -"Typical left turn at intersection.", -"Lane used both for cars turning left to a side street, and for cars from side streets turning left onto the larger road.", -"", -"Typical right turn at intersection.", -"Lane used both for cars turning right to a side street, and for cars from side streets turning right onto the larger road (in left-side driving countries).", -"", -"", -"This lane represents the common case of parking along the curb. It is generally not a drivable lane.", -"Parking with marked stalls.", -"Lanes that are temporary, for acceleration/deceleration, or that will split off.", -"A lane that is used for exiting the core road.", -"An acceleration/merging lane.", -"A bidirectional walking lane, such as a sidewalk. The walking lane is implied to be on the side of the road as specified by the segment's \"on_right\" flag. The pedestrian lane should therefore be on the outside of a road's traffic lanes. Presence of a pedestrian lane implies that we have a sidewalk or other facility away from the road surface where pedestrians walk. If a road does not have a pedestrian lane, this means there is no space or facility for pedestrians to walk, or that pedestrians walk on the wide shoulder. The pedestrian lane, width, and distance_to_next_lane controls where Pathfinder draws the pedestrian polyline. As described in go/hdwalking++, the polyline is computed as (pl = pedestrian lane, s = segment): offset = s.distance_to_edge + pl.distance_to_next_lane + pl.width / 2", -"A special shoulder lane present on narrow roads in Japan that may double as a sidewalk. This isn\u2019t a subset of TYPE_PEDESTRIAN since it may have either uses and is not separated by a curb.", -"Represents a lane that is wide enough to park or drive slowly on and is physically accessible from the main roadway. 
This is part of road surface that\u2019s not used for normal flow of traffic but can be used for emergency purposes, such as emergency stopping and parking or by emergency vehicles. These are often known as emergency use lanes. This should generally be either first or last lane on the road (not counting pedestrian lanes if present). NOTE: In may cases, this may be colloquially called a \u201cshoulder\u201d, but road shoulders may include other areas that are not readily accessible, especially at speed.", -"This represents an \u201coffset\u201d lane that doesn\u2019t really carry useful semantics but can be used to fill in \u201cgaps\u201d between the sum total of lane widths and the road as represented by distance_to_edge." -], -"type": "string" -}, -"width": { -"description": "Width of this lane in meters. In many cases, we will collect this data by dividing the total road width by the number of lanes. On accuracy: This is a rough average width along this segment. If and when we wanted to be more accurate, we'd extend this schema to have full polygons for segments/lanes rather than just this average width.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreLaneProtoLaneConnection": { -"id": "GeostoreLaneProtoLaneConnection", -"properties": { -"boundingMarker": { -"description": "References to any gcid:physical_lane_marker features that bound this lane connection.", -"items": { -"$ref": "GeostoreBoundingMarkerProto" -}, -"type": "array" -}, -"connectionToken": { -"description": "A token that can be used to identify the version of the data about this lane connection.", -"type": "string" -}, -"curve": { -"$ref": "GeostoreCurveConnectionProto", -"description": "Specifies how the flowline should be synthesized in this connection region. If unspecified, heuristics may be used to pick a sweep shape based on retraction values or neighboring curves." 
-}, -"flow": { -"$ref": "GeostoreFlowLineProto", -"description": "The most logical path for the center of an object to travel along within the lane connection. Typically, this is the lane connection's center line, but doesn't have to be." -}, -"laneNumber": { -"description": "This is the lane number on the target segment. This field is not set if the target segment doesn't have lanes, or we don't know the exact connectivity.", -"format": "int32", -"type": "integer" -}, -"primaryConnection": { -"description": "True if this connects to the unique, natural continuation of the current lane. At most one LaneConnection per lane can have this field set true. This attribute is of interest to ADAS providers as a hint to which lane a vehicle is likely to follow, in the absence of other information about the vehicle's planned path.", -"type": "boolean" -}, -"segment": { -"$ref": "GeostoreFeatureIdProto", -"description": "This reference to the other segment is weak, since strong would blow up bounds of all segments." -}, -"yieldToOtherConnections": { -"description": "The inverse of the primary connection bit that provides a hint that this connection is part of a merge and a vehicle following this connection should yield to vehicles following other incoming connections.", -"type": "boolean" -} -}, -"type": "object" -}, -"GeostoreLanguageTaggedTextProto": { -"description": "Represents a piece of text with an associated language.", -"id": "GeostoreLanguageTaggedTextProto", -"properties": { -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. 
See http://go/geo-schema-reference/feature-properties/languages.md", -"type": "string" -}, -"text": { -"description": "The text (UTF-8 encoding).", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreLevelProto": { -"description": "A feature used to represent a logical level, e.g. floor. A feature belonging to a given level should point to the level feature using relation RELATION_ON_LEVEL. The standard feature properties are interpreted as follows: name - Levels should have names according to the following: * Short, elevator-style names, such as \"L\" for \"Lobby\" and \"2\" for \"Second floor\", must be included and must be tagged with FLAG_ABBREVIATED. There must be an unambiguous \"best\" abbreviated name. * Longer names such as \"Ticketing\" or \"Upper Level\" may be present when the level has a specific name. * Non-abbreviated names should only be added if they are known to meaningfully expand upon the abbreviated name. For example, the long name \"Observation Deck 2\" for the abbreviated name \"OD2\" is a good additional name. In contrast, the name \"Level 2\" for the abbreviated name \"2\" is not desired. address - This should always be empty. point, polyline, polygon, center - These should never be set (since we are representing a logical entity). preferred_viewport - This should be the approximate extent of the level. child - This should always be empty.", -"id": "GeostoreLevelProto", -"properties": { -"building": { -"description": "The building(s) to which this level belongs. A level will typically belong to a single building, but it is valid for a single level to be shared by multiple buildings (for example, a large underground parking lot). These buildings refer back to the level via another strong reference (the BuildingProto.level field).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"number": { -"description": "The elevation of this level relative to the ground level, in levels. 
0 = ground floor (even in locales that call the ground floor \"1st floor\"); 0.5 = between ground and first floor, eg mezzanine; 1 = first floor (one level above ground floor); -3 = three levels below ground floor.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreLinearStripePatternProto": { -"id": "GeostoreLinearStripePatternProto", -"properties": { -"line": { -"description": "A linear marker may consist of one or more parallel physical lines. These are ordered left to right along the direction of the marker core polyline.", -"items": { -"$ref": "GeostorePhysicalLineProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreLocaleLanguageProto": { -"description": "This message describes the details of a single language within a locale.", -"id": "GeostoreLocaleLanguageProto", -"properties": { -"language": { -"description": "The language associated with this preference. The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. See http://go/geo-schema-reference/feature-properties/languages.md", -"type": "string" -}, -"official": { -"description": "Flag to indicate if the associated language is \"official\" within a locale.", -"type": "boolean" -}, -"preference": { -"description": "This value represents the preference of the associated language within a locale. It must be between 0.0 and 1.0.", -"format": "float", -"type": "number" -}, -"speakingPercent": { -"description": "Percentage of population that can speak the associated language within a locale. It must be between 0 and 100.", -"format": "float", -"type": "number" -}, -"writingPercent": { -"description": "Percentage of population that can write the associated language within a locale. 
It must be between 0 and 100.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreLocaleProto": { -"description": "A locale is a meta-feature that describes the geographic extent of localization preferences such as the local language, and formatting conventions for numbers, dates and monetary values. Multilingual areas may be contained by multiple locales. We try to model locales fine-grained enough for deciding which languages are typically used within a city. For example, while French is an official language for all of Switzerland, we would prefer to have Zurich contained by a separate (more fine-grained) Swiss-German locale indicating that German, not French, is the predominantly spoken language in this city. Note that language borders are frequently considered a political question and often don't have clearly defined extents. For example, California has a significant Spanish-speaking population, but Spanish is not an official language of California.", -"id": "GeostoreLocaleProto", -"properties": { -"language": { -"description": "This holds the list of languages spoken within a locale.", -"items": { -"$ref": "GeostoreLocaleLanguageProto" -}, -"type": "array" -}, -"localizationPolicyId": { -"description": "The ID of the localization policy (from googledata/geostore/localization/localization_policies.textpb) to apply to features that have this locale as their best match locale. Localization policy IDs are arbitrary identifiers that uniquely distinguish a set of language-selection rules.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreLogicalBorderProto": { -"description": "A logical border is a grouping of border features, which together model a divide between two regions. The borders within this grouping share common attributes, such as the regions they divide, and may represent a conceptual group of borders, of which may be wholly disputed, wholly undisputed, or a mixture of disputed and undisputed. 
Note that any borders within this group may be part of multiple logical borders. A logical border is required to have a name describing what the grouping represents (e.g. \"US - Mexico Border\", \"Kosovo - Serbia Border (Disputed)\").", -"id": "GeostoreLogicalBorderProto", -"properties": { -"borderSegment": { -"description": "All the border segments which make up this logical border. Border segments must be TYPE_BORDER features which have the same left/right features. This is a many-to-many bidirectional relationship, so any border segment within this list might be part of another logical border.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"status": { -"description": "The logical border status identifies its legal status. This is similar to the BorderStatus present within border segments, but applies to the group as a whole.", -"enum": [ -"STATUS_UNSPECIFIED", -"STATUS_NORMAL", -"STATUS_DISPUTED" -], -"enumDescriptions": [ -"The logical border is neither entirely composed of either normal nor disputed border segments. There are no guarantees about the BorderStatus of its border segments.", -"This status should be used when both parties agree on the location of the border. These might be called \"de jure\" borders. Most borders will have this status. This status requires that all border segments that make up this logical border have normal border segments.", -"This status should be used when the two parties disagree on the location of the border. This status requires that all border segments are themselves disputed." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreMediaItemProto": { -"description": "Media item attached to an element of price list.", -"id": "GeostoreMediaItemProto", -"properties": { -"googleUrl": { -"description": "The FIFE url associated with the media. 
NOTE: This FIFE URL must be PII-free, see go/product-catalogue-photo-storage", -"type": "string" -}, -"mediaFormat": { -"enum": [ -"MEDIA_FORMAT_UNSPECIFIED", -"MEDIA_FORMAT_PHOTO" -], -"enumDescriptions": [ -"Format unspecified.", -"Media item is a photo." -], -"type": "string" -}, -"mediaKey": { -"description": "The mediaKey associated with the media. NOTE: This media key must be PII-free, see go/product-catalogue-photo-storage", -"type": "string" -}, -"mediaSize": { -"$ref": "GeostoreMediaItemProtoMediaSize" -} -}, -"type": "object" -}, -"GeostoreMediaItemProtoMediaSize": { -"description": "Width and height of the original photo in pixels.", -"id": "GeostoreMediaItemProtoMediaSize", -"properties": { -"originalHeightPx": { -"format": "int32", -"type": "integer" -}, -"originalWidthPx": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreNameProto": { -"description": "A name for a Feature (street name, point of interest, city, building, etc). We currently use NameProto for two essentially disjoint purposes: 1. Common names, which can be language-specific, or have other kinds of variations. 2. Opaque IDs, such as postal codes, which only have the `text` field set, and potentially some flags. This includes internal-only features like template ids. Each NameProto representing a common name corresponds to an assertion that a fluent speaker or writer of a language would recognize NameProto.text to name the given feature in that language. As such, NameProtos are stored in a repeated field, often having: 1. multiple names with the same text and varying languages, and 2. 
multiple names with the same language and varying texts.", -"id": "GeostoreNameProto", -"properties": { -"flag": { -"description": "clang-format on The set of flags that apply to this name.", -"items": { -"enum": [ -"FLAG_ANY", -"FLAG_IN_LOCAL_LANGUAGE", -"FLAG_PREFERRED", -"FLAG_OFFICIAL", -"FLAG_OBSCURE", -"FLAG_ON_SIGNS", -"FLAG_EXIT_NAME_NUMBER", -"FLAG_EXIT_NAME", -"FLAG_INTERCHANGE_NAME", -"FLAG_EXIT_NUMBER", -"FLAG_INTERCHANGE_NUMBER", -"FLAG_TRANSIT_HEADSIGN", -"FLAG_CONNECTS_DIRECTLY", -"FLAG_CONNECTS_INDIRECTLY", -"FLAG_INTERSECTION_NAME", -"FLAG_VANITY", -"FLAG_ROUTE_NUMBER", -"FLAG_COUNTRY_CODE_2", -"FLAG_ABBREVIATED", -"FLAG_ID", -"FLAG_DESIGNATED_MARKET_AREA_ID", -"FLAG_IATA_ID", -"FLAG_ICAO_ID", -"FLAG_ISO_3166_2", -"FLAG_COUNTRY_SPECIFIC_ID", -"FLAG_LANGUAGE_CODE", -"FLAG_TIMEZONE_ID", -"FLAG_PHONE_NUMBER_PREFIX", -"FLAG_PHONE_NUMBER_AREA_CODE", -"FLAG_TRANSLITERATED", -"FLAG_NOT_ON_SIGNS", -"FLAG_NOT_IN_LOCAL_LANGUAGE", -"FLAG_ROUNDABOUT_ROUTE", -"FLAG_NEVER_DISPLAY", -"FLAG_BICYCLE_ROUTE", -"FLAG_MACHINE_GENERATED", -"FLAG_SUSPICIOUS" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true -], -"enumDescriptions": [ -"ABSTRACT", -"This name is in one of the local languages. A local language is one readable by local people, where all language/script/region subtags in the language field are local. E.g. \"ru\" is local in Russia, but \"ru-Latn\" (transliterated in Latin script) is not local. This is the opposite of FLAG_NOT_IN_LOCAL_LANGUAGE. In general, a given name should have either FLAG_IN_LOCAL_LANGUAGE or FLAG_NOT_IN_LOCAL_LANGUAGE specified. Importers that are unsure should not set either flag. It doesn't make sense to set this flag unless you also specify a language. 
NOTE: unlike most other flags in this enumeration, this flag has to do with the language field, NOT to the text (and {raw,short}_text fields).", -"This is the most commonly recognized name for this feature (for a given language, when languages apply). This flag is not compatible with the following flags: - FLAG_NEVER_DISPLAY - FLAG_OBSCURE", -"This name is officially recognized by the government. This flag is not compatible with FLAG_SUSPICIOUS.", -"This name is not commonly known. This flag is used on names that are not commonly known, or not \"standard\", but which we wish to have so clients can search on them. An example would be \"Philly\" instead of \"Philadelphia\".", -"ABSTRACT", -"This name is an exit name/number found on signs.", -"This flag is used to differentiate exit names from exit numbers on signs that have both.", -"The highway sign is labeling a highway interchange instead of an exit. In several European countries interchanges are differentiated from highway exits.", -"This flag is used to differentiate exit numbers from exit names on signs that have both. This flag is not compatible with FLAG_ROUTE_NUMBER.", -"The highway sign is labeling a highway interchange the same manner as FLAG_INTERCHANGE_NAME.", -"The headsign of a transit vehicle, often the name of the destination.", -"The road sign target is directly connected (e.g., at end of ramp).", -"The road sign target is not directly connected (e.g., must travel some additional distance).", -"This name is an intersection name found on signs. Usually we can just use the name of a TYPE_INTERSECTION_GROUP feature. However, there are road signs on some intersections in Japan, especially in Hokkaido region where the intersection name is considered \"directional\" (i.e. an intersection could have multiple names depending on directions drivers or pedestrians approach from). Many people think of them as \"intersection names\", but that's not really what they are. 
We want to model them as road signs, because that's really what they are. This flag is supposed to be used by Pathfinder to generate the appropriate maneuver texts where such a named intersection is involved.", -"This is a vanity name (usually an alternate name for a section of road in honor of some person).", -"This name is a route number from an official numbering system for highways or similar routes (like bicycle routes). Street names (even numbered ones like in Manhattan) are excluded. The name (both text and short_text) should follow a strictly-defined format determined by the official numbering system. Route numbers may be assigned to segments on city streets as well as highways. (US-101 passing through San Francisco is an example that has both.) Historic route names that are no longer part of an official numbering system (like the former US-66) are not route numbers. This flag is not compatible with FLAG_EXIT_NUMBER.", -"This name is a two-letter ISO 3166-1 country code. It has code 0x81 instead of 0x8 for historical reasons (FLAG_COUNTRY_CODE and FLAG_COUNTRY_CODE_3 were removed in the belief that we only want one way to represent a country code in the Geo Schema).", -"An abbreviated version of the name, for example \"CA\" for California. This flag should be used for postal abbreviations that one would expect to see in an address. It should also be used for names where one of the component words is abbreviated (e.g. \"Mass.\" for Massachusetts, \"NWFP\" for the North-West Frontier Province in Pakistan, or \"L\" for the Lobby level in a building). Shortened forms of names (e.g. \"Kingston\" rather than \"Kingston-upon-Hull\") should be marked obscure rather than abbreviated. This flag should be used for names that are feature specific abbreviations. 
For shortened name versions based on regular, per-term transformations (ex: '1st Street' -> '1st St'), use the short_text field instead.", -"ABSTRACT", -"This name is a Designated Market Area ID number (ex: 501)", -"This name is a three-letter IATA airport code (ex SFO, ZRH).", -"This name is a four-letter ICAO airport code (ex KSFO, LSZH).", -"This name is the ISO 3166-2 country subdivision code.", -"This name is the country specific ID. For example, China government specifies an admin code for each administrative area (province, city and district).", -"This name is an IETF BCP 47 language code, for example \"fr-CA\". Previously used for names of locales. See locale.proto for new way of defining this information.", -"This name is a timezone identifier in the Olson database, for example \"Europe/Zurich\". All timezones have an Olson identifier, understood by POSIX and Google's i18n/identifiers libraries. This flag applies only to features of TYPE_TIMEZONE type.", -"This name is the prefix for a phone number, starting with + and followed by a country code. This flag applies only to features of TYPE_PHONE_NUMBER_PREFIX type and at most one name should have this flag set.", -"This name is the commonly used name for the area code of a phone number, including the national prefix if applicable. This flag applies only to features of TYPE_PHONE_NUMBER_AREA_CODE type and at most one name should have this flag set.", -"The flag used to indicate the name was transliterated from some other character set. For example, this flag would be set on names transliterated from the Cyrillic or Greek alphabets to a Latin alphabet. The correct approach is to use the Google III language code to specify the appropriate language. For example, Greek transliterated into a Latin alphabet should have a language code of \"el-Latn\".", -"There are two kinds of street signs: \"location signs\" and \"guidance signs\". Location signs are the ones you see that tell you where you are. 
\"Ah, I'm on 85th Street and I'm crossing 6th Avenue.\" Guidance signs are the ones that give you hints about how to get where you want to go. \"Ah, I should go that way to get on I-5 to Seattle.\" This flag is talking about location signs. You'll find this flag on route names when then name is never posted on location signs. This flag is not compatible with flags in the FLAG_ON_SIGNS category.", -"This name is not in one of the local languages. A local language is one readable by local people, where all language/script/region subtags in the language field are local. E.g. \"ru\" is local in Russia, but \"ru-Latn\" (transliterated in Latin script) is not local. This is the opposite of FLAG_IN_LOCAL_LANGUAGE. In general, a given name should have either FLAG_IN_LOCAL_LANGUAGE or FLAG_NOT_IN_LOCAL_LANGUAGE specified. Importers that are unsure should not set either flag. It doesn't make sense to set this flag unless you also specify a language. NOTE: unlike most other flags in this enumeration, this flag has to do with the language field, NOT to the text (and {raw,short}_text fields).", -"This flag is set on routes attached to roundabout segments if and only if this name does not appear on any of the segments incident on the roundabout. This flag can be used by clients to determine which names on roundabouts are more useful than others when displaying to users.", -"Never display this name to a user. This flag is typically used for politically sensitive names such as \"Taiwan, Province of China\". Any feature which has a name with this flag should have another name without it so we have something to display to the user.", -"Routes with these names are common bicycle routes. This includes bicycle trails and routes along shared roadways to popular destinations for cyclists. Unnamed bicycle routes along named shared roadways should not have names of this type. 
This flag exists so that we can avoid showing bicycle route names in driving directions, while still showing them in bicycling directions.", -"This name is generated automatically by the transliteration pipeline based on an existing name in a different language. Contact Maps Translation team (go/maps-translation) for details.", -"This name may not be a correct name for a feature. It communicates to clients that the name may not be trustworthy. Examples are names of features that are too general (\"swimming pool\", \"hardware store\" etc) or those that incorrectly match names of prominent political features (businesses, transit stations etc named after sublocalities). This flag is not compatible with FLAG_OFFICIAL." -], -"type": "string" -}, -"type": "array" -}, -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. These strings should be treated as opaque blobs. You can use LanguageCodeConverter::FromOther to convert the string to a LanguageCode reference. You can then call methods on the LanguageCode class to extract language/script/region subtags (if any). See also http://g3doc/i18n/identifiers/g3doc/using-iii. We place extra restrictions on languages in addition to what the III library requires. See go/geo-schema-reference/feature-properties/languages. This field may be missing if the name does not have a concept of language but should be set if the language is unknown.", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this name. NOTE: there are multiple NameProto fields in the Geo Schema. Metadata here is only expected to be present on FeatureProto.name[]." -}, -"rawText": { -"deprecated": true, -"description": "** DEPRECATED ** The name text provided in the original source data (UTF-8 encoding). 
This is the text provided in the source data unmodified with the exception of being converted to UTF-8 and stripping extra leading, trailing and duplicate whitespaces (if necessary).", -"type": "string" -}, -"shortText": { -"description": "The short name text (UTF-8 encoding). Acronyms/abbreviations should be consistently used, for example \"NE 57th St\" rather than \"Northeast 57th Street\", \"N.E 57th St.\" or some other variant. This field should be populated with the chosen canonical version of the shortened name, based on per-term transformations. For feature specific abbreviations (such as 'CA' for 'California'), one should define a separate name with FLAG_ABBREVIATED set. For other variants of the shortened name that are not the canonical one, devise client based logic (ex: query rewriting rules).", -"type": "string" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a name. Never set in MapFacts." -}, -"text": { -"description": "The name text (UTF-8 encoding). Acronyms/abbreviations should be fully expanded, for example \"Northeast 57th Street\" rather than \"NE 57th St\". They can be shortened at display or geocode time. This decision prevents ambiguity over such issues as whether \"St\" represents \"Street\" or \"Saint\". However, it pushes language-specific knowledge into code. 
We will have libraries and data files to contract acronyms/abbreviations at run-time.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreOntologyRawGConceptInstanceContainerProto": { -"description": "The container for all GConceptInstances associated with a feature.", -"id": "GeostoreOntologyRawGConceptInstanceContainerProto", -"properties": { -"instance": { -"items": { -"$ref": "GeostoreOntologyRawGConceptInstanceProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreOntologyRawGConceptInstanceProto": { -"description": "A RawGConceptInstanceProto contains all data required by both internal and external clients. We store the 'public' data in a GConceptInstanceProto and the 'private' data inside of RawGConceptInstanceProto. NOTE: this doesn't really match the design we want anymore. Please talk to the Geo Schema team if you are planning to make use of the \"private\" fields below.", -"id": "GeostoreOntologyRawGConceptInstanceProto", -"properties": { -"instance": { -"$ref": "GeostoreGConceptInstanceProto", -"description": "This is the 'public' section of the GConceptInstance." -}, -"isAddedByEdit": { -"deprecated": true, -"description": "** DEPRECATED ** Was this GConcept explicitly added by an edit? Examples of gconcepts not added by edits include those inferred through geo ontology and those mapped from legacy category forms by the feature updater. Note that it is possible for both is_added_by_edit and is_inferred to be true - it means this gconcept is added by an edit and there is also another more fine-grained gconcept added by an edit.", -"type": "boolean" -}, -"isInferred": { -"description": "RESERVED", -"type": "boolean" -}, -"provider": { -"deprecated": true, -"description": "** DEPRECATED ** These two fields combined describe the source of a GConceptInstance. They are based on geostore/base/proto/datasourceprovider.proto. Their use has been deprecated. 
Use the FieldMetadataProto inside instance instead.", -"enum": [ -"PROVIDER_ANY", -"PROVIDER_UNKNOWN", -"PROVIDER_NAVTEQ", -"PROVIDER_TELE_ATLAS", -"PROVIDER_TELE_ATLAS_MULTINET", -"PROVIDER_TELE_ATLAS_CODEPOINT", -"PROVIDER_TELE_ATLAS_GEOPOST", -"PROVIDER_TELE_ATLAS_DATAGEO", -"PROVIDER_TELE_ATLAS_ADDRESS_POINTS", -"PROVIDER_TELCONTAR", -"PROVIDER_EUROPA", -"PROVIDER_ROYAL_MAIL", -"PROVIDER_GOOGLE", -"PROVIDER_GOOGLE_HAND_EDIT", -"PROVIDER_GOOGLE_BORDERS", -"PROVIDER_GOOGLE_SUBRANGE", -"PROVIDER_GOOGLE_GT_FUSION", -"PROVIDER_GOOGLE_ZAGAT_CMS", -"PROVIDER_GOOGLE_PLACE_NAVBOOST", -"PROVIDER_GOOGLE_FOOTPRINT", -"PROVIDER_GOOGLE_PRODUCT_TERMS", -"PROVIDER_GOOGLE_POINTCARDS", -"PROVIDER_GOOGLE_BUSINESS_CHAINS", -"PROVIDER_GOOGLE_LOCAL_SUMMARIZATION", -"PROVIDER_GOOGLE_PRONUNCIATIONS", -"PROVIDER_GOOGLE_DUMPLING", -"PROVIDER_GOOGLE_DISTILLERY", -"PROVIDER_GOOGLE_LOCAL_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_RELATION_MINER", -"PROVIDER_GOOGLE_MAPSPAM", -"PROVIDER_GOOGLE_ROSE", -"PROVIDER_GOOGLE_LOCAL_PLACE_RATINGS", -"PROVIDER_GOOGLE_WIPEOUT", -"PROVIDER_GOOGLE_KNOWLEDGE_GRAPH", -"PROVIDER_GOOGLE_BEEGEES", -"PROVIDER_GOOGLE_REVIEW_SUMMARIZATION", -"PROVIDER_GOOGLE_OFFLINE_NON_CORE_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_GEO_WORLDMAPS", -"PROVIDER_GOOGLE_GEO_MODERATION", -"PROVIDER_GOOGLE_OYSTER_AUTO_EDITS", -"PROVIDER_GOOGLE_LOCAL_ALCHEMY", -"PROVIDER_GOOGLE_KEROUAC", -"PROVIDER_GOOGLE_MOBRANK", -"PROVIDER_GOOGLE_RAPTURE", -"PROVIDER_GOOGLE_CULTURAL_INSTITUTE", -"PROVIDER_GOOGLE_GEOCODES_FROM_LOCAL_FEEDS", -"PROVIDER_GOOGLE_ATTRIBUTES_FROM_CRAWLED_CHAINS", -"PROVIDER_GOOGLE_TACTILE_MAPS", -"PROVIDER_GOOGLE_MAPS_FOR_MOBILE", -"PROVIDER_GOOGLE_GEO_REALTIME", -"PROVIDER_GOOGLE_PROMINENT_PLACES", -"PROVIDER_GOOGLE_PLACE_ACTIONS", -"PROVIDER_GOOGLE_GT_AUTO_EDITS", -"PROVIDER_GOOGLE_WAZE", -"PROVIDER_GOOGLE_ONTHEGO", -"PROVIDER_GOOGLE_GT_IMPORT", -"PROVIDER_GOOGLE_STRUCTURED_DATA", -"PROVIDER_GOOGLE_HELICOPTER", -"PROVIDER_GOOGLE_ROLLBACK", 
-"PROVIDER_GOOGLE_RIGHTS_REPAIR", -"PROVIDER_GOOGLE_PERFUME", -"PROVIDER_GOOGLE_MAPS_TRANSLATION", -"PROVIDER_GOOGLE_CALL_ME_MAYBE", -"PROVIDER_GOOGLE_LOCAL_UNIVERSAL", -"PROVIDER_GOOGLE_CROUPIER", -"PROVIDER_GOOGLE_SKYSMART", -"PROVIDER_GOOGLE_RIDDLER", -"PROVIDER_GOOGLE_ROADCLOSURES", -"PROVIDER_GOOGLE_SPORE", -"PROVIDER_GOOGLE_LOCALIZATION", -"PROVIDER_GOOGLE_CATTERMS", -"PROVIDER_GOOGLE_GT_FIELD_OPS", -"PROVIDER_GOOGLE_MATCHMAKER", -"PROVIDER_GOOGLE_ARBITRATION", -"PROVIDER_GOOGLE_BIZBUILDER_OPS", -"PROVIDER_GOOGLE_LOCAL_INVENTORY_ADS", -"PROVIDER_GOOGLE_GT_DRAFTY", -"PROVIDER_GOOGLE_HOTELADS_OPS", -"PROVIDER_GOOGLE_MARKERS", -"PROVIDER_GOOGLE_STATE_MACHINE", -"PROVIDER_GOOGLE_ATTRIBUTES_INFERENCE", -"PROVIDER_GOOGLE_BIKESHARE", -"PROVIDER_GOOGLE_GHOSTWRITER", -"PROVIDER_GOOGLE_EDIT_PLATFORM", -"PROVIDER_GOOGLE_BLUE_GINGER", -"PROVIDER_GOOGLE_GEO_TIGER", -"PROVIDER_GOOGLE_HYADES", -"PROVIDER_GOOGLE_WEBQUARRY", -"PROVIDER_GOOGLE_GEO_MADDEN", -"PROVIDER_GOOGLE_ANDROID_PAY", -"PROVIDER_GOOGLE_OPENING_HOURS_TEAM", -"PROVIDER_GOOGLE_LOCAL_DISCOVERY", -"PROVIDER_GOOGLE_LOCAL_HEALTH", -"PROVIDER_GOOGLE_UGC_MAPS", -"PROVIDER_GOOGLE_FIBER", -"PROVIDER_GOOGLE_REVGEO", -"PROVIDER_GOOGLE_HOTELADS_PARTNER_FRONT_END", -"PROVIDER_GOOGLE_GEO_UGC_TASKS", -"PROVIDER_GOOGLE_GEOCODING", -"PROVIDER_GOOGLE_SPYGLASS", -"PROVIDER_GOOGLE_PLUS_CODES_AS_ADDRESSES", -"PROVIDER_GOOGLE_GEO_CHANGES", -"PROVIDER_GOOGLE_HUME", -"PROVIDER_GOOGLE_MEGAMIND", -"PROVIDER_GOOGLE_GT_ROADSYNTH", -"PROVIDER_GOOGLE_FIREBOLT", -"PROVIDER_GOOGLE_LOCAL_PLACE_OFFERINGS", -"PROVIDER_GOOGLE_UGC_SERVICES", -"PROVIDER_GOOGLE_GEOALIGN", -"PROVIDER_GOOGLE_GT_COMPOUNDS", -"PROVIDER_GOOGLE_FOOD_ORDERING", -"PROVIDER_GOOGLE_HOTEL_KNOWLEDGE_OPS", -"PROVIDER_GOOGLE_URAW", -"PROVIDER_GOOGLE_FLYEYE", -"PROVIDER_GOOGLE_YOUKE", -"PROVIDER_GOOGLE_GT_ZEPHYR", -"PROVIDER_GOOGLE_USER_SAFETY", -"PROVIDER_GOOGLE_ADDRESS_MAKER", -"PROVIDER_GOOGLE_UGC_PHOTOS", -"PROVIDER_GOOGLE_GT_WINDCHIME", -"PROVIDER_GOOGLE_SNAG_FIXER", 
-"PROVIDER_GOOGLE_GEO_DEALS", -"PROVIDER_GOOGLE_LOCAL_PLACE_TOPICS", -"PROVIDER_GOOGLE_PROPERTY_INSIGHTS", -"PROVIDER_GOOGLE_GEO_CONSUMER_MERCHANT_EXPERIMENTS", -"PROVIDER_GOOGLE_GEO_PORTKEY", -"PROVIDER_GOOGLE_ROAD_MAPPER", -"PROVIDER_GOOGLE_LOCATION_PLATFORM", -"PROVIDER_GOOGLE_POSTTRIP", -"PROVIDER_GOOGLE_TRAVEL_DESTINATION", -"PROVIDER_GOOGLE_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_BIZBUILDER_CLEANUP", -"PROVIDER_GOOGLE_USER", -"PROVIDER_GOOGLE_STATION", -"PROVIDER_GOOGLE_GEO_FOOD", -"PROVIDER_GOOGLE_GEO_AR", -"PROVIDER_GOOGLE_GEO_TEMPORAL", -"PROVIDER_GOOGLE_SERVICES_MARKETPLACE", -"PROVIDER_GOOGLE_IMT_CLEANUP", -"PROVIDER_GOOGLE_GEO_FOOD_MENU", -"PROVIDER_GOOGLE_CARENAV", -"PROVIDER_GOOGLE_DRIVING_FEEDS", -"PROVIDER_GOOGLE_DRIVING_UGC", -"PROVIDER_GOOGLE_POLAR", -"PROVIDER_GOOGLE_TRIWILD", -"PROVIDER_GOOGLE_CROWD_COMPUTE_OPS", -"PROVIDER_GOOGLE_SA_FROM_WEB", -"PROVIDER_GOOGLE_POI_ALIGNMENT", -"PROVIDER_GOOGLE_SA_FROM_HULK", -"PROVIDER_GOOGLE_SERVICES_INTERACTIONS", -"PROVIDER_GOOGLE_ROADS_UGC_EDITOR", -"PROVIDER_GOOGLE_SA_FROM_NG_INFERENCE", -"PROVIDER_GOOGLE_GEO_DRIVING_VIZ", -"PROVIDER_GOOGLE_GEO_TASKING", -"PROVIDER_GOOGLE_CROWDTASK_DATACOMPUTE", -"PROVIDER_GOOGLE_CROWDTASK_TASKADS", -"PROVIDER_GOOGLE_CROWDTASK_TASKMATE", -"PROVIDER_GOOGLE_CROWDTASK_FURBALL", -"PROVIDER_GOOGLE_CROWDTASK_ADAP", -"PROVIDER_GOOGLE_GPAY", -"PROVIDER_GOOGLE_GEO_UGC_TRUSTED_USERS", -"PROVIDER_GOOGLE_THIRD_PARTY_DATA_PRODUCTION", -"PROVIDER_GOOGLE_GEOTRACKER", -"PROVIDER_GOOGLE_LOCAL_LANDMARK_INFERENCE", -"PROVIDER_GOOGLE_GEO_CLOSED_LOOP", -"PROVIDER_GOOGLE_SA_FROM_MERCHANT_POSTS", -"PROVIDER_GOOGLE_CORE_DATA_RIGHTS", -"PROVIDER_GOOGLE_SA_FROM_USER_REVIEWS", -"PROVIDER_GOOGLE_GEO_CONTENT_FIXER", -"PROVIDER_GOOGLE_POLYGON_REFINEMENT", -"PROVIDER_GOOGLE_HANASU", -"PROVIDER_GOOGLE_FULLRIGHTS_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_FULLRIGHTS_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_ATTRIBUTION_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_SA_FROM_FOOD_MENUS", -"PROVIDER_GOOGLE_GT_CONSISTENCY_EDITS", 
-"PROVIDER_GOOGLE_SA_QUALITY", -"PROVIDER_GOOGLE_GDCE_CLEANUP", -"PROVIDER_GOOGLE_UGC_QUALITY_CHAINS", -"PROVIDER_GOOGLE_ATTRIBUTES_DISCOVERY", -"PROVIDER_GOOGLE_GEO_LDE", -"PROVIDER_GOOGLE_GEO_SIGNAL_TRACKING", -"PROVIDER_GOOGLE_UGC_AGGREGATION", -"PROVIDER_GOOGLE_3D_BASEMAP", -"PROVIDER_GOOGLE_MAPFACTS_PRIVACY", -"PROVIDER_GOOGLE_GT_ALF", -"PROVIDER_GOOGLE_GT_OPERATOR_PROVENANCE", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS", -"PROVIDER_GOOGLE_GT_LANE_AUTOMATION", -"PROVIDER_GOOGLE_GEO_NG_LOCAL", -"PROVIDER_GOOGLE_MAPFACTS_CLEANUP", -"PROVIDER_GOOGLE_THIRD_PARTY_UGC", -"PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", -"PROVIDER_GOOGLE_VACATION_RENTAL_PARTNERS", -"PROVIDER_GOOGLE_FEED_PROCESSOR_ROAD_INCIDENTS", -"PROVIDER_GOOGLE_DYNAMIC_BASEMAP", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS_EMEA", -"PROVIDER_GOOGLE_LOCALSEARCH", -"PROVIDER_GOOGLE_TRANSIT", -"PROVIDER_GOOGLE_GEOWIKI", -"PROVIDER_GOOGLE_CHINA_LOCAL_TEAM", -"PROVIDER_GOOGLE_SYNTHESIZED", -"PROVIDER_GOOGLE_INTERNAL_TEST", -"PROVIDER_GOOGLE_DISPUTED_AREAS", -"PROVIDER_GOOGLE_3DWAREHOUSE", -"PROVIDER_GOOGLE_GROUNDS_BUILDER", -"PROVIDER_GOOGLE_SESAME", -"PROVIDER_GOOGLE_GT", -"PROVIDER_GOOGLE_GT_BASEMAP_UPLOAD", -"PROVIDER_GOOGLE_ADSDB", -"PROVIDER_GOOGLE_MACHINE_TRANSLITERATION", -"PROVIDER_GOOGLE_TRAVELSEARCH", -"PROVIDER_GOOGLE_PANORAMIO", -"PROVIDER_GOOGLE_YOUTUBE", -"PROVIDER_GOOGLE_OLD", -"PROVIDER_GOOGLE_STREETVIEW", -"PROVIDER_GOOGLE_STREETVIEW_BIZVIEW", -"PROVIDER_GOOGLE_ZIPIT", -"PROVIDER_GOOGLE_OYSTER_CONNECT_ROUTES", -"PROVIDER_GOOGLE_GOLDEN", -"PROVIDER_GOOGLE_INNERSPACE", -"PROVIDER_GOOGLE_MAPSEARCH", -"PROVIDER_GOOGLE_CATEGORIES_TEAM", -"PROVIDER_GOOGLE_CROWDSENSUS", -"PROVIDER_GOOGLE_LOCAL_ALGORITHMIC_IDENTITY", -"PROVIDER_GOOGLE_FREEBASE", -"PROVIDER_GOOGLE_HOTELADS", -"PROVIDER_GOOGLE_AUTHORITY_PAGES", -"PROVIDER_GOOGLE_PLACES_API", -"PROVIDER_GOOGLE_NAMEHEATMAP", -"PROVIDER_GOOGLE_MAPMAKER", -"PROVIDER_GOOGLE_MAPMAKER_MOBILE", -"PROVIDER_GOOGLE_MAPMAKER_PANCAKE", -"PROVIDER_GOOGLE_MAPMAKER_V2", 
-"PROVIDER_GOOGLE_LOCAL_CLUSTERING_OPERATOR_OVERRIDE", -"PROVIDER_GOOGLE_SERVED_ON_MAPMAKER", -"PROVIDER_GOOGLE_GT_LOCAL", -"PROVIDER_GOOGLE_GT_LOCAL_WITH_RIGHTS", -"PROVIDER_GOOGLE_LOGS_RANKING_SIGNALS", -"PROVIDER_GOOGLE_ENTITY_NAVBOOST", -"PROVIDER_GOOGLE_RELATED_PLACES", -"PROVIDER_GOOGLE_KNOWN_FOR_TERMS", -"PROVIDER_GOOGLE_SYNTHETIC_AREAS", -"PROVIDER_GOOGLE_AUTHORITY_PAGE_PHOTOS", -"PROVIDER_GOOGLE_CROSS_STREETS", -"PROVIDER_GOOGLE_CORRIDORS", -"PROVIDER_GOOGLE_BICYCLE_RENTAL", -"PROVIDER_GOOGLE_CONCRETE_URLS", -"PROVIDER_GOOGLE_LEANBACK", -"PROVIDER_GOOGLE_LOCKED_LISTINGS", -"PROVIDER_GOOGLE_MONITORING", -"PROVIDER_GOOGLE_SPROUT", -"PROVIDER_GOOGLE_LOCAL_SEARCH_QUALITY", -"PROVIDER_GOOGLE_GOBY", -"PROVIDER_GOOGLE_PROBLEM_REPORT", -"PROVIDER_GOOGLE_CANDID", -"PROVIDER_GOOGLE_BIZBUILDER", -"PROVIDER_AUTOMOTIVE_NAVIGATION_DATA", -"PROVIDER_MAPDATA_SCIENCES", -"PROVIDER_MAPONICS", -"PROVIDER_SKI_RESORTS", -"PROVIDER_ZENRIN", -"PROVIDER_SANBORN", -"PROVIDER_URBAN_MAPPING", -"PROVIDER_US_GOVERNMENT", -"PROVIDER_US_CENSUS", -"PROVIDER_US_POSTAL_SERVICE", -"PROVIDER_US_GEOLOGICAL_SURVEY", -"PROVIDER_US_GNIS", -"PROVIDER_US_LANDSAT", -"PROVIDER_US_NATIONAL_GEOSPATIAL_INTELLIGENCE_AGENCY", -"PROVIDER_US_NGA_GNS", -"PROVIDER_US_SSIBL", -"PROVIDER_US_BUREAU_OF_TRANSPORTATION_STATISTICS", -"PROVIDER_US_NATIONAL_OCEANIC_AND_ATMOSPHERIC_ADMINISTRATION", -"PROVIDER_US_POLAR_GEOSPATIAL_CENTER", -"PROVIDER_US_DEPARTMENT_OF_AGRICULTURE", -"PROVIDER_US_NPI_REGISTRY", -"PROVIDER_US_BUREAU_OF_INDIAN_AFFAIRS", -"PROVIDER_DMTI_SPATIAL", -"PROVIDER_INTERNATIONAL_HYDROGRAPHIC_ORGANIZATION", -"PROVIDER_MAPLINK", -"PROVIDER_KINGWAY", -"PROVIDER_GEOCENTRE", -"PROVIDER_CN_NATIONAL_FOUNDAMENTAL_GIS", -"PROVIDER_CN_MAPABC", -"PROVIDER_SMITHSONIAN_INSTITUTE", -"PROVIDER_TRACKS_FOR_AFRICA", -"PROVIDER_PPWK", -"PROVIDER_LEADDOG", -"PROVIDER_CENTRE_DONNEES_ASTRONOMIQUES_STRASBOURG", -"PROVIDER_GISRAEL", -"PROVIDER_BASARSOFT", -"PROVIDER_MAPINFO", -"PROVIDER_MAPIT", -"PROVIDER_GEOBASE", 
-"PROVIDER_ORION", -"PROVIDER_CENTRAL_EUROPEAN_DATA_AGENCY", -"PROVIDER_ANASAT", -"PROVIDER_MINED_POSTCODES", -"PROVIDER_DMAPAS", -"PROVIDER_COMMON_LOCALE_DATA_REPOSITORY", -"PROVIDER_CH_SBB", -"PROVIDER_SKENERGY", -"PROVIDER_GBRMPA", -"PROVIDER_KOREA_POST", -"PROVIDER_CN_AUTONAVI", -"PROVIDER_MINED_POI", -"PROVIDER_ML_INFOMAP", -"PROVIDER_SNOOPER", -"PROVIDER_GEOSISTEMAS", -"PROVIDER_AFRIGIS", -"PROVIDER_TRANSNAVICOM", -"PROVIDER_EASYCONNECT", -"PROVIDER_LANTMATERIET", -"PROVIDER_LOGICA", -"PROVIDER_MAPKING", -"PROVIDER_DIANPING", -"PROVIDER_GEONAV", -"PROVIDER_HEIBONSHA", -"PROVIDER_DEUTSCHE_TELEKOM", -"PROVIDER_LINGUISTIC_DATA_CONSORTIUM", -"PROVIDER_ACXIOM", -"PROVIDER_DUN_AND_BRADSTREET", -"PROVIDER_FEDERAL_AVIATION_ADMINISTRATION", -"PROVIDER_INFOUSA", -"PROVIDER_INFOUSA_NIXIE", -"PROVIDER_THOMSON_LOCAL", -"PROVIDER_TELEFONICA_PUBLICIDAD_E_INFORMACION", -"PROVIDER_WIKIPEDIA", -"PROVIDER_INFOBEL", -"PROVIDER_MX_GOVERNMENT", -"PROVIDER_MX_NATIONAL_INSTITUTE_STATISTICS_GEOGRAPHY", -"PROVIDER_MX_SERVICIO_POSTAL_MEXICANO", -"PROVIDER_TELEGATE", -"PROVIDER_TELELISTAS", -"PROVIDER_MAPCITY", -"PROVIDER_EXPLAINER_DC", -"PROVIDER_DAIKEI", -"PROVIDER_NL_CHAMBER_OF_COMMERCE", -"PROVIDER_KOREA_INFO_SERVICE", -"PROVIDER_WIKITRAVEL", -"PROVIDER_FLICKR", -"PROVIDER_DIANCO", -"PROVIDER_VOLT_DELTA", -"PROVIDER_SG_GOVERNMENT", -"PROVIDER_SG_LAND_TRANSPORT_AUTHORITY", -"PROVIDER_MAPBAR", -"PROVIDER_LONGTU", -"PROVIDER_SA_GOVERNMENT", -"PROVIDER_SA_SAUDI_POST", -"PROVIDER_PEAKLIST", -"PROVIDER_LOCAL_BUSINESS_CENTER", -"PROVIDER_LOCAL_FEED_XML", -"PROVIDER_WEB", -"PROVIDER_RAILS_TO_TRAILS", -"PROVIDER_INDIACOM", -"PROVIDER_INFOMEDIA", -"PROVIDER_PICASA", -"PROVIDER_AT_GOVERNMENT", -"PROVIDER_AT_BUNDESAMT_FUR_EICH_UND_VERMESSUNGSWESEN", -"PROVIDER_AT_NATIONAL_TOURIST_OFFICE", -"PROVIDER_AT_AUSTRIA_POST", -"PROVIDER_NO_GOVERNMENT", -"PROVIDER_NO_NORSK_EIENDOMSINFORMASJON", -"PROVIDER_NO_POSTEN_NORGE_AS", -"PROVIDER_CH_GOVERNMENT", -"PROVIDER_CH_SWISS_POST", -"PROVIDER_CH_SWISSTOPO", 
-"PROVIDER_CH_SWISS_NATIONAL_PARK", -"PROVIDER_NAVIT", -"PROVIDER_GEOSEARCH", -"PROVIDER_DE_GOVERNMENT", -"PROVIDER_BUNDESAMT_KARTOGRAPHIE_UND_GEODASIE", -"PROVIDER_BUNDESNETZAGENTUR", -"PROVIDER_SCHOBER_GROUP", -"PROVIDER_MIREO", -"PROVIDER_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WEBSTER_TEXAS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_AMHERST_MASSACHUSETTS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BLOOMINGTON_INDIANA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PASADENA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_CHULA_VISTA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_TEMPE_ARIZONA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_COLUMBUS_OHIO", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PORTAGE_MICHIGAN", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GEORGETOWN_KENTUCKY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GREENVILLE_SOUTH_CAROLINA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_NASHVILLE_TENNESSEE", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WASHINGTON_DISTRICT_OF_COLUMBIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BOULDER_COLORADO", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY_ENVIRONMENT_BAY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY_BIELSKO_BIALA", -"PROVIDER_DE_PUBLIC_MUNICIPALITY", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_FRANKFURT", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_HAMBURG", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_KARLSRUHE", -"PROVIDER_PT_PUBLIC_MUNICIPALITY", -"PROVIDER_PT_PUBLIC_MUNICIPALITY_SANTA_CRUZ", -"PROVIDER_AT_PUBLIC_MUNICIPALITY", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_KLAGENFURT", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_LINZ", -"PROVIDER_ES_PUBLIC_MUNICIPALITY", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_AZKOITIA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_BEASAIN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_GIRONA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_SAN_SEBASTIAN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_CATALUNYA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_HONDARRIBIA", -"PROVIDER_AU_PUBLIC_MUNICIPALITY", -"PROVIDER_AU_PUBLIC_MUNICIPALITY_LAUNCESTON_TASMANIA", 
-"PROVIDER_IS_PUBLIC_MUNICIPALITY", -"PROVIDER_IS_PUBLIC_MUNICIPALITY_REYKJAVIK", -"PROVIDER_NL_PUBLIC_MUNICIPALITY", -"PROVIDER_NL_PUBLIC_MUNICIPALITY_AMELSTEVEEN", -"PROVIDER_BE_PUBLIC_MUNICIPALITY", -"PROVIDER_BE_PUBLIC_MUNICIPALITY_ANTWERPEN", -"PROVIDER_CA_PUBLIC_MUNICIPALITY", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_FREDERICTON_NEW_BRUNSWICK", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_KAMLOOPS_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_NANAIMO_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_BANFF_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_CALGARY_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_TORONTO_ONTARIO", -"PROVIDER_SE_PUBLIC_MUNICIPALITY", -"PROVIDER_SE_PUBLIC_MUNICIPALITY_UMEA", -"PROVIDER_UA_PUBLIC_MUNICIPALITY", -"PROVIDER_UA_PUBLIC_MUNICIPALITY_KHARKIV", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY_AQUA_CALIENTE_CAHUILLA_INDIANS", -"PROVIDER_FR_PUBLIC_MUNICIPALITY", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_PONT_AUDEMER", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_BORDEAUX", -"PROVIDER_SG_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY_RIO_DE_JANEIRO", -"PROVIDER_MAPCUBE", -"PROVIDER_3D_REALITYMAPS", -"PROVIDER_DEUTSCHES_ZENTRUM_FUR_LUFT_UND_RAUMFAHRT", -"PROVIDER_3D_CITIES_SOCIEDADE_ANONIMA", -"PROVIDER_DISNEY", -"PROVIDER_CYBERCITY", -"PROVIDER_PRECISION_LIGHTWORKS_MODELWORKS", -"PROVIDER_VIRTUAL_HUNGARY_LIMITED", -"PROVIDER_VIRTUEL_CITY", -"PROVIDER_SCREAMPOINT_INTERNATIONAL", -"PROVIDER_AGENTSCHAP_VOOR_GEOGRAFISCHE_INFORMATIE_VLAANDEREN", -"PROVIDER_FR_GOVERNMENT", -"PROVIDER_FR_INSTITUT_GEOGRAPHIQUE_NATIONAL", -"PROVIDER_FR_CADASTRE", -"PROVIDER_DIADIEM", -"PROVIDER_THE_WEATHER_CHANNEL", -"PROVIDER_COWI", -"PROVIDER_FALKPLAN_ANDES", -"PROVIDER_NL_GOVERNMENT", -"PROVIDER_NL_KADASTER", -"PROVIDER_NL_BOARD_OF_TOURISM_AND_CONVENTIONS", -"PROVIDER_DIGITAL_MAP_PRODUCTS", -"PROVIDER_SILICE_DIGITAL", -"PROVIDER_TYDAC", -"PROVIDER_ALBRECHT_GOLF", -"PROVIDER_HEALTH_CH", -"PROVIDER_VISITDENMARK", 
-"PROVIDER_FLYHERE", -"PROVIDER_DIGITAL_DATA_SERVICES", -"PROVIDER_MECOMO", -"PROVIDER_ZA_GOVERNMENT", -"PROVIDER_ZA_RURAL_DEVELOPMENT_LAND_REFORM", -"PROVIDER_SENSIS", -"PROVIDER_JJCONNECT", -"PROVIDER_OPPLYSNINGEN", -"PROVIDER_TELLUS", -"PROVIDER_IQONIA", -"PROVIDER_BE_GOVERNMENT", -"PROVIDER_BE_NATIONAAL_GEOGRAFISCH_INSTITUUT", -"PROVIDER_BE_BRUSSELS_MOBILITY", -"PROVIDER_YELLOWMAP_AG", -"PROVIDER_STIFTUNG_GESUNDHEIT", -"PROVIDER_GIATA", -"PROVIDER_SANPARKS", -"PROVIDER_CENTRE_DINFORMATIQUE_POUR_LA_REGION_BRUXELLOISE", -"PROVIDER_INFOPORTUGAL", -"PROVIDER_NEGOCIOS_DE_TELECOMUNICACOES_E_SISTEMAS_DE_INFORMACAO", -"PROVIDER_COLLINS_BARTHOLOMEW", -"PROVIDER_PROTECT_PLANET_OCEAN", -"PROVIDER_KARTTAKESKUS", -"PROVIDER_FI_GOVERNMENT", -"PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION", -"PROVIDER_FI_NATIONAL_LAND_SURVEY", -"PROVIDER_FI_STATISTICS_FINLAND", -"PROVIDER_GB_GOVERNMENT", -"PROVIDER_GB_ORDNANCE_SURVEY", -"PROVIDER_NATURAL_ENGLAND", -"PROVIDER_WELSH_GOVERNMENT", -"PROVIDER_GB_OFFICE_FOR_NATIONAL_STATISTICS", -"PROVIDER_EPSILON", -"PROVIDER_PARTNER_FRONT_END", -"PROVIDER_CARTESIA", -"PROVIDER_SE_GOVERNMENT", -"PROVIDER_SE_TRAFIKVERKET", -"PROVIDER_SE_NATURVARDSVERKET", -"PROVIDER_IE_GOVERNMENT", -"PROVIDER_IE_ORDNANCE_SURVEY_IRELAND", -"PROVIDER_LU_GOVERNMENT", -"PROVIDER_LU_P_AND_T_LUXEMBOURG", -"PROVIDER_LU_ADMINISTRATION_DU_CADASTRE_ET_DE_LA_TOPOGRAPHIE", -"PROVIDER_LU_NATIONAL_TOURIST_OFFICE", -"PROVIDER_MAPFLOW", -"PROVIDER_TKARTOR", -"PROVIDER_JUMPSTART", -"PROVIDER_EPTISA", -"PROVIDER_MC_GOVERNMENT", -"PROVIDER_MC_PRINCIPAUTE_DE_MONACO", -"PROVIDER_MONOLIT", -"PROVIDER_ENVIRONMENTAL_SYSTEMS_RESEARCH_INSTITUTE", -"PROVIDER_MODIS", -"PROVIDER_GEOX", -"PROVIDER_GEODIRECTORY", -"PROVIDER_GEOPLAN", -"PROVIDER_INFODIREKT", -"PROVIDER_GEOGLOBAL", -"PROVIDER_DEUTSCHE_POST", -"PROVIDER_TRACASA", -"PROVIDER_CORREOS", -"PROVIDER_ES_GOVERNMENT", -"PROVIDER_ES_CENTRO_NACIONAL_DE_INFORMACION_GEOGRAFICA", -"PROVIDER_EDIMAP", -"PROVIDER_VERIZON", 
-"PROVIDER_NATIONAL_GEOGRAPHIC_MAPS", -"PROVIDER_PROMAPS", -"PROVIDER_CONSODATA", -"PROVIDER_DE_AGOSTINI", -"PROVIDER_FEDERPARCHI", -"PROVIDER_NAVIGO", -"PROVIDER_ITALIAMAPPE", -"PROVIDER_CZECOT", -"PROVIDER_NATURAL_EARTH", -"PROVIDER_REGIO", -"PROVIDER_SHIPWRECK_CENTRAL", -"PROVIDER_RUTGERS_STATE_UNIVERSITY", -"PROVIDER_TWINICE", -"PROVIDER_NORTHERN_IRELAND_TOURIST_BOARD", -"PROVIDER_INFOGROUP", -"PROVIDER_TNET", -"PROVIDER_CTT_CORREIOS_DE_PORTUGAL", -"PROVIDER_EUROPARC", -"PROVIDER_IUPPITER", -"PROVIDER_MICHAEL_BAUER_INTERNATIONAL", -"PROVIDER_LEPTON", -"PROVIDER_MAPPOINT", -"PROVIDER_GEODATA", -"PROVIDER_RU_GOVERNMENT", -"PROVIDER_RU_FNS_KLADR", -"PROVIDER_BR_GOVERNMENT", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DO_MEIO_AMBIENTE_E_DOS_RECURSOS_NATURAIS_RENOVAVEIS", -"PROVIDER_BR_MINISTERIO_DO_MEIO_AMBIENTE", -"PROVIDER_BR_AGENCIA_NACIONAL_DE_AGUAS", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DE_GEOGRAFIA_E_ESTATISTICA", -"PROVIDER_BR_FUNDACAO_NACIONAL_DO_INDIO", -"PROVIDER_BR_DEPARTAMENTO_NACIONAL_DE_INFRAESTRUTURA_DE_TRANSPORTES", -"PROVIDER_AZAVEA", -"PROVIDER_NORTHSTAR", -"PROVIDER_COMMEDI", -"PROVIDER_NEXUS_GEOGRAFICS", -"PROVIDER_INFOERA", -"PROVIDER_AD_GOVERNMENT", -"PROVIDER_AD_AREA_DE_CARTOGRAFIA", -"PROVIDER_MAXXIMA", -"PROVIDER_SI_GOVERNMENT", -"PROVIDER_SI_AGENCY_FOR_ENVIRONMENT", -"PROVIDER_TRANSPORT_HI_TECH_CONSULTANTS", -"PROVIDER_L1_TECHNOLOGIES", -"PROVIDER_TELEMEDIA", -"PROVIDER_CDCOM_PROGOROD", -"PROVIDER_MIT_CITYGUIDE", -"PROVIDER_SUNCART", -"PROVIDER_MICROMAPPER", -"PROVIDER_RICHI", -"PROVIDER_FORUM44", -"PROVIDER_SEAT", -"PROVIDER_VALASSIS", -"PROVIDER_NAVICOM", -"PROVIDER_COLTRACK", -"PROVIDER_PSMA_AUSTRALIA", -"PROVIDER_PT_DUTA_ASTAKONA_GIRINDA", -"PROVIDER_CA_GOVERNMENT", -"PROVIDER_STATISTICS_CANADA", -"PROVIDER_TOCTOC", -"PROVIDER_RMSI", -"PROVIDER_TRUE_TECHNOLOGY", -"PROVIDER_INCREMENT_P_CORPORATION", -"PROVIDER_GOJAVAS", -"PROVIDER_GEOINFORMATION_GROUP", -"PROVIDER_CYBERSOFT", -"PROVIDER_TSENTR_EFFEKTIVNYKH_TEKHNOLOGIY", -"PROVIDER_EE_GOVERNMENT", 
-"PROVIDER_EE_MAA_AMET", -"PROVIDER_GASBUDDY", -"PROVIDER_DK_GOVERNMENT", -"PROVIDER_DK_GEODATASTYRELSEN", -"PROVIDER_MURCIA_REGION_GOVERNMENT", -"PROVIDER_CORREIOS", -"PROVIDER_WEST_WORLD_MEDIA", -"PROVIDER_INTERNATIONAL_MAPPING_ASSOCIATION", -"PROVIDER_MEDICARE", -"PROVIDER_POLARIS", -"PROVIDER_TW_GOVERNMENT", -"PROVIDER_TW_MINISTRY_OF_THE_INTERIOR_SURVEYING_AND_MAPPING_CENTER", -"PROVIDER_NORDECA", -"PROVIDER_AFRIMAPPING", -"PROVIDER_OVERDRIVE", -"PROVIDER_PROVIDER_NETWORK_DIRECTORIES", -"PROVIDER_BR_MINISTERIO_DA_SAUDE", -"PROVIDER_DIGITAL_EGYPT", -"PROVIDER_INRIX", -"PROVIDER_ARPINDO", -"PROVIDER_IT_GOVERNMENT", -"PROVIDER_ISTITUTO_GEOGRAFICO_MILITARE", -"PROVIDER_EAST_END_GROUP", -"PROVIDER_INGEOLAN", -"PROVIDER_SEMACONNECT", -"PROVIDER_BLINK", -"PROVIDER_EVGO", -"PROVIDER_CHARGEPOINT", -"PROVIDER_TPL_TRAKKER", -"PROVIDER_OI", -"PROVIDER_MAPARADAR", -"PROVIDER_SINGAPORE_POST", -"PROVIDER_CHARGEMASTER", -"PROVIDER_TESLA", -"PROVIDER_VISICOM", -"PROVIDER_GEOLYSIS", -"PROVIDER_ZEPHEIRA", -"PROVIDER_HUBJECT", -"PROVIDER_PODPOINT", -"PROVIDER_CHARGEFOX", -"PROVIDER_KR_GOVERNMENT", -"PROVIDER_KR_MOLIT", -"PROVIDER_KR_MINISTRY_OF_THE_INTERIOR_AND_SAFETY", -"PROVIDER_CRITCHLOW", -"PROVIDER_EIFRIG", -"PROVIDER_GIREVE", -"PROVIDER_CN_NAVINFO", -"PROVIDER_JAPAN_CHARGE_NETWORK", -"PROVIDER_NOBIL", -"PROVIDER_INDIA_BANKS", -"PROVIDER_INDONESIA_ELECTION_KPU", -"PROVIDER_CAREERS360", -"PROVIDER_SOURCE_LONDON", -"PROVIDER_EVBOX", -"PROVIDER_JP_GOVERNMENT", -"PROVIDER_JP_MINISTRY_OF_THE_ENVIRONMENT", -"PROVIDER_YUMYUM", -"PROVIDER_HWW_AUSTRALIA", -"PROVIDER_CINERGY", -"PROVIDER_MTIME", -"PROVIDER_KULTUNAUT", -"PROVIDER_BLITZ", -"PROVIDER_PIA", -"PROVIDER_INTERPARK", -"PROVIDER_CINEMA_ONLINE", -"PROVIDER_BELBIOS", -"PROVIDER_MOVIESEER", -"PROVIDER_SODAMEDYA", -"PROVIDER_ATMOVIES", -"PROVIDER_HOTELBEDS", -"PROVIDER_VERICRED", -"PROVIDER_CIRRANTIC", -"PROVIDER_GOGO_LABS", -"PROVIDER_ELECTRIFY_AMERICA", -"PROVIDER_CMS_MPPUF", -"PROVIDER_DIGIROAD", -"PROVIDER_KONTEX_GEOMATICS", 
-"PROVIDER_NZ_GOVERNMENT", -"PROVIDER_NZ_LINZ", -"PROVIDER_NZ_DOC", -"PROVIDER_FASTNED", -"PROVIDER_DESTINY_CS", -"PROVIDER_IONITY", -"PROVIDER_EV_CONNECT", -"PROVIDER_PANPAGES", -"PROVIDER_ETECNIC", -"PROVIDER_VOLTA", -"PROVIDER_NISSAN_MEXICO", -"PROVIDER_BMW_GROUP_LATIN_AMERICA", -"PROVIDER_FEDERAL_ELECTRICITY_COMMISSION_MEXICO", -"PROVIDER_VOLVO_CARS_BRASIL", -"PROVIDER_CHARGE_AND_PARKING", -"PROVIDER_DEDUCE_TECHNOLOGIES", -"PROVIDER_SK_TELECOM", -"PROVIDER_ECO_MOVEMENT", -"PROVIDER_GOOGLE_GMS", -"PROVIDER_EASYWAY", -"PROVIDER_PHYSICIAN_COMPARE", -"PROVIDER_HOSPITAL_COMPARE", -"PROVIDER_ENDOLLA_BARCELONA", -"PROVIDER_BE_CHARGE", -"PROVIDER_ONE_NETWORK", -"PROVIDER_CARENAV_DUPLEX", -"PROVIDER_CARENAV_POI", -"PROVIDER_IN_GOVERNMENT", -"PROVIDER_SURVEY_OF_INDIA", -"PROVIDER_E_ON", -"PROVIDER_ELECTRIFY_CANADA", -"PROVIDER_GRIDCARS", -"PROVIDER_DRIVECO", -"PROVIDER_GREEN_ACTION_STUDIOS", -"PROVIDER_GREEN_ACTION_STUDIO", -"PROVIDER_EVINY", -"PROVIDER_MASTERCARD", -"PROVIDER_VATTENFALL", -"PROVIDER_VIETGIS", -"PROVIDER_UNITE", -"PROVIDER_NEOGY", -"PROVIDER_AMPUP", -"PROVIDER_LOOP", -"PROVIDER_ZEST", -"PROVIDER_EZVOLT", -"PROVIDER_JOLT", -"PROVIDER_CHARGESMITH", -"PROVIDER_PLUGO", -"PROVIDER_ELECTRIC_ERA", -"PROVIDER_FLO", -"PROVIDER_DIGITAL_CHARGING_SOLUTIONS", -"PROVIDER_ELECTRIC_PE" -], -"enumDeprecated": [ -false, -false, -true, -false, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -true, -false, -true, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -true, -true, -false, -true, -false, -true, -true, -false, -true, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -false, -false, -false, -true, -false, -true, -false, -true, -true, -true, -false, -true, -true, -true, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, 
-false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"ABSTRACT The root of all provider types. This should never be present on an actual feature, but can be useful when calling InCategory.", -"not actually a legal value, used as sentinel", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730C2", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This is an internal *only* provider meant for sending wipeout requests to mapfacts.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Never rely on source infos with this provider to identify whether or not a feature is a Prominent Place! 
Instead, use the proper API, localsearch_clustering::QualityTierHelper::IsProminentPlace().", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Shopping Attributes Discovery", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"", -"The next new \"Google\" provider entries should be placed above.", -"UMBRELLA", -"", -"", -"", -"This is a testing provider for teams that wish to integrate with components of the Geo Data Infrastructure that require a valid provider. No production data should ever be sent using this provider.", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"0x1117F must not be used, since its range extends the PROVIDER_GOOGLE hierarchy.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated in favor of PROVIDER_GOOGLE_STRUCTURED_DATA (for attributes) and PROVIDER_GOOGLE_GEO_TIGER (for categories).", -"", -"0x1117FF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. aka Local AI.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"No data is obtained from this provider. It is only used to identify features that must be served on MapMaker.", -"", -"", -"", -"", -"", -"", -"0x1117FFF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. 
Synthetically generated areas (sublocalities/neighborhoods/ postal codes/etc) based on dropped terms from approximate geocoding. More info on go/syntheticareas.", -"", -"", -"", -"Similar to Google Transit, a provider that aggregates positions of bicycle rental points that we have agreements with to show on maps", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This range has been depleted. For new Ids see PROVIDER_GOOGLE_SUBRANGE above.", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"Small Scale International Boundary Lines", -"", -"NOAA", -"PGC", -"USDA", -"National Provider Identifier Registry", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"old name for PROVIDER_NAVIGO", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"See b/33687395", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"ABSTRACT", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", 
-"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"IBAMA", -"MMA", -"ANA", -"IBGE", -"FUNAI", -"DNIT", -"", -"", -"", -"", -"", -"ABSTRACT", -"Department of Cartography", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"0x12 and 0x120 are not to be used. OOO CET", -"ABSTRACT", -"Estonian Land Board", -"", -"ABSTRACT", -"Danish Geodata Agency", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"Ministry of land, infrastructure and transport, \uad6d\ud1a0\uad50\ud1b5\ubd80, Guktogyotongbu", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Use PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION.", -"", -"ABSTRACT", -"Land Information New Zealand", -"NZ Department of Conservation", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Note: Next available value is 0x1275." 
-], -"type": "string" -}, -"sourceDataset": { -"deprecated": true, -"type": "string" -} -}, -"type": "object" -}, -"GeostoreOpeningHoursProto": { -"description": "Message to represent opening hours including regular weekly hours and a set of exceptions.", -"id": "GeostoreOpeningHoursProto", -"properties": { -"exception": { -"description": "Date delimited exceptions to the typical recurring opening hours. May only be present if regular weekly hours are also specified.", -"items": { -"$ref": "GeostoreExceptionalHoursProto" -}, -"type": "array" -}, -"regularHours": { -"$ref": "GeostoreBusinessHoursProto", -"description": "Typical recurring opening hours, expressed as a weekly schedule. NOTE: this field was introduced to have a more client-friendly format for representing weekly hours but, as of November 2018, it's not used for the main opening hours of TYPE_ESTABLISHMENT features (instead, the data is stored in the `EstablishmentProto.hours` field, see b/23105782 tracking the possible schema migration). It is however used in other contexts where `OpeningHoursProto` appears in the Geo Schema. In openinghours.h there is a utility function `GetOpeningHoursFromFeature` that merges `EstablishmentProto.hours` into this proto." -} -}, -"type": "object" -}, -"GeostoreOperationsProto": { -"description": "Information about a feature's operations, e.g. when the feature is temporarily closed.", -"id": "GeostoreOperationsProto", -"properties": { -"temporaryClosure": { -"description": "Records temporary status changes of the feature, such as remodel, vacation, etc.: the feature is temporarily (but not permanently) unavailable. See go/geo-schema-reference:operational-lifecycle#temporary-closure for guidance on interpreting this data and constraints on writing it. 
NOTE: Order of the TemporaryClosureProtos is *not* guaranteed to be chronological.", -"items": { -"$ref": "GeostoreTemporaryClosureProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreOverrideBorderStatusProto": { -"description": "This message captures a border status override. For instance, if the TYPE_BORDER feature between China and Hong Kong must be hidden on ditu.google.cn (the Chinese domain for Google Maps) but may be displayed on other domains, we will have a country override border status for \"CN\" set to STATUS_NEVER_DISPLAY. At least one override restriction must be defined. Currently the only supported restriction is by country code.", -"id": "GeostoreOverrideBorderStatusProto", -"properties": { -"countryCode": { -"description": "The two-letter ISO 3166-1 country code corresponding to the domain this status override applies to, when rendering the border polyline.", -"type": "string" -}, -"status": { -"description": "The override status, from the BorderStatus enumeration. The value here must be different from the main status (otherwise there's no point in providing the override).", -"enum": [ -"STATUS_NORMAL", -"STATUS_DISPUTED", -"STATUS_UNSURVEYED", -"STATUS_INTERNATIONAL_WATER", -"STATUS_NEVER_DISPLAY", -"STATUS_TREATY", -"STATUS_PROVISIONAL", -"STATUS_NO_LABEL" -], -"enumDescriptions": [ -"Most border lines have this status, which typically means both parties agree on the location of the line. These might be called \"de jure\" borders.", -"This status is used when the two parties disagree on the location of the line. There can be multiple border lines in a given disputed area. One might be the line proposed by country \"A\" and another the line proposed by country \"B\". A third line might mark the de facto line of control. Other border lines might indicate historical borders, e.g., \"1949 Armistice Line\".", -"This is used for one section of border between Argentina and Chile. 
Both parties agree that a border line exists somewhere on the glacier, but the exact location has not been determined.", -"This is a border line between a country and international water.", -"This status is only used for the borders that should never display to users. Typically it is for the borders that we don't want to represent but that we would still like to use to model country boundaries, e.g. Hong Kong and Macau.", -"This is used for borders that refer to well-defined boundaries which have been established by a specific treaty, agreement, armistice or other such agreement between two or more parties, yet the position and/or status is not considered to be legal and final (e.g., the final demarcation of the exact position has not occurred or the final political status is not resolved).", -"This is used for borders for which a formal agreement has not been established between the parties involved, yet the existing line acts as a de facto functional border without existence of any dispute.", -"This is used for borders which should not have country labels to either side, but should otherwise be styled the same as a border with STATUS_NORMAL. This is typically for borders which are not themselves disputed but still form part of the boundary around a disputed area that is not otherwise modeled or labeled as a country, or as an override where we are obligated to remove labels for region specific versions of Maps." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostorePaintedElementLogicalColorProto": { -"description": "Painted element logical color. Most legal definitions only specify a color category (like \"yellow\") and don't specify an exact hue, rather stating that the colors must be distinguishable from each other. 
We refer to this as the \"logical\" color.", -"id": "GeostorePaintedElementLogicalColorProto", -"properties": { -"color": { -"enum": [ -"UNKNOWN_LOGICAL_COLOR", -"WHITE", -"YELLOW", -"RED", -"GREEN", -"BLUE", -"BLACK", -"GREY", -"ORANGE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreParkingAllowanceProto": { -"description": "Describes the parking allowances for a feature, or the situations and requirements under which one may be permitted to park, such as certain vehicle types, valet parking, and permit parking. Also describes the cost of parking, which may vary based on the time and duration parked. Includes vehicle type, any other conditions for eligibility, and the cost of parking, which may vary based on the time and duration parked. If is_discount is set to true on a given allowance, that allowance represents a discount that can be applied to lower the cost of non-discount allowances specified on feature via parking_provider_feature. In this way, allowances can be 'layered,' i.e. appended, onto each other when denormalizing references via parking_provider_feature.", -"id": "GeostoreParkingAllowanceProto", -"properties": { -"allowanceType": { -"description": "The type of parking for this allowance. Allowance details only apply to the type of parking specified.", -"enum": [ -"STANDARD", -"VALET", -"PERMIT", -"PICKUP_GOODS", -"PICKUP_PASSENGERS" -], -"enumDescriptions": [ -"Standard parking without qualifications. This is a long-term allowance.", -"This is a long-term allowance that only applies for valet parking.", -"The is a long-term allowance that applies to permit-holders. See permit_type for details.", -"This is a short-term allowance that only applies to picking up goods.", -"This is a short-term allowance that only applies to picking up passengers." 
-], -"type": "string" -}, -"isDiscount": { -"description": "If true, this allowance represents a discount rather than an individual rate; any rate values specified in this allowance describe a discount to be applied to the non-discount allowances in the ParkingProto.", -"type": "boolean" -}, -"minPurchaseForValidation": { -"description": "If this rate requires validation, this expresses the minimum purchase required for validation in each applicable currency. Should have an ID of /measurement_unit/money_value and consist of two properties: one with an ID of /measurement_unit/money_value/amount and a float value with the amount, and another with the ID /measurement_unit/money_value/currency and an ID value with the MID of the proper currency (from the /finance/currency type). A value of 0 suggests that no purchase is required. If empty, this suggests that no validation is required for this rate.", -"items": { -"$ref": "FreebaseTopic" -}, -"type": "array" -}, -"permitType": { -"description": "Any additional details about the permit type; e.g. \u201cZone A\u201d. In any local languages. Should only be set if allowance_type is PERMIT.", -"items": { -"$ref": "GeostoreLanguageTaggedTextProto" -}, -"type": "array" -}, -"serviceType": { -"description": "The types of services that this parking allowance applies to. For instance, some cities have streets that only allow traditional taxis to pick up passengers.", -"items": { -"enum": [ -"SERVICE_ALL", -"SERVICE_GENERAL_DRIVER", -"SERVICE_RIDESHARE", -"SERVICE_TAXI", -"SERVICE_COMMERCIAL" -], -"enumDescriptions": [ -"Signifies that this restriction is always applicable, regardless of the service type.", -"Signifies that this restriction is applicable to a general driver.", -"Signifies that this restriction is applicable to a ridesharing driver.", -"Signifies that this restriction is applicable to a traditional taxi driver.", -"Signifies that this restriction is applicable to a commercial vehicle. 
This generally applies to vehicles used for carrying goods or fare-paying passengers. However, the exact definition of a commercial vehicle can vary by locality." -], -"type": "string" -}, -"type": "array" -}, -"timeBasedRate": { -"description": "Describes the rate structures. Each TimeBasedRateProto defines a rate which may apply based on a particular arrival, departure or utilization time; for example, one rate might apply if arriving before 9am, and another might apply regardless of arrival or departure time.", -"items": { -"$ref": "GeostoreTimeBasedRateProto" -}, -"type": "array" -}, -"vehicleType": { -"description": "Restrictions on which vehicle type(s) the allowance applies to. By default, the allowance applies to any vehicle types.", -"enum": [ -"ANY", -"CAR", -"MOTORCYCLE", -"TRUCK" -], -"enumDescriptions": [ -"", -"", -"", -"Note: The exact definition of a truck varies by city, but it usually refers to vehicles with three or more axles. This value does not necessarily apply to all commercial vehicles or vehicles colloquially referred to as trucks (eg. pickup trucks)." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreParkingProto": { -"description": "Used to describe the parking facilities provided by or available to a feature. Features of TYPE_ROAD or TYPE_COMPOUND can have a ParkingProto (with parking_provider_feature unset) that describes the parking facilities provided by that feature. Features of TYPE_COMPOUND or TYPE_ESTABLISHMENT_POI can have a ParkingProto (with parking_provider_feature set) to indicate that the feature has dedicated parking and provide details about it, and defer some details of the parking facilities to the referent feature(s). ParkingProto is maintained by the Harbor Pilot team (go/harbor-pilot). 
Detailed modeling information is described at go/parking-schema.", -"id": "GeostoreParkingProto", -"properties": { -"allowance": { -"description": "Describes the parking allowances for the feature, which are the situations and requirements under which one is permitted to park at the feature\u2019s parking facilities, or discounts that a user may be eligible for.", -"items": { -"$ref": "GeostoreParkingAllowanceProto" -}, -"type": "array" -}, -"openingHours": { -"$ref": "GeostoreOpeningHoursProto", -"description": "Hours in which the parking facility is open; that is, permits both arrivals and departures of the facility. Should only be set on compounds (i.e. parking lots or garages); roads are considered to always be \u201copen,\u201d though parking at certain times may be prohibited via restrictions. If unset on a compound, this suggests we don\u2019t know the opening hours, or they are the same as the hours of the entity for which this feature offers parking facilities." -}, -"parkingAvailable": { -"description": "Indicates whether long-term parking is available at the feature; if true, long-term parking is available at the feature and parking allowances may be present on this feature, or parking_provider_feature may indicate deferred parking feature(s). If false, this is an explicit statement that there is no long-term parking associated with this feature. If unset, we don't know whether there is long-term parking associated with this feature. If false or unset, only additional restrictions or short-term allowances will be populated.", -"type": "boolean" -}, -"parkingProviderFeature": { -"description": "If empty, indicates that the feature containing this ParkingProto provides parking facilities, which are described by this proto. If nonempty, indicates that the feature with this ParkingProto does not contain parking facilities itself, but visitors of this feature may use the parking available to the referent feature(s). 
The referent feature(s) may themselves contain parking facilities or defer to other features. A ParkingProto may defer parking details to another feature, but still include its own data. This suggests that a visitor of the referrer feature is eligible for different rates or discounts. The data in these fields applies transitively, and any fields in a referrer may be applied to the referent (for a visitor of the referrer).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"restriction": { -"description": "Describes any parking restrictions that apply to this feature. Should only be set on road segments for which parking is explicitly prohibited for some or all times; for roads which do not prohibit parking and for all other facilities, the ability to park should be expressed using allowances. In the instance that both a restriction and an allowance applies at a given time, restrictions always have precedence over the same parking allowances. However, explicit short-term allowances (PICKUP_GOODS, PICKUP_PASSENGERS) take precedence over general NO_PARKING, NO_STANDING, or NO_STOPPING restrictions.", -"items": { -"$ref": "GeostoreParkingRestrictionProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreParkingRestrictionProto": { -"description": "Expresses a parking restriction on a road; i.e. times at which parking on the road is prohibited.", -"id": "GeostoreParkingRestrictionProto", -"properties": { -"restrictedHours": { -"$ref": "GeostoreTimeScheduleProto", -"description": "Times at which parking is prohibited." -}, -"restrictionType": { -"description": "clang-format on The type of restriction that applies at this time.", -"enum": [ -"RESTRICTION_UNKNOWN", -"RESTRICTION_PARKING", -"RESTRICTION_STANDING", -"RESTRICTION_STOPPING", -"RESTRICTION_PICKUP_GOODS", -"RESTRICTION_PICKUP_PASSENGERS" -], -"enumDescriptions": [ -"Default value. Should never be set in practice.", -"Parking is not allowed. 
This is a long-term restriction.", -"No standing is allowed. This means a vehicle cannot wait or stop to load/unload packages. A vehicle may stop to drop-off passengers or to pick up passengers already waiting at the location. This is a short-term restriction.", -"A vehicle may not stop at this location at any time. This is a short-term restriction.", -"A vehicle may not stop here to pick up goods. This is a short-term restriction.", -"A vehicle may not stop here to pick up passengers. This is a short-term restriction." -], -"type": "string" -}, -"serviceType": { -"description": "The types of services that this parking restriction applies to. We expect most parking restrictions to apply to all services, but some airports have specific rideshare parking or taxi parking zones.", -"items": { -"enum": [ -"SERVICE_ALL", -"SERVICE_GENERAL_DRIVER", -"SERVICE_RIDESHARE", -"SERVICE_TAXI", -"SERVICE_COMMERCIAL" -], -"enumDescriptions": [ -"Signifies that this restriction is always applicable, regardless of the service type.", -"Signifies that this restriction is applicable to a general driver.", -"Signifies that this restriction is applicable to a ridesharing driver.", -"Signifies that this restriction is applicable to a traditional taxi driver.", -"Signifies that this restriction is applicable to a commercial vehicle. This generally applies to vehicles used for carrying goods or fare-paying passengers. However, the exact definition of a commercial vehicle can vary by locality." -], -"type": "string" -}, -"type": "array" -}, -"vehicleType": { -"description": "The types of vehicles that this parking restriction applies to. For instance, some streets may allow motorcycles to park but not automobiles or trucks.", -"items": { -"enum": [ -"ANY", -"CAR", -"MOTORCYCLE", -"TRUCK" -], -"enumDescriptions": [ -"", -"", -"", -"Note: The exact definition of a truck varies by city, but it usually refers to vehicles with three or more axles. 
This value does not necessarily apply to all commercial vehicles or vehicles colloquially referred to as trucks (eg. pickup trucks)." -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostorePeakProto": { -"description": "This protocol buffer holds related data for features of type TYPE_PEAK and TYPE_VOLCANO.", -"id": "GeostorePeakProto", -"properties": { -"prominenceMeters": { -"description": "Topographic prominence in meters: the height of the peak\u2019s summit above the lowest contour line encircling it and no higher summit.", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePedestrianCrossingProto": { -"description": "A crossing describes a path from the end point of a segment to the start point of its sibling. Each individual crossing should uniquely represent a physically distinct crossing in the real world. Pedestrian crossings are bidirectional. This proto represents \"simple\" crossings. More complicated crossings (such as the diagonal crosswalk at Shibuya Station in Tokyo) will be represented using a separate pedestrian network. Example 1: Standard four way crossing. Assume each road (A/B/C) has a crosswalk (denoted by a '-') right before intersection X. Assume crossing at D is prohibited (denoted by a '%'). Assume \"prime\" roads (A',B',...) are OUT segments relative to X. A\\\\A' B//B' \\\\ // \\\\ // - - X - % // \\\\ // \\\\ C'//C D'\\\\D * A CROSSABLE PedestrianCrossing should be added to A, B and C. * An UNCROSSABLE PedestrianCrossing should be added to D. Example 2: Simple intersection with one crossing. Assume intersection X was added to accommodate the crosswalk (through X, denoted by '-'). Assume \"prime\" roads (A',B',...) are OUT segments relative to X. A\\\\A' \\\\ \\\\ -X- \\\\ \\\\ B'\\\\B * A CROSSABLE PedestrianCrossing must be added to either A or B, but not both because each real-world crossing should be represented exactly once. 
Duplicative crossings may be arbitrarily removed.", -"id": "GeostorePedestrianCrossingProto", -"properties": { -"angleDegrees": { -"description": "This value specifies the angle of the crosswalk. Zero degrees represents a crosswalk perpendicular to the direction of travel, towards the right side of the segment. The crosswalk angle, winds clockwise. Range [-90, 90]. The following crosswalk would have a 15 degree angle: / / <--/-------------------------------- / /", -"format": "double", -"type": "number" -}, -"crossAnywhere": { -"description": "This value enables crossing anywhere (not just at the segment\u2019s endpoint), typically used on long, low-traffic residential streets. This attribute is only respected for trivial segment -> sibling routes. All other routes can cross at a MapFacts intersection.", -"type": "boolean" -}, -"crossingType": { -"description": "Crossing type is used as a restriction and can also be used for rendering.", -"enum": [ -"UNKNOWN", -"CROSSABLE", -"UNMARKED_CROSSING", -"MARKED_CROSSING", -"UNCROSSABLE" -], -"enumDescriptions": [ -"RESERVED", -"", -"", -"", -"" -], -"type": "string" -}, -"offset": { -"description": "The crossing offset defines a fraction between the distance from the segment endpoint to the centerline of the crosswalk and the length of the segment. For example, the segment length is 20 meters and the distance from segment end to center of crosswalk is 2 meters, the value of offset will be 0.1.", -"format": "float", -"type": "number" -}, -"restriction": { -"description": "Restrictions for this crossing (such as constructions on the crosswalk). They must not have subpath or travel_mode.", -"items": { -"$ref": "GeostoreRestrictionProto" -}, -"type": "array" -}, -"width": { -"description": "This value defines the full width of the crossing in the direction perpendicular to the direction which pedestrians walk on the crossing (in meters). 
The crossing is allowed to \"spill\" into the next segment (0.5 * width can be greater than the offset). Cannot be a negative value.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePhysicalLineProto": { -"description": "Describes a single physical marker line. NOTE: PhysicalLineProto could be compared against one another by canonicalizing them via GetCanonicalPhysicalLine() in google3/geostore/base/internal/lane_marker.cc. Any fields that don't contribute to the definition of a physical line in the real world should be bundled with the annotative fields near the bottom and excluded in GetCanonicalPhysicalLine(). LINT.IfChange", -"id": "GeostorePhysicalLineProto", -"properties": { -"dashLengthMeters": { -"description": "Applicable for DASHED and DOTTED_DASHED lines.", -"format": "float", -"type": "number" -}, -"gapColor": { -"$ref": "GeostorePaintedElementLogicalColorProto", -"description": "This should be rarely needed, but can represent patterns of alternating colors." -}, -"gapLengthMeters": { -"description": "Applicable for DASHED, DOTTED, and DOTTED_DASHED lines.", -"format": "float", -"type": "number" -}, -"material": { -"items": { -"enum": [ -"UNKNOWN_STRIPE_MATERIAL", -"PAINT_STRIPE", -"ROUND_DOT", -"SQUARE_DOT" -], -"enumDescriptions": [ -"", -"Solid paint stripe.", -"Round dot, typically nonreflective.", -"Square dot, typically reflective." -], -"type": "string" -}, -"type": "array" -}, -"paintColor": { -"$ref": "GeostorePaintedElementLogicalColorProto", -"description": "Color for the painted elements. Applicable to all types." 
-}, -"pattern": { -"enum": [ -"UNKNOWN_DASH_PATTERN", -"SOLID", -"DASHED", -"DOTTED", -"DOTTED_DASHED" -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"physicalLineToken": { -"description": "A token that can be used to identify the version of the data about this marker line.", -"type": "string" -} -}, -"type": "object" -}, -"GeostorePointCurvatureProto": { -"id": "GeostorePointCurvatureProto", -"properties": { -"curvatureStatus": { -"enum": [ -"CURVATURE_UNSPECIFIED", -"CURVATURE_UNKNOWN" -], -"enumDescriptions": [ -"", -"A curvature value can be explicitly set as UNKNOWN when we do not have enough information to make a determination." -], -"type": "string" -}, -"radiansPerMeter": { -"description": "Curvature in radians per meter. Negative is a curve to the left and positive is a curve to the right.", -"format": "float", -"type": "number" -}, -"startPointFraction": { -"description": "How far along the line this curvature value starts to apply, in the format of decimal between 0 and 1.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePointProto": { -"id": "GeostorePointProto", -"properties": { -"latE7": { -"format": "uint32", -"type": "integer" -}, -"lngE7": { -"format": "uint32", -"type": "integer" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "NOTE: If removing metadata, remove 'option objc_class_prefix = \"GS\";' together. See cl/189921100. Field-level metadata for this point. NOTE: there are multiple PointProto fields in the Geo Schema. Metadata here is only expected to be present on FeatureProto.point[] and FeatureProto.center." -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a point. Never set in MapFacts." 
-} -}, -"type": "object" -}, -"GeostorePointWithHeightProto": { -"description": "PointWithHeightProto encodes lat/lng through PointProto and contains altitude information.", -"id": "GeostorePointWithHeightProto", -"properties": { -"altitudeMeters": { -"description": "Altitude of this point is assumed to be relative to the ground level.", -"format": "float", -"type": "number" -}, -"point": { -"$ref": "GeostorePointProto" -} -}, -"type": "object" -}, -"GeostorePoliticalProto": { -"description": "This protocol buffer is included from feature.proto as an optional message. Political features represent the different ways that people are divided into geographical regions. This protocol buffer is applicable only to TYPE_POLITICAL features and is used to store political information from the feature's point of view.", -"id": "GeostorePoliticalProto", -"properties": { -"capital": { -"$ref": "GeostoreFeatureIdProto", -"deprecated": true, -"description": "Many political regions have a conceptual center (capitals of a country or a top-level division are examples). If set, the target feature must be a TYPE_LOCALITY feature." -}, -"claim": { -"description": "Feature IDs of the features claimed by this feature's government that are not necessarily included in the feature's geometry.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"grossDomesticProductUsdMillions": { -"deprecated": true, -"description": "The Gross Domestic Product of the political region measured in millions of current United States dollars. It must not be negative.", -"format": "double", -"type": "number" -}, -"literacyPercent": { -"deprecated": true, -"description": "Percentage of population that are literate within a political region. It must be between 0 and 100.", -"format": "float", -"type": "number" -}, -"population": { -"deprecated": true, -"description": "The number of people in this political region. 
This field is intended to store accurate population, not an estimation such as representative value for population range. It must not be negative.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GeostorePolyLineProto": { -"id": "GeostorePolyLineProto", -"properties": { -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this polyline. NOTE: there are multiple PolyLineProto fields in the Geo Schema. Metadata here is only expected to be present on FeatureProto.polyline[]." -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a polyline. Never set in MapFacts." -}, -"vertex": { -"description": "A sequence of vertices connected by geodesics (the equivalent of straight lines on the sphere). Adjacent vertices are connected by the shorter of the two geodesics that connect them, i.e. all edges are 180 degrees or less. Note that the edge definition becomes numerically unstable as the arc length approaches 180 degrees. Polylines are generally expected to be non-self-intersecting, but any such restriction is imposed by the user of the polyline rather than the polyline itself.", -"items": { -"$ref": "GeostorePointProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostorePolygonProto": { -"description": "A general non-self-intersecting spherical polygon, consisting of one or more loops defining multiple disconnected regions possibly with holes. All loops should be oriented CCW around the region they define. This applies to the exterior loop(s) as well as any holes. Within MapFacts (and underlying infrastructure) the data fields may be replaced by a single shape_id stored in internal_feature. 
Any such PolygonProtos shouldn't be expected to work with public functions in //geostore/base/public/polygon.h.", -"id": "GeostorePolygonProto", -"properties": { -"baseMeters": { -"description": "The polygon loops above are basically flat: each point has a latitude and a longitude but no altitude. We don't want to build real 3D models here, but we do want to be able to generate 2.5D models. A 2.5D model is built by translating the flat polygon upward some distance (base) then extruding it upward some additional distance (height). The elevation of the bottom of the extruded polygon (above ground level).", -"format": "float", -"type": "number" -}, -"cellId": { -"deprecated": true, -"description": "** DEPRECATED ** This is part of a splitting strategy for large polygons, which was never fully launched and we decided not to pursue. For features with very complex polygonal geometry, we break up the polygon into pieces that align with S2 cells at various levels. We do this for performance reasons as some geometry operations have quadratic complexity with regards to the total number of vertices. In these cases, we store the S2 cell ID corresponding to the piece of the overall polygon that is described by this specific PolygonProto message. Each polygon piece is expected to be fully contained by the S2 cell corresponding to this cell ID. However, note that the S2 cell ID is not required to correspond to the smallest S2 cell that fully contains the polygon (and often won't be). In addition, polygon pieces are required to not have any overlap (which translates to having entirely disjoint S2 cell IDs, i.e. one can not be parent (or grand parent, etc.) 
of another).", -"format": "uint64", -"type": "string" -}, -"encoded": { -"description": "Encoding of the polygon using S2Polygon::Encode()'s compressed representation.", -"format": "byte", -"type": "string" -}, -"heightMeters": { -"description": "The distance from the bottom of the extruded polygon to the top.", -"format": "float", -"type": "number" -}, -"loop": { -"deprecated": true, -"description": "** DEPRECATED ** We have switched to using exclusively the encoded form in the wire format to and from MapFacts, so this field should never be populated there. See go/encoded-polygons for more info. \"Classic\" polygon representation, defined by one or more loops. The last vertex of each polyline is implicitly connected to the first vertex. All loops should be specified in CCW order.", -"items": { -"$ref": "GeostorePolyLineProto" -}, -"type": "array" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this polygon." -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a polygon. Never set in MapFacts." -}, -"unsuitableForDisplay": { -"description": "Some polygons are known to be rough proxies for a feature's \"real\" polygonal representation. Such polygons are generally unsuitable for display. Rendering clients should not show these in most cases. Polygons unsuitable for display do have other uses, such as user location or containment analysis, or as an input to learning algorithms. This is an orthogonal concept to FeatureProto.synthetic_geometry, which only pertains to the method by which a polygon was created, rather than its fidelity to ground truth. For features that have multiple polygons, this bit should be consistently set to the same value on all polygons.", -"type": "boolean" -} -}, -"type": "object" -}, -"GeostorePoseProto": { -"description": "A pose is an object's position in space, as well as its orientation. 
All fields except lat and lng are optional. All fields are in the WGS-84 ellipsoid, and rotations are right-hand rule (i.e. if the right hand thumb points along a vector, curled fingers indicate positive rotation direction). An un-rotated pose would be pointing due North, along the surface of the ellipsoid. Rotations are applied in the order: yaw, pitch, roll. Note that the rotation axes are rotated along with the model for each rotation step. WARNING: This proto is not meant to be used directly. Please use the provided libraries: //geostore/base/public/pose.h //java/com/google/geostore/base/Pose.java", -"id": "GeostorePoseProto", -"properties": { -"altitude": { -"description": "The height of the pose. A positive height is above the WGS-84 ellipsoid in meters; negative is below.", -"format": "double", -"type": "number" -}, -"index": { -"description": "The index of the PoseProto in a list of PoseProtos.", -"format": "int32", -"type": "integer" -}, -"lat": { -"description": "The latitude of the pose in degrees [-90, 90].", -"format": "double", -"type": "number" -}, -"lng": { -"description": "The longitude of the pose in degrees (-180,180].", -"format": "double", -"type": "number" -}, -"pitch": { -"description": "The rotation around the longitude line East tangent in degrees [-90, 90].", -"format": "double", -"type": "number" -}, -"roll": { -"description": "The rotation around the latitude line North tangent in degrees (-180, 180].", -"format": "double", -"type": "number" -}, -"yaw": { -"description": "The rotation around the Up vector, from North, in degrees (-180, 180].", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePriceInfoFoodNutritionFacts": { -"description": "This message represents nutrition facts for a food dish.", -"id": "GeostorePriceInfoFoodNutritionFacts", -"properties": { -"calories": { -"$ref": "GeostorePriceInfoFoodNutritionFactsCaloriesFact" -}, -"cholesterol": { -"$ref": 
"GeostorePriceInfoFoodNutritionFactsNutritionFact", -"description": "Cholesterol information for a given food dish." -}, -"protein": { -"$ref": "GeostorePriceInfoFoodNutritionFactsNutritionFact", -"description": "Protein information for a given food dish." -}, -"sodium": { -"$ref": "GeostorePriceInfoFoodNutritionFactsNutritionFact", -"description": "Sodium information for a given food dish." -}, -"totalCarbohydrate": { -"$ref": "GeostorePriceInfoFoodNutritionFactsNutritionFact", -"description": "Carbohydrate information for a given food dish." -}, -"totalFat": { -"$ref": "GeostorePriceInfoFoodNutritionFactsNutritionFact", -"description": "Fat information for a given food dish." -} -}, -"type": "object" -}, -"GeostorePriceInfoFoodNutritionFactsCaloriesFact": { -"description": "This message denotes calories information with an upper bound and lower bound range.", -"id": "GeostorePriceInfoFoodNutritionFactsCaloriesFact", -"properties": { -"lowerAmount": { -"format": "int32", -"type": "integer" -}, -"unit": { -"description": "Unit of the given calories information.", -"enum": [ -"UNDEFINED_ENERGY_UNIT", -"CALORIE", -"JOULE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"upperAmount": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostorePriceInfoFoodNutritionFactsNutritionFact": { -"description": "This message denotes nutrition information with an upper bound and lower bound range and can be represented by mass unit.", -"id": "GeostorePriceInfoFoodNutritionFactsNutritionFact", -"properties": { -"lowerAmount": { -"format": "double", -"type": "number" -}, -"unit": { -"description": "Unit of the given nutrition information.", -"enum": [ -"UNDEFINED_MASS_UNIT", -"GRAM", -"MILLIGRAM" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"upperAmount": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePriceInfoProto": { -"id": "GeostorePriceInfoProto", -"properties": { -"priceList": { 
-"description": "The actual food menus. This is a repeated field because a restaurant may offer multiple menus, e.g. for different language or for different available time, such as holidays vs non-holidays.", -"items": { -"$ref": "GeostorePriceListProto" -}, -"type": "array" -}, -"priceListUrl": { -"description": "All URLs that give price list information for this establishment. For food menus, this would represent menu_urls. Note that this field is a repeated list of UrlListProtos. Each UrlListProto instance in the list is intended to hold lists of URLs that are translations of the same URL.", -"items": { -"$ref": "GeostoreUrlListProto" -}, -"type": "array" -}, -"status": { -"$ref": "GeostorePriceInfoStatus", -"description": "Message containing metadata about the verified status of the PriceInfo. Only verified listings should be displayed." -} -}, -"type": "object" -}, -"GeostorePriceInfoStatus": { -"description": "Providers of PriceInfo (e.g. SinglePlatform, YEXT) send verified and unverified listings. PriceInfoStatus is used to encapsulate this information.", -"id": "GeostorePriceInfoStatus", -"properties": { -"isVerified": { -"type": "boolean" -} -}, -"type": "object" -}, -"GeostorePriceListNameInfoProto": { -"description": "A PriceListNameInfoProto is used by PriceListProto and fields and messages contained in it for storing names, descriptions, languages, and IDs. The name field and the description field must be in the same language, as specified by the language field. None of the fields in this proto is required, although it is not expected to have the language field set unless there is a name or description. When the language field is not set, it is understood to be the preferred language of the locale where the establishment is located. 
An empty string for any of the fields is not allowed (as enforced by lints).", -"id": "GeostorePriceListNameInfoProto", -"properties": { -"description": { -"type": "string" -}, -"id": { -"description": "IDs are intended to be unique identifiers of PriceInfoLists, Sections, and Menu items. This is enforced by the ID_DUPLICATE_PRICE_LIST_ID lint.", -"type": "string" -}, -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. See go/geo-schema-reference/feature-properties/languages.", -"type": "string" -}, -"name": { -"type": "string" -} -}, -"type": "object" -}, -"GeostorePriceListProto": { -"description": "A PriceListProto can be used to represent any type of price lists, one of which is a menu of food and drinks. It contains names and descriptions, together with its source URL list if it is extracted or attributed to that URL. The names and descriptions are represented using repeated PriceListNameInfo fields to allow versions in different languages. A PriceListProto may contain multiple sections; in the context of a food menu, this would be multiple menu sections, e.g. for breakfast, lunch, dinner, prix fixe, or dinner for two, etc. At least one menu section must be present. Each section contains a number of items; for food menus, it may be FoodMenuItems defined below. At least one item must be present in each section.", -"id": "GeostorePriceListProto", -"properties": { -"aggregatorId": { -"description": "For third party lists, represents the ID of the aggregator which provided this data. Optional.", -"format": "uint64", -"type": "string" -}, -"availableTime": { -"$ref": "GeostoreTimeScheduleProto", -"description": "The time period when this price list is available. 
Establishments are not required to give available_time for any given price list, however, when this field is not set, the price list is understood as available any time the establishment is open." -}, -"cuisines": { -"description": "Cuisine information if the location the price lists attached to is an eligible feature for a food menu price list. Cuisine information should also only show up in a food price list.", -"items": { -"enum": [ -"CUISINE_UNDEFINED", -"FAST_FOOD", -"AMERICAN", -"JAPANESE", -"BREAK_FAST", -"PIZZA", -"HAMBURGER", -"ITALIAN", -"SEAFOOD", -"FAMILY", -"MEXICAN", -"CHINESE", -"VEGETARIAN", -"SUSHI", -"CHICKEN", -"INDIAN", -"ASIAN", -"MEDITERRANEAN", -"FRENCH", -"BRUNCH", -"KOREAN", -"THAI", -"SPANISH", -"VIETNAMESE", -"LATIN_AMERICAN", -"INDONESIAN", -"GREEK", -"GERMAN", -"TURKISH", -"BRAZILIAN", -"PAKISTANI", -"OTHER_CUISINE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"nameInfo": { -"description": "The repeated name_info field is for price lists listed in multiple languages. When a price list has no names or descriptions, the size of the repeated field name_info may be 0. There should be at most one name_info for any given language.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -}, -"section": { -"description": "Each price list may have multiple sections. Note that these sections within the same price list should most times contain only the same type of items for sale, e.g. all sections should usually contain only food items if the enclosing price list is representing food menu. However, sometimes such a requirement may be wrong, for example, McDonald's may sell burgers as well as toys, and the toys may be in its own section. 
Thus we don't enforce any requirement that all sections contain only the same type of items.", -"items": { -"$ref": "GeostorePriceListSectionProto" -}, -"type": "array" -}, -"sourceUrl": { -"$ref": "GeostoreUrlListProto", -"description": "Where this price list comes from. If set, this must also be a member of the price_list_url field, and represents translations of a single URL." -} -}, -"type": "object" -}, -"GeostorePriceListSectionProto": { -"description": "A PriceListSectionProto is used to store a section of a PriceListProto. For example, for a PriceListProto representing a food menu, a PriceListSectionProto represents a menu section. Each PriceListSectionProto contains a repeated list of items for sale; these items can be products or services. Right now every section should contain items of one type.", -"id": "GeostorePriceListSectionProto", -"properties": { -"callToAction": { -"$ref": "GeostoreCallToActionProto", -"description": "Call to action for the section." -}, -"foodItem": { -"description": "To store food and drink items when the containing PriceListSectionProto is a food menu section.", -"items": { -"$ref": "GeostoreFoodMenuItemProto" -}, -"type": "array" -}, -"item": { -"description": "To store any items when the containing PriceListSectionProto is not food / legacy services.", -"items": { -"$ref": "GeostoreComposableItemProto" -}, -"type": "array" -}, -"itemType": { -"description": "This has to have at most one value.", -"items": { -"enum": [ -"TYPE_ANY", -"TYPE_FOOD", -"TYPE_SERVICE", -"TYPE_PRODUCT", -"TYPE_JOB", -"TYPE_3P_JOB" -], -"enumDescriptions": [ -"Abstract type, and the root of all types. Not a meaningful type and likewise should never be present in a geostore repository.", -"For food and drink items, stored in FoodMenuItemProto.", -"For service items (stored in FoodMenuItemProto). TYPE_SERVICE is being deprecated. Please use TYPE_JOB for the new services See b/147253790 for more information. 
data.", -"For product items (stored in ComposableItemProto).", -"For \"1P\" jobs data (stored in ComposableItemProto), summarized from a variety of sources such as GMB/NMX, web crawling, and Structured Reviews.", -"For third party jobs data sourced from 3P partner feeds, which need to remain separate from the TYPE_JOB list." -], -"type": "string" -}, -"type": "array" -}, -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. See go/geo-schema-reference/feature-properties/languages. When set, represents the language of the section and its items. Any section and item level name infos must match this language. Optional.", -"type": "string" -}, -"media": { -"description": "One or more media items (photos, videos, etc.) describing this section / category.", -"items": { -"$ref": "GeostoreMediaItemProto" -}, -"type": "array" -}, -"nameInfo": { -"description": "The repeated name_info field is for price list sections listed in multiple languages. When a price list section has no names or descriptions, the size of the repeated field name_info may be 0. There should be at most one name_info for any given language.", -"items": { -"$ref": "GeostorePriceListNameInfoProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostorePriceRangeProto": { -"description": "This message represents a price range of an attribute. The meaning of the price bounds is domain specific. But mainly they are soft bounds for a normal usage. E.g. \"restaurant prices\" are subject to an higher level of \"soft\" bounds than \"museum admission price\"", -"id": "GeostorePriceRangeProto", -"properties": { -"currency": { -"description": "Currency code for the price range: a valid currency code from i18n/identifiers/currencycode.h. 
Lower and upper price are both assumed to use the same currency.", -"type": "string" -}, -"lowerPrice": { -"description": "This message allows unbounded price ranges. e.g. Lower_price is undefined. At least one of the two prices must be set for the price range to be meaningful.", -"format": "double", -"type": "number" -}, -"units": { -"description": "clang-format on", -"enum": [ -"ANY_UNITS", -"PER_USE", -"PER_PHONE_CALL", -"PER_RIDE", -"PER_TIME_UNIT", -"PER_SECOND", -"PER_MINUTE", -"PER_HOUR", -"PER_DAY", -"PER_NIGHT", -"PER_WEEK", -"PER_MONTH", -"PER_YEAR", -"PER_VOLUME_UNIT", -"PER_LITER", -"PER_GLASS", -"PER_BOTTLE", -"PER_POT", -"PER_LENGTH_UNIT", -"PER_CENTIMETER", -"PER_METER", -"PER_KILOMETER", -"PER_MASS_UNIT", -"PER_GRAM", -"PER_KILOGRAM", -"PER_OUNCE", -"PER_POUND" -], -"enumDescriptions": [ -"ABSTRACT The root of the hierarchy. It's an abstract value and shouldn't be present in the repository.", -"", -"", -"e.g. for the price of a ferry or aeroplane flight.", -"", -"", -"", -"", -"", -"e.g. for the price of a hotel room.", -"", -"", -"", -"", -"e.g. for the price of petrol.", -"The following units (PER_GLASS, PER_BOTTLE, and PER_POT) are intended to represent price units for restaurant menus and do not specify how much volume the unit contains. e.g. for wine sold by glass.", -"e.g. for wine sold by bottle.", -"e.g. for tea sold by pot", -"", -"", -"", -"e.g. for the price of a taxi.", -"To simplify the categories for units, we put both mass and weight units under PER_MASS_UNIT, as a lot of times they are used interchangeably in real life anyway.", -"", -"", -"", -"" -], -"type": "string" -}, -"upperPrice": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GeostorePropertyValueStatusProto": { -"description": "PropertyValueStatusProto specifies what we know about a field corresponding to FeaturePropertyId's value in the absence of any specific value. For now, it just indicates when we know that there is no value. 
Eventually it might also indicate that we know it has value, just not what it is, etc.", -"id": "GeostorePropertyValueStatusProto", -"properties": { -"propertyId": { -"$ref": "GeostoreFeaturePropertyIdProto", -"description": "The property ID whose value status is defined by this proto." -}, -"valueStatus": { -"description": "`value_status` specifies whether the feature has a value for the property. This should always be set to something other than the default value (`PROPERTY_VALUE_STATUS_UNSPECIFIED`).", -"enum": [ -"PROPERTY_VALUE_STATUS_UNSPECIFIED", -"HAS_NO_VALUE", -"HAS_UNKNOWN_VALUE" -], -"enumDescriptions": [ -"The default value means the property's value status is encoded in the property itself: if the property has a value, it has a value. If it doesn't, it may or may not have a value. It is OK to interpret that as having no value. Note that in `PropertyValueStatusProto`, this value is not allowed. We include it and define it to match the default behavior so that it can be used in other protos.", -"`HAS_NO_VALUE` means that we know that the property has no value.", -"'HAS_UNKNOWN_VALUE' means that we know that the property has a value but that we don't know what that value is." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreProvenanceProto": { -"description": "This is a minimal version of SourceInfoProto.", -"id": "GeostoreProvenanceProto", -"properties": { -"dataset": { -"description": "The dataset from which this the referenced data was created. 
The content of this string will be determined by the data provider, and may encode extra information, such as data confidence.", -"type": "string" -}, -"provider": { -"description": "The data provider from which the referenced data was generated.", -"enum": [ -"PROVIDER_ANY", -"PROVIDER_UNKNOWN", -"PROVIDER_NAVTEQ", -"PROVIDER_TELE_ATLAS", -"PROVIDER_TELE_ATLAS_MULTINET", -"PROVIDER_TELE_ATLAS_CODEPOINT", -"PROVIDER_TELE_ATLAS_GEOPOST", -"PROVIDER_TELE_ATLAS_DATAGEO", -"PROVIDER_TELE_ATLAS_ADDRESS_POINTS", -"PROVIDER_TELCONTAR", -"PROVIDER_EUROPA", -"PROVIDER_ROYAL_MAIL", -"PROVIDER_GOOGLE", -"PROVIDER_GOOGLE_HAND_EDIT", -"PROVIDER_GOOGLE_BORDERS", -"PROVIDER_GOOGLE_SUBRANGE", -"PROVIDER_GOOGLE_GT_FUSION", -"PROVIDER_GOOGLE_ZAGAT_CMS", -"PROVIDER_GOOGLE_PLACE_NAVBOOST", -"PROVIDER_GOOGLE_FOOTPRINT", -"PROVIDER_GOOGLE_PRODUCT_TERMS", -"PROVIDER_GOOGLE_POINTCARDS", -"PROVIDER_GOOGLE_BUSINESS_CHAINS", -"PROVIDER_GOOGLE_LOCAL_SUMMARIZATION", -"PROVIDER_GOOGLE_PRONUNCIATIONS", -"PROVIDER_GOOGLE_DUMPLING", -"PROVIDER_GOOGLE_DISTILLERY", -"PROVIDER_GOOGLE_LOCAL_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_RELATION_MINER", -"PROVIDER_GOOGLE_MAPSPAM", -"PROVIDER_GOOGLE_ROSE", -"PROVIDER_GOOGLE_LOCAL_PLACE_RATINGS", -"PROVIDER_GOOGLE_WIPEOUT", -"PROVIDER_GOOGLE_KNOWLEDGE_GRAPH", -"PROVIDER_GOOGLE_BEEGEES", -"PROVIDER_GOOGLE_REVIEW_SUMMARIZATION", -"PROVIDER_GOOGLE_OFFLINE_NON_CORE_ATTRIBUTE_SUMMARIZATION", -"PROVIDER_GOOGLE_GEO_WORLDMAPS", -"PROVIDER_GOOGLE_GEO_MODERATION", -"PROVIDER_GOOGLE_OYSTER_AUTO_EDITS", -"PROVIDER_GOOGLE_LOCAL_ALCHEMY", -"PROVIDER_GOOGLE_KEROUAC", -"PROVIDER_GOOGLE_MOBRANK", -"PROVIDER_GOOGLE_RAPTURE", -"PROVIDER_GOOGLE_CULTURAL_INSTITUTE", -"PROVIDER_GOOGLE_GEOCODES_FROM_LOCAL_FEEDS", -"PROVIDER_GOOGLE_ATTRIBUTES_FROM_CRAWLED_CHAINS", -"PROVIDER_GOOGLE_TACTILE_MAPS", -"PROVIDER_GOOGLE_MAPS_FOR_MOBILE", -"PROVIDER_GOOGLE_GEO_REALTIME", -"PROVIDER_GOOGLE_PROMINENT_PLACES", -"PROVIDER_GOOGLE_PLACE_ACTIONS", -"PROVIDER_GOOGLE_GT_AUTO_EDITS", 
-"PROVIDER_GOOGLE_WAZE", -"PROVIDER_GOOGLE_ONTHEGO", -"PROVIDER_GOOGLE_GT_IMPORT", -"PROVIDER_GOOGLE_STRUCTURED_DATA", -"PROVIDER_GOOGLE_HELICOPTER", -"PROVIDER_GOOGLE_ROLLBACK", -"PROVIDER_GOOGLE_RIGHTS_REPAIR", -"PROVIDER_GOOGLE_PERFUME", -"PROVIDER_GOOGLE_MAPS_TRANSLATION", -"PROVIDER_GOOGLE_CALL_ME_MAYBE", -"PROVIDER_GOOGLE_LOCAL_UNIVERSAL", -"PROVIDER_GOOGLE_CROUPIER", -"PROVIDER_GOOGLE_SKYSMART", -"PROVIDER_GOOGLE_RIDDLER", -"PROVIDER_GOOGLE_ROADCLOSURES", -"PROVIDER_GOOGLE_SPORE", -"PROVIDER_GOOGLE_LOCALIZATION", -"PROVIDER_GOOGLE_CATTERMS", -"PROVIDER_GOOGLE_GT_FIELD_OPS", -"PROVIDER_GOOGLE_MATCHMAKER", -"PROVIDER_GOOGLE_ARBITRATION", -"PROVIDER_GOOGLE_BIZBUILDER_OPS", -"PROVIDER_GOOGLE_LOCAL_INVENTORY_ADS", -"PROVIDER_GOOGLE_GT_DRAFTY", -"PROVIDER_GOOGLE_HOTELADS_OPS", -"PROVIDER_GOOGLE_MARKERS", -"PROVIDER_GOOGLE_STATE_MACHINE", -"PROVIDER_GOOGLE_ATTRIBUTES_INFERENCE", -"PROVIDER_GOOGLE_BIKESHARE", -"PROVIDER_GOOGLE_GHOSTWRITER", -"PROVIDER_GOOGLE_EDIT_PLATFORM", -"PROVIDER_GOOGLE_BLUE_GINGER", -"PROVIDER_GOOGLE_GEO_TIGER", -"PROVIDER_GOOGLE_HYADES", -"PROVIDER_GOOGLE_WEBQUARRY", -"PROVIDER_GOOGLE_GEO_MADDEN", -"PROVIDER_GOOGLE_ANDROID_PAY", -"PROVIDER_GOOGLE_OPENING_HOURS_TEAM", -"PROVIDER_GOOGLE_LOCAL_DISCOVERY", -"PROVIDER_GOOGLE_LOCAL_HEALTH", -"PROVIDER_GOOGLE_UGC_MAPS", -"PROVIDER_GOOGLE_FIBER", -"PROVIDER_GOOGLE_REVGEO", -"PROVIDER_GOOGLE_HOTELADS_PARTNER_FRONT_END", -"PROVIDER_GOOGLE_GEO_UGC_TASKS", -"PROVIDER_GOOGLE_GEOCODING", -"PROVIDER_GOOGLE_SPYGLASS", -"PROVIDER_GOOGLE_PLUS_CODES_AS_ADDRESSES", -"PROVIDER_GOOGLE_GEO_CHANGES", -"PROVIDER_GOOGLE_HUME", -"PROVIDER_GOOGLE_MEGAMIND", -"PROVIDER_GOOGLE_GT_ROADSYNTH", -"PROVIDER_GOOGLE_FIREBOLT", -"PROVIDER_GOOGLE_LOCAL_PLACE_OFFERINGS", -"PROVIDER_GOOGLE_UGC_SERVICES", -"PROVIDER_GOOGLE_GEOALIGN", -"PROVIDER_GOOGLE_GT_COMPOUNDS", -"PROVIDER_GOOGLE_FOOD_ORDERING", -"PROVIDER_GOOGLE_HOTEL_KNOWLEDGE_OPS", -"PROVIDER_GOOGLE_URAW", -"PROVIDER_GOOGLE_FLYEYE", -"PROVIDER_GOOGLE_YOUKE", 
-"PROVIDER_GOOGLE_GT_ZEPHYR", -"PROVIDER_GOOGLE_USER_SAFETY", -"PROVIDER_GOOGLE_ADDRESS_MAKER", -"PROVIDER_GOOGLE_UGC_PHOTOS", -"PROVIDER_GOOGLE_GT_WINDCHIME", -"PROVIDER_GOOGLE_SNAG_FIXER", -"PROVIDER_GOOGLE_GEO_DEALS", -"PROVIDER_GOOGLE_LOCAL_PLACE_TOPICS", -"PROVIDER_GOOGLE_PROPERTY_INSIGHTS", -"PROVIDER_GOOGLE_GEO_CONSUMER_MERCHANT_EXPERIMENTS", -"PROVIDER_GOOGLE_GEO_PORTKEY", -"PROVIDER_GOOGLE_ROAD_MAPPER", -"PROVIDER_GOOGLE_LOCATION_PLATFORM", -"PROVIDER_GOOGLE_POSTTRIP", -"PROVIDER_GOOGLE_TRAVEL_DESTINATION", -"PROVIDER_GOOGLE_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_BIZBUILDER_CLEANUP", -"PROVIDER_GOOGLE_USER", -"PROVIDER_GOOGLE_STATION", -"PROVIDER_GOOGLE_GEO_FOOD", -"PROVIDER_GOOGLE_GEO_AR", -"PROVIDER_GOOGLE_GEO_TEMPORAL", -"PROVIDER_GOOGLE_SERVICES_MARKETPLACE", -"PROVIDER_GOOGLE_IMT_CLEANUP", -"PROVIDER_GOOGLE_GEO_FOOD_MENU", -"PROVIDER_GOOGLE_CARENAV", -"PROVIDER_GOOGLE_DRIVING_FEEDS", -"PROVIDER_GOOGLE_DRIVING_UGC", -"PROVIDER_GOOGLE_POLAR", -"PROVIDER_GOOGLE_TRIWILD", -"PROVIDER_GOOGLE_CROWD_COMPUTE_OPS", -"PROVIDER_GOOGLE_SA_FROM_WEB", -"PROVIDER_GOOGLE_POI_ALIGNMENT", -"PROVIDER_GOOGLE_SA_FROM_HULK", -"PROVIDER_GOOGLE_SERVICES_INTERACTIONS", -"PROVIDER_GOOGLE_ROADS_UGC_EDITOR", -"PROVIDER_GOOGLE_SA_FROM_NG_INFERENCE", -"PROVIDER_GOOGLE_GEO_DRIVING_VIZ", -"PROVIDER_GOOGLE_GEO_TASKING", -"PROVIDER_GOOGLE_CROWDTASK_DATACOMPUTE", -"PROVIDER_GOOGLE_CROWDTASK_TASKADS", -"PROVIDER_GOOGLE_CROWDTASK_TASKMATE", -"PROVIDER_GOOGLE_CROWDTASK_FURBALL", -"PROVIDER_GOOGLE_CROWDTASK_ADAP", -"PROVIDER_GOOGLE_GPAY", -"PROVIDER_GOOGLE_GEO_UGC_TRUSTED_USERS", -"PROVIDER_GOOGLE_THIRD_PARTY_DATA_PRODUCTION", -"PROVIDER_GOOGLE_GEOTRACKER", -"PROVIDER_GOOGLE_LOCAL_LANDMARK_INFERENCE", -"PROVIDER_GOOGLE_GEO_CLOSED_LOOP", -"PROVIDER_GOOGLE_SA_FROM_MERCHANT_POSTS", -"PROVIDER_GOOGLE_CORE_DATA_RIGHTS", -"PROVIDER_GOOGLE_SA_FROM_USER_REVIEWS", -"PROVIDER_GOOGLE_GEO_CONTENT_FIXER", -"PROVIDER_GOOGLE_POLYGON_REFINEMENT", -"PROVIDER_GOOGLE_HANASU", 
-"PROVIDER_GOOGLE_FULLRIGHTS_GEO_DATA_UPLOAD", -"PROVIDER_GOOGLE_FULLRIGHTS_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_ATTRIBUTION_3P_OUTREACH_UPLOAD", -"PROVIDER_GOOGLE_SA_FROM_FOOD_MENUS", -"PROVIDER_GOOGLE_GT_CONSISTENCY_EDITS", -"PROVIDER_GOOGLE_SA_QUALITY", -"PROVIDER_GOOGLE_GDCE_CLEANUP", -"PROVIDER_GOOGLE_UGC_QUALITY_CHAINS", -"PROVIDER_GOOGLE_ATTRIBUTES_DISCOVERY", -"PROVIDER_GOOGLE_GEO_LDE", -"PROVIDER_GOOGLE_GEO_SIGNAL_TRACKING", -"PROVIDER_GOOGLE_UGC_AGGREGATION", -"PROVIDER_GOOGLE_3D_BASEMAP", -"PROVIDER_GOOGLE_MAPFACTS_PRIVACY", -"PROVIDER_GOOGLE_GT_ALF", -"PROVIDER_GOOGLE_GT_OPERATOR_PROVENANCE", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS", -"PROVIDER_GOOGLE_GT_LANE_AUTOMATION", -"PROVIDER_GOOGLE_GEO_NG_LOCAL", -"PROVIDER_GOOGLE_MAPFACTS_CLEANUP", -"PROVIDER_GOOGLE_THIRD_PARTY_UGC", -"PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", -"PROVIDER_GOOGLE_VACATION_RENTAL_PARTNERS", -"PROVIDER_GOOGLE_FEED_PROCESSOR_ROAD_INCIDENTS", -"PROVIDER_GOOGLE_DYNAMIC_BASEMAP", -"PROVIDER_GOOGLE_LOCAL_SERVICES_ADS_EMEA", -"PROVIDER_GOOGLE_LOCALSEARCH", -"PROVIDER_GOOGLE_TRANSIT", -"PROVIDER_GOOGLE_GEOWIKI", -"PROVIDER_GOOGLE_CHINA_LOCAL_TEAM", -"PROVIDER_GOOGLE_SYNTHESIZED", -"PROVIDER_GOOGLE_INTERNAL_TEST", -"PROVIDER_GOOGLE_DISPUTED_AREAS", -"PROVIDER_GOOGLE_3DWAREHOUSE", -"PROVIDER_GOOGLE_GROUNDS_BUILDER", -"PROVIDER_GOOGLE_SESAME", -"PROVIDER_GOOGLE_GT", -"PROVIDER_GOOGLE_GT_BASEMAP_UPLOAD", -"PROVIDER_GOOGLE_ADSDB", -"PROVIDER_GOOGLE_MACHINE_TRANSLITERATION", -"PROVIDER_GOOGLE_TRAVELSEARCH", -"PROVIDER_GOOGLE_PANORAMIO", -"PROVIDER_GOOGLE_YOUTUBE", -"PROVIDER_GOOGLE_OLD", -"PROVIDER_GOOGLE_STREETVIEW", -"PROVIDER_GOOGLE_STREETVIEW_BIZVIEW", -"PROVIDER_GOOGLE_ZIPIT", -"PROVIDER_GOOGLE_OYSTER_CONNECT_ROUTES", -"PROVIDER_GOOGLE_GOLDEN", -"PROVIDER_GOOGLE_INNERSPACE", -"PROVIDER_GOOGLE_MAPSEARCH", -"PROVIDER_GOOGLE_CATEGORIES_TEAM", -"PROVIDER_GOOGLE_CROWDSENSUS", -"PROVIDER_GOOGLE_LOCAL_ALGORITHMIC_IDENTITY", -"PROVIDER_GOOGLE_FREEBASE", -"PROVIDER_GOOGLE_HOTELADS", 
-"PROVIDER_GOOGLE_AUTHORITY_PAGES", -"PROVIDER_GOOGLE_PLACES_API", -"PROVIDER_GOOGLE_NAMEHEATMAP", -"PROVIDER_GOOGLE_MAPMAKER", -"PROVIDER_GOOGLE_MAPMAKER_MOBILE", -"PROVIDER_GOOGLE_MAPMAKER_PANCAKE", -"PROVIDER_GOOGLE_MAPMAKER_V2", -"PROVIDER_GOOGLE_LOCAL_CLUSTERING_OPERATOR_OVERRIDE", -"PROVIDER_GOOGLE_SERVED_ON_MAPMAKER", -"PROVIDER_GOOGLE_GT_LOCAL", -"PROVIDER_GOOGLE_GT_LOCAL_WITH_RIGHTS", -"PROVIDER_GOOGLE_LOGS_RANKING_SIGNALS", -"PROVIDER_GOOGLE_ENTITY_NAVBOOST", -"PROVIDER_GOOGLE_RELATED_PLACES", -"PROVIDER_GOOGLE_KNOWN_FOR_TERMS", -"PROVIDER_GOOGLE_SYNTHETIC_AREAS", -"PROVIDER_GOOGLE_AUTHORITY_PAGE_PHOTOS", -"PROVIDER_GOOGLE_CROSS_STREETS", -"PROVIDER_GOOGLE_CORRIDORS", -"PROVIDER_GOOGLE_BICYCLE_RENTAL", -"PROVIDER_GOOGLE_CONCRETE_URLS", -"PROVIDER_GOOGLE_LEANBACK", -"PROVIDER_GOOGLE_LOCKED_LISTINGS", -"PROVIDER_GOOGLE_MONITORING", -"PROVIDER_GOOGLE_SPROUT", -"PROVIDER_GOOGLE_LOCAL_SEARCH_QUALITY", -"PROVIDER_GOOGLE_GOBY", -"PROVIDER_GOOGLE_PROBLEM_REPORT", -"PROVIDER_GOOGLE_CANDID", -"PROVIDER_GOOGLE_BIZBUILDER", -"PROVIDER_AUTOMOTIVE_NAVIGATION_DATA", -"PROVIDER_MAPDATA_SCIENCES", -"PROVIDER_MAPONICS", -"PROVIDER_SKI_RESORTS", -"PROVIDER_ZENRIN", -"PROVIDER_SANBORN", -"PROVIDER_URBAN_MAPPING", -"PROVIDER_US_GOVERNMENT", -"PROVIDER_US_CENSUS", -"PROVIDER_US_POSTAL_SERVICE", -"PROVIDER_US_GEOLOGICAL_SURVEY", -"PROVIDER_US_GNIS", -"PROVIDER_US_LANDSAT", -"PROVIDER_US_NATIONAL_GEOSPATIAL_INTELLIGENCE_AGENCY", -"PROVIDER_US_NGA_GNS", -"PROVIDER_US_SSIBL", -"PROVIDER_US_BUREAU_OF_TRANSPORTATION_STATISTICS", -"PROVIDER_US_NATIONAL_OCEANIC_AND_ATMOSPHERIC_ADMINISTRATION", -"PROVIDER_US_POLAR_GEOSPATIAL_CENTER", -"PROVIDER_US_DEPARTMENT_OF_AGRICULTURE", -"PROVIDER_US_NPI_REGISTRY", -"PROVIDER_US_BUREAU_OF_INDIAN_AFFAIRS", -"PROVIDER_DMTI_SPATIAL", -"PROVIDER_INTERNATIONAL_HYDROGRAPHIC_ORGANIZATION", -"PROVIDER_MAPLINK", -"PROVIDER_KINGWAY", -"PROVIDER_GEOCENTRE", -"PROVIDER_CN_NATIONAL_FOUNDAMENTAL_GIS", -"PROVIDER_CN_MAPABC", -"PROVIDER_SMITHSONIAN_INSTITUTE", 
-"PROVIDER_TRACKS_FOR_AFRICA", -"PROVIDER_PPWK", -"PROVIDER_LEADDOG", -"PROVIDER_CENTRE_DONNEES_ASTRONOMIQUES_STRASBOURG", -"PROVIDER_GISRAEL", -"PROVIDER_BASARSOFT", -"PROVIDER_MAPINFO", -"PROVIDER_MAPIT", -"PROVIDER_GEOBASE", -"PROVIDER_ORION", -"PROVIDER_CENTRAL_EUROPEAN_DATA_AGENCY", -"PROVIDER_ANASAT", -"PROVIDER_MINED_POSTCODES", -"PROVIDER_DMAPAS", -"PROVIDER_COMMON_LOCALE_DATA_REPOSITORY", -"PROVIDER_CH_SBB", -"PROVIDER_SKENERGY", -"PROVIDER_GBRMPA", -"PROVIDER_KOREA_POST", -"PROVIDER_CN_AUTONAVI", -"PROVIDER_MINED_POI", -"PROVIDER_ML_INFOMAP", -"PROVIDER_SNOOPER", -"PROVIDER_GEOSISTEMAS", -"PROVIDER_AFRIGIS", -"PROVIDER_TRANSNAVICOM", -"PROVIDER_EASYCONNECT", -"PROVIDER_LANTMATERIET", -"PROVIDER_LOGICA", -"PROVIDER_MAPKING", -"PROVIDER_DIANPING", -"PROVIDER_GEONAV", -"PROVIDER_HEIBONSHA", -"PROVIDER_DEUTSCHE_TELEKOM", -"PROVIDER_LINGUISTIC_DATA_CONSORTIUM", -"PROVIDER_ACXIOM", -"PROVIDER_DUN_AND_BRADSTREET", -"PROVIDER_FEDERAL_AVIATION_ADMINISTRATION", -"PROVIDER_INFOUSA", -"PROVIDER_INFOUSA_NIXIE", -"PROVIDER_THOMSON_LOCAL", -"PROVIDER_TELEFONICA_PUBLICIDAD_E_INFORMACION", -"PROVIDER_WIKIPEDIA", -"PROVIDER_INFOBEL", -"PROVIDER_MX_GOVERNMENT", -"PROVIDER_MX_NATIONAL_INSTITUTE_STATISTICS_GEOGRAPHY", -"PROVIDER_MX_SERVICIO_POSTAL_MEXICANO", -"PROVIDER_TELEGATE", -"PROVIDER_TELELISTAS", -"PROVIDER_MAPCITY", -"PROVIDER_EXPLAINER_DC", -"PROVIDER_DAIKEI", -"PROVIDER_NL_CHAMBER_OF_COMMERCE", -"PROVIDER_KOREA_INFO_SERVICE", -"PROVIDER_WIKITRAVEL", -"PROVIDER_FLICKR", -"PROVIDER_DIANCO", -"PROVIDER_VOLT_DELTA", -"PROVIDER_SG_GOVERNMENT", -"PROVIDER_SG_LAND_TRANSPORT_AUTHORITY", -"PROVIDER_MAPBAR", -"PROVIDER_LONGTU", -"PROVIDER_SA_GOVERNMENT", -"PROVIDER_SA_SAUDI_POST", -"PROVIDER_PEAKLIST", -"PROVIDER_LOCAL_BUSINESS_CENTER", -"PROVIDER_LOCAL_FEED_XML", -"PROVIDER_WEB", -"PROVIDER_RAILS_TO_TRAILS", -"PROVIDER_INDIACOM", -"PROVIDER_INFOMEDIA", -"PROVIDER_PICASA", -"PROVIDER_AT_GOVERNMENT", -"PROVIDER_AT_BUNDESAMT_FUR_EICH_UND_VERMESSUNGSWESEN", 
-"PROVIDER_AT_NATIONAL_TOURIST_OFFICE", -"PROVIDER_AT_AUSTRIA_POST", -"PROVIDER_NO_GOVERNMENT", -"PROVIDER_NO_NORSK_EIENDOMSINFORMASJON", -"PROVIDER_NO_POSTEN_NORGE_AS", -"PROVIDER_CH_GOVERNMENT", -"PROVIDER_CH_SWISS_POST", -"PROVIDER_CH_SWISSTOPO", -"PROVIDER_CH_SWISS_NATIONAL_PARK", -"PROVIDER_NAVIT", -"PROVIDER_GEOSEARCH", -"PROVIDER_DE_GOVERNMENT", -"PROVIDER_BUNDESAMT_KARTOGRAPHIE_UND_GEODASIE", -"PROVIDER_BUNDESNETZAGENTUR", -"PROVIDER_SCHOBER_GROUP", -"PROVIDER_MIREO", -"PROVIDER_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WEBSTER_TEXAS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_AMHERST_MASSACHUSETTS", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BLOOMINGTON_INDIANA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PASADENA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_CHULA_VISTA_CALIFORNIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_TEMPE_ARIZONA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_COLUMBUS_OHIO", -"PROVIDER_US_PUBLIC_MUNICIPALITY_PORTAGE_MICHIGAN", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GEORGETOWN_KENTUCKY", -"PROVIDER_US_PUBLIC_MUNICIPALITY_GREENVILLE_SOUTH_CAROLINA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_NASHVILLE_TENNESSEE", -"PROVIDER_US_PUBLIC_MUNICIPALITY_WASHINGTON_DISTRICT_OF_COLUMBIA", -"PROVIDER_US_PUBLIC_MUNICIPALITY_BOULDER_COLORADO", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY", -"PROVIDER_NZ_PUBLIC_MUNICIPALITY_ENVIRONMENT_BAY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY", -"PROVIDER_PL_PUBLIC_MUNICIPALITY_BIELSKO_BIALA", -"PROVIDER_DE_PUBLIC_MUNICIPALITY", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_FRANKFURT", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_HAMBURG", -"PROVIDER_DE_PUBLIC_MUNICIPALITY_KARLSRUHE", -"PROVIDER_PT_PUBLIC_MUNICIPALITY", -"PROVIDER_PT_PUBLIC_MUNICIPALITY_SANTA_CRUZ", -"PROVIDER_AT_PUBLIC_MUNICIPALITY", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_KLAGENFURT", -"PROVIDER_AT_PUBLIC_MUNICIPALITY_LINZ", -"PROVIDER_ES_PUBLIC_MUNICIPALITY", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_AZKOITIA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_BEASAIN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_GIRONA", 
-"PROVIDER_ES_PUBLIC_MUNICIPALITY_SAN_SEBASTIAN", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_CATALUNYA", -"PROVIDER_ES_PUBLIC_MUNICIPALITY_HONDARRIBIA", -"PROVIDER_AU_PUBLIC_MUNICIPALITY", -"PROVIDER_AU_PUBLIC_MUNICIPALITY_LAUNCESTON_TASMANIA", -"PROVIDER_IS_PUBLIC_MUNICIPALITY", -"PROVIDER_IS_PUBLIC_MUNICIPALITY_REYKJAVIK", -"PROVIDER_NL_PUBLIC_MUNICIPALITY", -"PROVIDER_NL_PUBLIC_MUNICIPALITY_AMELSTEVEEN", -"PROVIDER_BE_PUBLIC_MUNICIPALITY", -"PROVIDER_BE_PUBLIC_MUNICIPALITY_ANTWERPEN", -"PROVIDER_CA_PUBLIC_MUNICIPALITY", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_FREDERICTON_NEW_BRUNSWICK", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_KAMLOOPS_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_NANAIMO_BRITISH_COLUMBIA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_BANFF_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_CALGARY_ALBERTA", -"PROVIDER_CA_PUBLIC_MUNICIPALITY_TORONTO_ONTARIO", -"PROVIDER_SE_PUBLIC_MUNICIPALITY", -"PROVIDER_SE_PUBLIC_MUNICIPALITY_UMEA", -"PROVIDER_UA_PUBLIC_MUNICIPALITY", -"PROVIDER_UA_PUBLIC_MUNICIPALITY_KHARKIV", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY", -"PROVIDER_OTHER_PUBLIC_MUNICIPALITY_AQUA_CALIENTE_CAHUILLA_INDIANS", -"PROVIDER_FR_PUBLIC_MUNICIPALITY", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_PONT_AUDEMER", -"PROVIDER_FR_PUBLIC_MUNICIPALITY_BORDEAUX", -"PROVIDER_SG_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY", -"PROVIDER_BR_PUBLIC_MUNICIPALITY_RIO_DE_JANEIRO", -"PROVIDER_MAPCUBE", -"PROVIDER_3D_REALITYMAPS", -"PROVIDER_DEUTSCHES_ZENTRUM_FUR_LUFT_UND_RAUMFAHRT", -"PROVIDER_3D_CITIES_SOCIEDADE_ANONIMA", -"PROVIDER_DISNEY", -"PROVIDER_CYBERCITY", -"PROVIDER_PRECISION_LIGHTWORKS_MODELWORKS", -"PROVIDER_VIRTUAL_HUNGARY_LIMITED", -"PROVIDER_VIRTUEL_CITY", -"PROVIDER_SCREAMPOINT_INTERNATIONAL", -"PROVIDER_AGENTSCHAP_VOOR_GEOGRAFISCHE_INFORMATIE_VLAANDEREN", -"PROVIDER_FR_GOVERNMENT", -"PROVIDER_FR_INSTITUT_GEOGRAPHIQUE_NATIONAL", -"PROVIDER_FR_CADASTRE", -"PROVIDER_DIADIEM", -"PROVIDER_THE_WEATHER_CHANNEL", -"PROVIDER_COWI", -"PROVIDER_FALKPLAN_ANDES", 
-"PROVIDER_NL_GOVERNMENT", -"PROVIDER_NL_KADASTER", -"PROVIDER_NL_BOARD_OF_TOURISM_AND_CONVENTIONS", -"PROVIDER_DIGITAL_MAP_PRODUCTS", -"PROVIDER_SILICE_DIGITAL", -"PROVIDER_TYDAC", -"PROVIDER_ALBRECHT_GOLF", -"PROVIDER_HEALTH_CH", -"PROVIDER_VISITDENMARK", -"PROVIDER_FLYHERE", -"PROVIDER_DIGITAL_DATA_SERVICES", -"PROVIDER_MECOMO", -"PROVIDER_ZA_GOVERNMENT", -"PROVIDER_ZA_RURAL_DEVELOPMENT_LAND_REFORM", -"PROVIDER_SENSIS", -"PROVIDER_JJCONNECT", -"PROVIDER_OPPLYSNINGEN", -"PROVIDER_TELLUS", -"PROVIDER_IQONIA", -"PROVIDER_BE_GOVERNMENT", -"PROVIDER_BE_NATIONAAL_GEOGRAFISCH_INSTITUUT", -"PROVIDER_BE_BRUSSELS_MOBILITY", -"PROVIDER_YELLOWMAP_AG", -"PROVIDER_STIFTUNG_GESUNDHEIT", -"PROVIDER_GIATA", -"PROVIDER_SANPARKS", -"PROVIDER_CENTRE_DINFORMATIQUE_POUR_LA_REGION_BRUXELLOISE", -"PROVIDER_INFOPORTUGAL", -"PROVIDER_NEGOCIOS_DE_TELECOMUNICACOES_E_SISTEMAS_DE_INFORMACAO", -"PROVIDER_COLLINS_BARTHOLOMEW", -"PROVIDER_PROTECT_PLANET_OCEAN", -"PROVIDER_KARTTAKESKUS", -"PROVIDER_FI_GOVERNMENT", -"PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION", -"PROVIDER_FI_NATIONAL_LAND_SURVEY", -"PROVIDER_FI_STATISTICS_FINLAND", -"PROVIDER_GB_GOVERNMENT", -"PROVIDER_GB_ORDNANCE_SURVEY", -"PROVIDER_NATURAL_ENGLAND", -"PROVIDER_WELSH_GOVERNMENT", -"PROVIDER_GB_OFFICE_FOR_NATIONAL_STATISTICS", -"PROVIDER_EPSILON", -"PROVIDER_PARTNER_FRONT_END", -"PROVIDER_CARTESIA", -"PROVIDER_SE_GOVERNMENT", -"PROVIDER_SE_TRAFIKVERKET", -"PROVIDER_SE_NATURVARDSVERKET", -"PROVIDER_IE_GOVERNMENT", -"PROVIDER_IE_ORDNANCE_SURVEY_IRELAND", -"PROVIDER_LU_GOVERNMENT", -"PROVIDER_LU_P_AND_T_LUXEMBOURG", -"PROVIDER_LU_ADMINISTRATION_DU_CADASTRE_ET_DE_LA_TOPOGRAPHIE", -"PROVIDER_LU_NATIONAL_TOURIST_OFFICE", -"PROVIDER_MAPFLOW", -"PROVIDER_TKARTOR", -"PROVIDER_JUMPSTART", -"PROVIDER_EPTISA", -"PROVIDER_MC_GOVERNMENT", -"PROVIDER_MC_PRINCIPAUTE_DE_MONACO", -"PROVIDER_MONOLIT", -"PROVIDER_ENVIRONMENTAL_SYSTEMS_RESEARCH_INSTITUTE", -"PROVIDER_MODIS", -"PROVIDER_GEOX", -"PROVIDER_GEODIRECTORY", -"PROVIDER_GEOPLAN", 
-"PROVIDER_INFODIREKT", -"PROVIDER_GEOGLOBAL", -"PROVIDER_DEUTSCHE_POST", -"PROVIDER_TRACASA", -"PROVIDER_CORREOS", -"PROVIDER_ES_GOVERNMENT", -"PROVIDER_ES_CENTRO_NACIONAL_DE_INFORMACION_GEOGRAFICA", -"PROVIDER_EDIMAP", -"PROVIDER_VERIZON", -"PROVIDER_NATIONAL_GEOGRAPHIC_MAPS", -"PROVIDER_PROMAPS", -"PROVIDER_CONSODATA", -"PROVIDER_DE_AGOSTINI", -"PROVIDER_FEDERPARCHI", -"PROVIDER_NAVIGO", -"PROVIDER_ITALIAMAPPE", -"PROVIDER_CZECOT", -"PROVIDER_NATURAL_EARTH", -"PROVIDER_REGIO", -"PROVIDER_SHIPWRECK_CENTRAL", -"PROVIDER_RUTGERS_STATE_UNIVERSITY", -"PROVIDER_TWINICE", -"PROVIDER_NORTHERN_IRELAND_TOURIST_BOARD", -"PROVIDER_INFOGROUP", -"PROVIDER_TNET", -"PROVIDER_CTT_CORREIOS_DE_PORTUGAL", -"PROVIDER_EUROPARC", -"PROVIDER_IUPPITER", -"PROVIDER_MICHAEL_BAUER_INTERNATIONAL", -"PROVIDER_LEPTON", -"PROVIDER_MAPPOINT", -"PROVIDER_GEODATA", -"PROVIDER_RU_GOVERNMENT", -"PROVIDER_RU_FNS_KLADR", -"PROVIDER_BR_GOVERNMENT", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DO_MEIO_AMBIENTE_E_DOS_RECURSOS_NATURAIS_RENOVAVEIS", -"PROVIDER_BR_MINISTERIO_DO_MEIO_AMBIENTE", -"PROVIDER_BR_AGENCIA_NACIONAL_DE_AGUAS", -"PROVIDER_BR_INSTITUTO_BRASILEIRO_DE_GEOGRAFIA_E_ESTATISTICA", -"PROVIDER_BR_FUNDACAO_NACIONAL_DO_INDIO", -"PROVIDER_BR_DEPARTAMENTO_NACIONAL_DE_INFRAESTRUTURA_DE_TRANSPORTES", -"PROVIDER_AZAVEA", -"PROVIDER_NORTHSTAR", -"PROVIDER_COMMEDI", -"PROVIDER_NEXUS_GEOGRAFICS", -"PROVIDER_INFOERA", -"PROVIDER_AD_GOVERNMENT", -"PROVIDER_AD_AREA_DE_CARTOGRAFIA", -"PROVIDER_MAXXIMA", -"PROVIDER_SI_GOVERNMENT", -"PROVIDER_SI_AGENCY_FOR_ENVIRONMENT", -"PROVIDER_TRANSPORT_HI_TECH_CONSULTANTS", -"PROVIDER_L1_TECHNOLOGIES", -"PROVIDER_TELEMEDIA", -"PROVIDER_CDCOM_PROGOROD", -"PROVIDER_MIT_CITYGUIDE", -"PROVIDER_SUNCART", -"PROVIDER_MICROMAPPER", -"PROVIDER_RICHI", -"PROVIDER_FORUM44", -"PROVIDER_SEAT", -"PROVIDER_VALASSIS", -"PROVIDER_NAVICOM", -"PROVIDER_COLTRACK", -"PROVIDER_PSMA_AUSTRALIA", -"PROVIDER_PT_DUTA_ASTAKONA_GIRINDA", -"PROVIDER_CA_GOVERNMENT", -"PROVIDER_STATISTICS_CANADA", 
-"PROVIDER_TOCTOC", -"PROVIDER_RMSI", -"PROVIDER_TRUE_TECHNOLOGY", -"PROVIDER_INCREMENT_P_CORPORATION", -"PROVIDER_GOJAVAS", -"PROVIDER_GEOINFORMATION_GROUP", -"PROVIDER_CYBERSOFT", -"PROVIDER_TSENTR_EFFEKTIVNYKH_TEKHNOLOGIY", -"PROVIDER_EE_GOVERNMENT", -"PROVIDER_EE_MAA_AMET", -"PROVIDER_GASBUDDY", -"PROVIDER_DK_GOVERNMENT", -"PROVIDER_DK_GEODATASTYRELSEN", -"PROVIDER_MURCIA_REGION_GOVERNMENT", -"PROVIDER_CORREIOS", -"PROVIDER_WEST_WORLD_MEDIA", -"PROVIDER_INTERNATIONAL_MAPPING_ASSOCIATION", -"PROVIDER_MEDICARE", -"PROVIDER_POLARIS", -"PROVIDER_TW_GOVERNMENT", -"PROVIDER_TW_MINISTRY_OF_THE_INTERIOR_SURVEYING_AND_MAPPING_CENTER", -"PROVIDER_NORDECA", -"PROVIDER_AFRIMAPPING", -"PROVIDER_OVERDRIVE", -"PROVIDER_PROVIDER_NETWORK_DIRECTORIES", -"PROVIDER_BR_MINISTERIO_DA_SAUDE", -"PROVIDER_DIGITAL_EGYPT", -"PROVIDER_INRIX", -"PROVIDER_ARPINDO", -"PROVIDER_IT_GOVERNMENT", -"PROVIDER_ISTITUTO_GEOGRAFICO_MILITARE", -"PROVIDER_EAST_END_GROUP", -"PROVIDER_INGEOLAN", -"PROVIDER_SEMACONNECT", -"PROVIDER_BLINK", -"PROVIDER_EVGO", -"PROVIDER_CHARGEPOINT", -"PROVIDER_TPL_TRAKKER", -"PROVIDER_OI", -"PROVIDER_MAPARADAR", -"PROVIDER_SINGAPORE_POST", -"PROVIDER_CHARGEMASTER", -"PROVIDER_TESLA", -"PROVIDER_VISICOM", -"PROVIDER_GEOLYSIS", -"PROVIDER_ZEPHEIRA", -"PROVIDER_HUBJECT", -"PROVIDER_PODPOINT", -"PROVIDER_CHARGEFOX", -"PROVIDER_KR_GOVERNMENT", -"PROVIDER_KR_MOLIT", -"PROVIDER_KR_MINISTRY_OF_THE_INTERIOR_AND_SAFETY", -"PROVIDER_CRITCHLOW", -"PROVIDER_EIFRIG", -"PROVIDER_GIREVE", -"PROVIDER_CN_NAVINFO", -"PROVIDER_JAPAN_CHARGE_NETWORK", -"PROVIDER_NOBIL", -"PROVIDER_INDIA_BANKS", -"PROVIDER_INDONESIA_ELECTION_KPU", -"PROVIDER_CAREERS360", -"PROVIDER_SOURCE_LONDON", -"PROVIDER_EVBOX", -"PROVIDER_JP_GOVERNMENT", -"PROVIDER_JP_MINISTRY_OF_THE_ENVIRONMENT", -"PROVIDER_YUMYUM", -"PROVIDER_HWW_AUSTRALIA", -"PROVIDER_CINERGY", -"PROVIDER_MTIME", -"PROVIDER_KULTUNAUT", -"PROVIDER_BLITZ", -"PROVIDER_PIA", -"PROVIDER_INTERPARK", -"PROVIDER_CINEMA_ONLINE", -"PROVIDER_BELBIOS", 
-"PROVIDER_MOVIESEER", -"PROVIDER_SODAMEDYA", -"PROVIDER_ATMOVIES", -"PROVIDER_HOTELBEDS", -"PROVIDER_VERICRED", -"PROVIDER_CIRRANTIC", -"PROVIDER_GOGO_LABS", -"PROVIDER_ELECTRIFY_AMERICA", -"PROVIDER_CMS_MPPUF", -"PROVIDER_DIGIROAD", -"PROVIDER_KONTEX_GEOMATICS", -"PROVIDER_NZ_GOVERNMENT", -"PROVIDER_NZ_LINZ", -"PROVIDER_NZ_DOC", -"PROVIDER_FASTNED", -"PROVIDER_DESTINY_CS", -"PROVIDER_IONITY", -"PROVIDER_EV_CONNECT", -"PROVIDER_PANPAGES", -"PROVIDER_ETECNIC", -"PROVIDER_VOLTA", -"PROVIDER_NISSAN_MEXICO", -"PROVIDER_BMW_GROUP_LATIN_AMERICA", -"PROVIDER_FEDERAL_ELECTRICITY_COMMISSION_MEXICO", -"PROVIDER_VOLVO_CARS_BRASIL", -"PROVIDER_CHARGE_AND_PARKING", -"PROVIDER_DEDUCE_TECHNOLOGIES", -"PROVIDER_SK_TELECOM", -"PROVIDER_ECO_MOVEMENT", -"PROVIDER_GOOGLE_GMS", -"PROVIDER_EASYWAY", -"PROVIDER_PHYSICIAN_COMPARE", -"PROVIDER_HOSPITAL_COMPARE", -"PROVIDER_ENDOLLA_BARCELONA", -"PROVIDER_BE_CHARGE", -"PROVIDER_ONE_NETWORK", -"PROVIDER_CARENAV_DUPLEX", -"PROVIDER_CARENAV_POI", -"PROVIDER_IN_GOVERNMENT", -"PROVIDER_SURVEY_OF_INDIA", -"PROVIDER_E_ON", -"PROVIDER_ELECTRIFY_CANADA", -"PROVIDER_GRIDCARS", -"PROVIDER_DRIVECO", -"PROVIDER_GREEN_ACTION_STUDIOS", -"PROVIDER_GREEN_ACTION_STUDIO", -"PROVIDER_EVINY", -"PROVIDER_MASTERCARD", -"PROVIDER_VATTENFALL", -"PROVIDER_VIETGIS", -"PROVIDER_UNITE", -"PROVIDER_NEOGY", -"PROVIDER_AMPUP", -"PROVIDER_LOOP", -"PROVIDER_ZEST", -"PROVIDER_EZVOLT", -"PROVIDER_JOLT", -"PROVIDER_CHARGESMITH", -"PROVIDER_PLUGO", -"PROVIDER_ELECTRIC_ERA", -"PROVIDER_FLO", -"PROVIDER_DIGITAL_CHARGING_SOLUTIONS", -"PROVIDER_ELECTRIC_PE" -], -"enumDeprecated": [ -false, -false, -true, -false, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -true, -false, -true, -false, -true, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -true, -false, 
-false, -false, -false, -true, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -true, -true, -false, -true, -false, -true, -true, -false, -true, -false, -false, -false, -false, -true, -true, -true, -true, -true, -true, -false, -false, -false, -true, -false, -true, -false, -true, -true, -true, -false, -true, -true, -true, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -false, -false, -false, -false, -false, -false, 
-true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -true, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"ABSTRACT The root of all provider types. This should never be present on an actual feature, but can be useful when calling InCategory.", -"not actually a legal value, used as sentinel", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"All new \"Google\" provider entries must be full ints. 
The next available ID is: 0x111730C2", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This is an internal *only* provider meant for sending wipeout requests to mapfacts.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Never rely on source infos with this provider to identify whether or not a feature is a Prominent Place! Instead, use the proper API, localsearch_clustering::QualityTierHelper::IsProminentPlace().", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Shopping Attributes Discovery", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"", -"The next new \"Google\" provider entries should be placed above.", -"UMBRELLA", -"", -"", -"", -"This is a testing provider for teams that wish to integrate with components of the Geo Data Infrastructure that require a valid provider. 
No production data should ever be sent using this provider.", -"", -"UMBRELLA", -"", -"", -"", -"UMBRELLA", -"0x1117F must not be used, since its range extends the PROVIDER_GOOGLE hierarchy.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated in favor of PROVIDER_GOOGLE_STRUCTURED_DATA (for attributes) and PROVIDER_GOOGLE_GEO_TIGER (for categories).", -"", -"0x1117FF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. aka Local AI.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"No data is obtained from this provider. It is only used to identify features that must be served on MapMaker.", -"", -"", -"", -"", -"", -"", -"0x1117FFF should not be used, since its range further extends the PROVIDER_GOOGLE hierarchy. Synthetically generated areas (sublocalities/neighborhoods/ postal codes/etc) based on dropped terms from approximate geocoding. More info on go/syntheticareas.", -"", -"", -"", -"Similar to Google Transit, a provider that aggregates positions of bicycle rental points that we have agreements with to show on maps", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"This range has been depleted. 
For new Ids see PROVIDER_GOOGLE_SUBRANGE above.", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"Small Scale International Boundary Lines", -"", -"NOAA", -"PGC", -"USDA", -"National Provider Identifier Registry", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"old name for PROVIDER_NAVIGO", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"See b/33687395", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"UMBRELLA", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"ABSTRACT", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"ABSTRACT", 
-"IBAMA", -"MMA", -"ANA", -"IBGE", -"FUNAI", -"DNIT", -"", -"", -"", -"", -"", -"ABSTRACT", -"Department of Cartography", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"0x12 and 0x120 are not to be used. OOO CET", -"ABSTRACT", -"Estonian Land Board", -"", -"ABSTRACT", -"Danish Geodata Agency", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"Ministry of land, infrastructure and transport, \uad6d\ud1a0\uad50\ud1b5\ubd80, Guktogyotongbu", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Use PROVIDER_FI_NATIONAL_ROAD_ADMINISTRATION.", -"", -"ABSTRACT", -"Land Information New Zealand", -"NZ Department of Conservation", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"UMBRELLA", -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Note: Next available value is 0x1275." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRankDetailsProto": { -"description": "This message is embedded within a FeatureProto. It has rank calculation details such as available rank signals and rank signal mixer used to compute final rank. For more details, see the Oyster Rank wiki page: http://wiki.corp.google.com/twiki/bin/view/Main/OysterRank", -"id": "GeostoreRankDetailsProto", -"properties": { -"signal": { -"description": "A list of signals. 
Each one is extracted separately by a SignalExtractor.", -"items": { -"$ref": "GeostoreRankSignalProto" -}, -"type": "array" -}, -"signalMixerType": { -"description": "The signal mixer that was used to calculate the rank.", -"enum": [ -"MIXER_INVALID", -"MIXER_MISSING", -"MIXER_ADDRESS_AREA", -"MIXER_ROUTE_SEGMENT_INTERSECTION", -"MIXER_POLITICAL_EUROPA", -"MIXER_POLITICAL_AREA", -"MIXER_POLITICAL", -"MIXER_COUNTRY_EUROPA", -"MIXER_COUNTRY_AREA", -"MIXER_COUNTRY", -"MIXER_LOCALITY", -"MIXER_LOCALITY_GEOWIKI", -"MIXER_LOCALITY_EUROPA", -"MIXER_LOCALITY_AREA", -"MIXER_RIVER", -"MIXER_LENGTH_WEBSCORE", -"MIXER_SKENERGY", -"MIXER_GEOCENTRE_GEOCODED_ADDRESS", -"MIXER_PLACERANK", -"MIXER_TRANSIT", -"MIXER_LOCALITY_EUROPA_AREA", -"MIXER_WEBSCORE", -"MIXER_LOCALITY_MAPDATA_SCIENCES", -"MIXER_SUBLOCALITY_MAPDATA_SCIENCES", -"MIXER_PEAK", -"MIXER_BUILDING", -"MIXER_RESERVATION", -"MIXER_AIRPORT", -"MIXER_AREA", -"MIXER_MANAGER", -"MIXER_TEST_1", -"MIXER_TEST_2", -"MIXER_TEST_3", -"MIXER_TEST_4", -"MIXER_TEST_5" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"DEPRECATED", -"", -"", -"DEPRECATED", -"", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"These should never be present on a feature.", -"", -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRankSignalProto": { -"description": "This message is embedded in the RankDetailsProto (below). It represents one rank signal, which is a floating point value estimating the Oyster Rank of the feature.", -"id": "GeostoreRankSignalProto", -"properties": { -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this signal." -}, -"rank": { -"description": "A value in the range [0, 1] estimating Oyster Rank according to this signal. Non-provider specific signals (e.g. SIGNAL_POPULATION) are interpreted by some common code in the ranking pipeline. 
Because of that, data providers should leave this field empty when setting such signals (so that the rank assignment can be uniform across all features regardless of contributing data providers). On the other hand, provider-specific signals (e.g. SIGNAL_ZENRIN_CITY_CATEGORY) are required to specify the rank field (it is not optional for them). That is because no code other than that of the provider itself will be able to fill in a meaningful value later on. We don't want clients to be reading from the raw_scalar / raw_string fields to interpret the data.", -"format": "float", -"type": "number" -}, -"rawScalar": { -"description": "The raw scalar value that was used to compute 'rank' above. The meaning of this attribute changes depending on the signal type.", -"format": "float", -"type": "number" -}, -"rawString": { -"description": "The raw string value that was used to compute 'rank' above. The meaning of this attribute changes depending on the signal type.", -"type": "string" -}, -"type": { -"enum": [ -"SIGNAL_UNKNOWN", -"SIGNAL_LENGTH", -"SIGNAL_AREA", -"SIGNAL_ADDRESS", -"SIGNAL_LISTING", -"SIGNAL_ROAD_PRIORITY", -"SIGNAL_POI_COUNT", -"SIGNAL_WEBSCORE", -"SIGNAL_PATHRADIUS_LENGTH_METERS", -"SIGNAL_PATHRADIUS_LENGTH_SEGMENTS", -"SIGNAL_PATHRADIUS_POPULARITY", -"SIGNAL_PEAK_ELEVATION_PROMINENCE", -"SIGNAL_ROAD_SEGMENT_COUNT", -"SIGNAL_POI_SCORE", -"SIGNAL_ATTRACTIONS_SCORE", -"SIGNAL_HAND_RANKED_LOCALITY_PROMINENCE", -"SIGNAL_POPULATION", -"SIGNAL_GDP", -"SIGNAL_EUROPA_CLASS", -"SIGNAL_RMF_SOURCE_RANK", -"SIGNAL_MDS_SOURCE_RANK", -"SIGNAL_MULTINET_SOURCE_RANK", -"SIGNAL_LOCALXML_MANUAL_RANK", -"SIGNAL_TRANSIT_LINE", -"SIGNAL_TRANSIT_TRAIN_DEPARTURE_COUNT", -"SIGNAL_TRANSIT_METRO_DEPARTURE_COUNT", -"SIGNAL_TRANSIT_BUS_DEPARTURE_COUNT", -"SIGNAL_TRANSIT_OTHER_DEPARTURE_COUNT", -"SIGNAL_TRANSIT_TRAIN_LINE_COUNT", -"SIGNAL_TRANSIT_METRO_LINE_COUNT", -"SIGNAL_TRANSIT_BUS_LINE_COUNT", -"SIGNAL_TRANSIT_OTHER_LINE_COUNT", -"SIGNAL_TRANSIT_STATION_LOCAL_RANK", 
-"SIGNAL_TRANSIT_STATION_GLOBAL_RANK", -"SIGNAL_ORION_LEVEL", -"SIGNAL_GEOCENTRE_ADDRESS_RANK", -"SIGNAL_GOOGLE_3DWAREHOUSE_RANK", -"SIGNAL_SKENERGY_CATEGORY", -"SIGNAL_GOOGLE_GEOWIKI_USER_RANK", -"SIGNAL_WIKIPEDIA_ARTICLES", -"SIGNAL_WIKIPEDIA_ARTICLES_IN_OFFICIAL_LANGUAGE", -"SIGNAL_KML_PLACEMARKS", -"SIGNAL_KML_SOURCES", -"SIGNAL_PANORAMIO_USERS", -"SIGNAL_GOOGLE_MAPSHOP_USERS", -"SIGNAL_GOOGLE_LOCALSEARCH_DIRECTORY_INFOS", -"SIGNAL_GOOGLE_MAPS_NAVBOOST_CLICKS", -"SIGNAL_GOOGLE_MAPS_NAVBOOST_CLICKTHROUGH_RATE", -"SIGNAL_GOOGLE_RBL_CLICKS", -"SIGNAL_GOOGLE_RBL_CLICK_FRACTION", -"SIGNAL_GOOGLE_AUTHORITYPAGE_PAGERANK", -"SIGNAL_GOOGLE_AUTHORITYPAGE_PAGERANK_CONFIDENCE", -"SIGNAL_GOOGLE_REVIEWS", -"SIGNAL_GOOGLE_WEB_QUERYVOL", -"SIGNAL_GOOGLE_WEBPAGE_REFERENCE_DOMAINS", -"SIGNAL_GOOGLE_LISTING_IMPRESSIONS", -"SIGNAL_GOOGLE_INFOWINDOW_VIEWS", -"SIGNAL_GOOGLE_DIRECTION_REQUESTS", -"SIGNAL_GOOGLE_HOMEPAGE_CLICKS", -"SIGNAL_GOOGLE_CHAIN_STORES", -"SIGNAL_FLICKR_USERS", -"SIGNAL_GOOGLE_LEANBACK_TOURS", -"SIGNAL_GOOGLE_LOCALSEARCH_PLACERANK", -"SIGNAL_WIKIPEDIA_WIKI_SCORE", -"SIGNAL_ZENRIN_CITY_CATEGORY", -"SIGNAL_ZENRIN_BUILDING_CLASS", -"SIGNAL_ZENRIN_PEAK_CLASS", -"SIGNAL_PLACE_INSIGHTS_LANDMARK", -"SIGNAL_PLACE_INSIGHTS_POPULARITY", -"SIGNAL_PLACE_INSIGHTS_PROMINENCE", -"SIGNAL_PLACE_INSIGHTS_APPROACHABILITY", -"SIGNAL_PLACE_INSIGHTS_TOTAL_ROAD_SEGMENT_USAGE" -], -"enumDeprecated": [ -false, -false, -false, -true, -true, -false, -true, -false, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -true, -false, -true, -false, -false, -true, -false, -true, -true, -true, -false, -false, -true, -false, -false, -false, -true, -false, -false, -false, -false, -false, -true, -false, -false, -false, -true, -true, -true, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"This is reserved 
as a sentinel value. It should never be used. An unknown signal.", -"Magnitude for one-dimensional features like segments.", -"Magnitude for features with polygonal geometry. Derived from the number of other features that use this feature as an address component.", -"DEPRECATED Derived from the number of local business listings that, once reverse-geocoded, have this feature as an address component.", -"DEPRECATED", -"This signal will be set on all routes and is derived by the value of the priority of all segments composing the route. It is a \"derived\" signal instead of a canonical one because it is not only about inheriting some child segment's priority. We actually take into consideration the priorities of all children segments to come up with a raw value for this signal. Derived from the number of POI that use this feature as an address component.", -"DEPRECATED", -"This signal derived from the number of documents in DocJoin, which contains keywords of the source feature. For each raw feature, we extract keywords from the feature's name and address. We search the keywords in DocJoin (now only 4B) to get the number of webpages which contains all the keywords of the feature, eg, for Quanjude in Beijing which is a famous restaurant in China, we consider Beijing and Quanjude as its keywords, the page containing both Beijing and Quanjude will be counted in. The number will be mapped by logarithm function into [0, 1]. This signal is based on a simple assumption: the more the name appears in webpage, the more famous it is. These signals are calculated by the Path Radius algorithm, using Pathfinder to figure out in how big a neighborhood this segment is used as a thoroughfare. The popularity is simply the fraction of all paths that use this segment.", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"Signal based on elevation and prominence values of peaks.", -"Signal based on the number of road segments referring a feature. 
The number could be obtained by either looking RELATION_CONTAINED_BY relation or counting children of the feature.", -"Signal based on placerank values of establishments that use this feature as an address or located within it (as determined by relations).", -"Signal based on placerank values of (tourist) attraction establishments that are related to this feature through containment.", -"Signal indicating a city of particular prominence independent of its other attributes. Currently only implemented for Japanese localities, primarily based on official government city statuses.", -"", -"", -"NOTE(jdkim): All rank signals below are provider specific ones, so we use this enum value to identify the enum range for provider specific signals.", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"", -"", -"", -"", -"", -"", -"", -"Score of a particular station in relation to the stations near it. E.g., when displaying list of nearby stations we could prioritize the higher ranked ones.", -"Score of a particular station in relation to all the stations globally. This is valuable for data cleanups, modeling, and proactive checks that could be targeted at the \"Top Stations\" to ensure high impact.", -"", -"DEPRECATED", -"DEPRECATED", -"Rank derived from feature popularity asserted by users. Higher rank values are almost always moderated to verify accuracy, hence reliable.", -"DEPRECATED", -"These are primarily used for computing Placerank, which is now signal 3200.", -"DEPRECATED", -"", -"", -"DEPRECATED", -"", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"", -"", -"DEPRECATED", -"", -"", -"", -"DEPRECATED", -"", -"", -"", -"", -"", -"DEPRECATED", -"", -"", -"Signal based on WIKI_SCORE, which is brought by DataSourceProvider::PROVIDER_WIKIPEDIA.", -"DEPRECATED Signal for buildings in Japan. This signal is a scale of the map this building label should be rendered.", -"DEPRECATED Signal for peaks in Japan. 
This signal is a scale of the map this peak label should be rendered.", -"DEPRECATED", -"Place Insights landmarkiness fundamental aspect. This score combines the popularity and prominence aspects below with the Kerouac tourist score. Details: go/plank-landmark", -"Place Insights popularity fundamental aspect. This score combines physical visits (Hulk) and direction searches. Details: go/plank-popularity", -"Place Insights prominence fundamental aspect. This score is based on WebRef and the number of photos of a place. Details: go/plank-prominence DEPRECATED: No longer populated, because the data sources were deprecated. Not yet marked as deprecated because some features might still contain the data.", -"Place Insights approachability fundamental aspect. This score indicates whether users can interact with a place without appointment. It is computed from the GCIDs of the place. Details: go/plank-approachability", -"Number of times a user drives the entire length of the segment according to snapped GLS tracks. This is obtained by aggregating the snapped GLS data generated by the traffic team. Design document: go/aggregated-road-traffic-mapfacts" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRawDataProto": { -"description": "A RawDataProto is a key-value pair that represents arbitrary source data from a particular provider. Raw data can be attached to features using their source_info field.", -"id": "GeostoreRawDataProto", -"properties": { -"key": { -"description": "The key associated with this data item. For source data in shape file format, this will typically be a column name. Keys need to be unique with respect to a particular data source (see DataSourceProto), but they do not need to be globally unique. You can look up the documentation for a key (e.g. 
a longer label and description) by following the source_id link of the parent SourceInfoProto, which takes you to a TYPE_DATA_SOURCE feature, and then looking up the corresponding RawMetadataProto object for this key in that feature's optional data_source field.", -"type": "string" -}, -"valueString": { -"description": "All data items are represented as strings, the logic being that it is easy to convert other data types to strings, and there is no need to access this data efficiently.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRawMetadataProto": { -"id": "GeostoreRawMetadataProto", -"properties": { -"conflationMethod": { -"description": "Method to use when conflating together RawDataProto values at the same key NB: If you add a new ConflationMethod, then you must add the corresponding logic to MergeRawData to conflate the RawDataProto values using this method.", -"enum": [ -"CONFLATION_PICK_FIRST_VALUE", -"CONFLATION_UNION_CSV", -"CONFLATION_SUM" -], -"enumDescriptions": [ -"Simply picks first value from the set of values to be conflated together. If target feature already has a value, then that is the value kept.", -"Parses all values as a comma-separated lists and takes the union of all elements from all lists (e.g. no duplicates) to create a new comma- separated list.", -"Parses all values as int32s and outputs their sum as the final value." -], -"type": "string" -}, -"description": { -"description": "Self-contained documentation about what this field represents and how its values are encoded.", -"type": "string" -}, -"key": { -"description": "The key being described.", -"type": "string" -}, -"label": { -"description": "A longer, human-readable name associated with this key. The label might be used in a data explorer tool, for example.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRectProto": { -"description": "A latitude-longitude rectangle, represented as two diagonally opposite points \"lo\" and \"hi\". 
The rectangle is considered to be a closed region, i.e. it includes its boundary. The latitude bounds must be in the range -90 to 90 degrees inclusive, and the longitude bounds must be in the range -180 to 180 degrees inclusive. Various cases include: - If lo == hi, the rectangle consists of a single point. - If lo.longitude > hi.longitude, the longitude range is \"inverted\" (the rectangle crosses the 180 degree longitude line). - If lo.longitude == -180 degrees and hi.longitude = 180 degrees, the rectangle includes all longitudes. - If lo.longitude = 180 degrees and hi.longitude = -180 degrees, the longitude range is empty. - If lo.latitude > hi.latitude, the latitude range is empty.", -"id": "GeostoreRectProto", -"properties": { -"hi": { -"$ref": "GeostorePointProto" -}, -"lo": { -"$ref": "GeostorePointProto" -} -}, -"type": "object" -}, -"GeostoreRegionSpecificNameProto": { -"description": "This protocol buffer supports a name per region per language, allowing it to represent the name of a given feature in different regions and languages. For example, the Persian Gulf has different English names in UAE versus Lebanon; each would need to be represented as distinct RegionSpecificNameProtos.", -"id": "GeostoreRegionSpecificNameProto", -"properties": { -"displayableAsAlternativeName": { -"description": "If true, this region specific name should be appended, in parentheses, to the appropriate name from FeatureProto.name, for the default rest of world behavior.", -"type": "boolean" -}, -"name": { -"$ref": "GeostoreNameProto", -"description": "Name to be used for this feature in a specific region and language." 
-}, -"regionCode": { -"description": "Region code (or other identifier) for the region.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRegulatedAreaProto": { -"description": "A collection of information that applies to a polygonal area.", -"id": "GeostoreRegulatedAreaProto", -"properties": { -"restriction": { -"description": "The set of restrictions that apply to a zone. These restrictions may limit the routability of every segment contained within the defined feature.polygon. Repeated restrictions are treated collectively as an OR meaning that segments in the zone are only routable if none of the restrictions apply. If any segments within the defined polygon should not have these restrictions applied, they must list this regulated area's feature id in their feature.exempt_regulated_area field.", -"items": { -"$ref": "GeostoreRestrictionProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRelationProto": { -"description": "This message is embedded within a FeatureProto, and represents a geographic or logical relationship of that feature to some other feature. Note that some relation types are there purely for the purpose of grouping together other relation types. They are noted as ABSTRACT in comments. Other relation types are no longer supported / in use. They are noted as DEPRECATED in comments (and marked with the standard deprecated option, too). Other relation types are reserved for future use or just not intended for use at all, for various internal reasons. They are noted as RESERVED in comments. 
WARNING: Updates to this proto within a FeatureProto's related_feature field handled by standalone pipelines and are NOT atomic with regard to updates to the features being referenced; we do not guarantee that a given MapFacts snapshot will be consistent between this field and the related features.", -"id": "GeostoreRelationProto", -"properties": { -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this relation." -}, -"otherFeatureCountryCode": { -"description": "If and only if the other feature is of TYPE_COUNTRY, the 2-letter country code. This is the FLAG_COUNTRY_CODE_2 name of the country component.", -"type": "string" -}, -"otherFeatureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The feature ID of the feature to which we're relating. WARNING: the related feature does not necessarily have a bound that encloses this feature, so in a bucketing MapReduce, you may not be able to follow all relationships. Relations that use strong references are annotated above but you can also refer to IsRelationStrong() in geostore/base/public/relation.h." -}, -"otherFeatureName": { -"description": "RESERVED", -"items": { -"$ref": "GeostoreNameProto" -}, -"type": "array" -}, -"otherFeatureTerritorialAdministrator": { -"description": "If and only if the other feature is of TYPE_DISPUTED_AREA, the territorial administrator found in its GeopoliticalAttachmentProto.administered_by field, if any. 
Since this string is copied exactly, it may be a 2-letter country code or another type of descriptive string.", -"type": "string" -}, -"otherFeatureType": { -"description": "The type of the feature to which we're relating.", -"format": "int32", -"type": "integer" -}, -"overlapFraction": { -"deprecated": true, -"description": "** DEPRECATED ** If relation is exactly RELATION_OVERLAPS but not any of its subcategories, overlap_fraction contains an estimate of the fraction of the geometry of this feature that intersects with the other feature, ranging from 0.0 to 1.0. Note that this is a rough estimate based on cell coverings, and may not be very accurate. In particular, values of 0.0 and 1.0 are possible, even though in principle they should not be.", -"format": "float", -"type": "number" -}, -"relation": { -"description": "The relationship of the feature that contains this RelationProto to the feature other_feature_id. Note the relation_is_reversed field below. Some relations imply weak references, other strong ones. 
Strong references are annotated above but you can also refer to IsRelationStrong() in geostore/base/public/relation.h.", -"enum": [ -"RELATION_OVERLAPS", -"RELATION_CONTAINED_BY", -"RELATION_EQUAL_TO", -"RELATION_POLITICAL_DEPRECATED", -"RELATION_CAPITAL_OF", -"RELATION_DISAMBIGUATED_BY", -"RELATION_NEIGHBOR_OF", -"RELATION_OPPOSITE_TO", -"RELATION_NEXT_TO", -"RELATION_RIGHT_OF", -"RELATION_LEFT_OF", -"RELATION_BEHIND", -"RELATION_IN_FRONT_OF", -"RELATION_SAME_BUILDING", -"RELATION_ABOVE", -"RELATION_BELOW", -"RELATION_NEAR", -"RELATION_ORGANIZATIONALLY_PART_OF", -"RELATION_DEPARTMENT_OF", -"RELATION_WORKS_AT", -"RELATION_INDEPENDENT_ESTABLISHMENT_IN", -"RELATION_ON_LEVEL", -"RELATION_OCCUPIES", -"RELATION_BUSINESS_LIFE_CYCLE", -"RELATION_BUSINESS_MOVED", -"RELATION_BUSINESS_REBRANDED", -"RELATION_MEMBER_OF_CHAIN", -"RELATION_AUTHORIZED_DEALER_FOR_CHAIN", -"RELATION_SUBSIDIARY_OF", -"RELATION_PRIMARILY_OCCUPIED_BY", -"RELATION_VARIATION", -"RELATION_HAS_VARIANT", -"RELATION_VARIANT_OF", -"RELATION_VARIANT_SIBLING", -"RELATION_CLIENT_DEFINED" -], -"enumDeprecated": [ -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -true, -true, -false -], -"enumDescriptions": [ -"In the real world, this feature's geographic extent intersects the other feature's, but does not contain it. Note that the presence or absence of a relation may contradict the actual geometry in the FeatureProto. In these cases, the client should assume that the geometry of the feature is imprecise. Certain pairs of features are considered disjoint even if the geometries overlap. 
Allowed source types: - TYPE_ANY (but with geometry or children) Allowed destination types: - TYPE_ANY (but with polygonal geometry)", -"In the real world, the geographical extent of the feature is contained by that of the other feature. Note that this relation implies overlap. As with overlap, the frame-of-reference is the real world, not necessarily the (imprecise) geometry in the FeatureProtos. There may be other semantic rules that govern the assignment of this relation. Two features cannot contain each other - instead they will have a RELATION_EQUAL_TO relation (see below). strong reference", -"In the real world, this feature's geographic extent is exactly equal to the other feature's geographic extent. Note that this relation implies containment and overlap.", -"DEPRECATED", -"This feature is the capital of the other feature. Allowed source types: - TYPE_POLITICAL Allowed destination types: - TYPE_POLITICAL (of higher political level) strong reference", -"ABSTRACT, RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"RESERVED", -"ABSTRACT", -"Used primarily to list co-existing departments of businesses such as universities, hospitals, department stores, etc. This is set from feature A to feature B when A is physically located within the boundaries of the compound feature occupied by B, AND A is a subsidiary or department of B (i.e. both have the same management/ownership). e.g. department -> hospital, department -> university Allowed source types: - TYPE_ESTABLISHMENT_POI Allowed destination types: - TYPE_ESTABLISHMENT_POI", -"Used to relate an individual practitioner (doctor, lawyer, etc.) with the business they work at (hospital, law firm, etc.) 
Allowed source types: - TYPE_ESTABLISHMENT_POI - TYPE_ESTABLISHMENT_SERVICE Allowed destination types: - TYPE_ESTABLISHMENT_POI", -"Used primarily for the \"mall directory\" use-case, this is set from feature A to feature B when A is physically located within the boundaries of the compound feature occupied by B AND A as an entity is independent of B, i.e. A and B have different management/ownership. e.g. store -> mall Allowed source types: - TYPE_ESTABLISHMENT_POI Allowed destination types: - TYPE_ESTABLISHMENT_POI", -"The feature that represents the level/floor the feature exists on. Allowed source types: - TYPE_CARTOGRAPHIC (at most 1 such relation) - TYPE_COMPOUND_SECTION (at most 1 such relation) - TYPE_ENTRANCE (at most 1 such relation) - TYPE_ESTABLISHMENT_POI - TYPE_INTERSECTION (at most 1 such relation) - TYPE_SEGMENT (at most 2 such relations) - TYPE_TERMINAL_POINT (at most 1 such relations) Allowed destination types: - TYPE_LEVEL strong reference", -"This feature physically occupies the other feature. Allowed source types: - TYPE_ESTABLISHMENT_POI Allowed destination types: - TYPE_COMPOUND Consumers should use the geometry of the target feature instead of the geometry of this feature. strong reference", -"ABSTRACT", -"e.g. \u201cespn zone\" moved from Times Square to Boston. The feature representing the business at the old location, or with the old service area, will refer to the one representing the business at the new location, or with the new service area, with this relation type. If a feature has this relation, its existence should show that the feature has closed and the close reason should be set to MOVED. Allowed source types: - TYPE_ESTABLISHMENT Allowed destination types: - TYPE_ESTABLISHMENT", -"e.g. \"Holiday Inn\" rebranded to \"Double Tree\". The feature representing \"Holiday Inn\" will refer to the feature representing \"Double Tree\" with this relation type. 
If a feature has this relation, its existence should show that the feature has closed and the close reason should be set to REBRANDED. Allowed source types: - TYPE_ESTABLISHMENT Allowed destination types: - TYPE_ESTABLISHMENT", -"Used by specific chain stores/franchises to refer to their parent chain. e.g. an individual Walmart store is a member of the \"Walmart\" chain. Transit stations can be members of multiple chains (transit agencies). All other establishments can have at most one explicit RELATION_MEMBER_OF_CHAIN relation. Subtypes may have different requirements. TYPE_TRANSIT_AGENCY is a temporarily allowed destination type while we migrate transit stations <-> transit agencies to a new representation (b/197741661) Allowed source types: - TYPE_ESTABLISHMENT Allowed destination types: - TYPE_BUSINESS_CHAIN - TYPE_TRANSIT_AGENCY strong reference", -"Indicates that a feature sells merchandise for a given brand chain (e.g. Honda vehicles). Source currently restricted to features with gcid:car_dealer. strong reference", -"Used by specific sub chain to refer to their parent chain. e.g. \"Walmart Pharmacy\" chain is a member of the \"Walmart\" chain. Currently this is only used by transit agencies. Talk to chains-eng team before using this relation type on other chains. A business chain can have at most one such relation. Allowed source types: - TYPE_BUSINESS_CHAIN - TYPE_TRANSIT_AGENCY Allowed destination types: - TYPE_BUSINESS_CHAIN - TYPE_TRANSIT_AGENCY strong reference", -"Indicates which logical entity is the main occupant of a compound. A compound can have at most one such relation, and the target feature for that relation must refer back to the compound via a RELATION_OCCUPIES relation. 
Allowed source types: - TYPE_COMPOUND Allowed destination types: - TYPE_ESTABLISHMENT_POI strong reference", -"ABSTRACT, DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"DEPRECATED", -"RESERVED" -], -"type": "string" -}, -"relationIsReversed": { -"description": "RESERVED", -"type": "boolean" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a relation. Never set in MapFacts." -} -}, -"type": "object" -}, -"GeostoreRestrictionGroupProto": { -"description": "A restriction group represents common properties of a set of restrictions on segments that are associated with the same underlying cause across a geographic region. Every segment referenced by this restriction group should have at least one restriction that refers backs to this restriction group. The standard feature properties have the following interpretations: name - A name that represents the name for this restriction group. kg_property - A reference back to a KG event in case this restriction group belongs to an event in KG. /geo/type/restriction_group/associated_event contains a mid to the associated event.", -"id": "GeostoreRestrictionGroupProto", -"properties": { -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this restriction group." -}, -"segment": { -"description": "FeatureId of all segments that have a RestrictionProto referring back to this RestrictionGroup.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRestrictionProto": { -"description": "A restriction is an expression that limits when an action can be taken. Each restriction has a set of conditions. If all of the conditions are true, then the restriction applies and the action cannot be taken. For example, the restriction \"no turns 3-5pm except buses\" would have two conditions: \"time is 3-5pm\" and \"vehicle is not a bus\". 
If both of these conditions apply, the restriction is true, and the turn is prohibited. Multiple restrictions may apply to the same action. Clients handle this by always declaring RestrictionProto as a \"repeated\" element. The semantics of having multiple restrictions are that if any restriction applies, then the action cannot be taken. In other words, restrictions are OR-ed together. Putting all of this together, a set of RestrictionProtos can be interpreted as a bool expression in disjunctive normal form: (A and B) or (D and E and F) or (G and H) The action is prohibited if this expression is true. Note that a restriction with no conditions is always true, i.e. its action is always prohibited. NOTE: RestrictionProtos are often compared against one another (e.g. to check for duplicate/redundant restrictions) by canonicalizing them via GetCanonicalRestriction() in google3/geostore/base/internal/restriction.cc. Any fields that don't contribute to the definition of a restriction in the real world should be bundled with the annotative fields near the bottom and excluded in GetCanonicalRestriction(). LINT.IfChange", -"id": "GeostoreRestrictionProto", -"properties": { -"autonomousDrivingProducts": { -"description": "The restriction only applies in these specific autonomous driving product scenarios. NOTE: This should only be set on restrictions with TRAVEL_AUTONOMOUS_VEHICLE travel mode.", -"items": { -"enum": [ -"UNKNOWN", -"HD_L4", -"HD_L2", -"ADAS", -"AUTO_DRIVING_EXPERIENCE" -], -"enumDescriptions": [ -"Indicates that specific product requirements are unknown. It is best to assume that this applies to all autonomous driving scenarios.", -"HD Maps Level 4 product.", -"HD Maps Level 2 product.", -"General advanced driver assist scenario.", -"Geo Auto Driving Experience products (go/dx)." 
-], -"type": "string" -}, -"type": "array" -}, -"intersectionGroup": { -"$ref": "GeostoreFeatureIdProto", -"description": "Actually *required* if style=STYLE_IN_OUT, otherwise forbidden. Typically the intersection group type is artifact, but either artifact or logical groups can be used for STYLE_IN_OUT restrictions." -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this restriction." -}, -"restrictionGroup": { -"$ref": "GeostoreFeatureIdProto", -"description": "Restriction group this restriction belongs to." -}, -"restrictionToken": { -"description": "A token that can be used to identify the version of the data about this restriction.", -"type": "string" -}, -"schedule": { -"$ref": "GeostoreTimeScheduleProto", -"description": "When specified, restriction applies only at particular times (operating hours or times of the year: reversing lanes, seasonal roads, no left turns from 3-5pm Mon-Fri except holidays). Otherwise, restriction is in effect at all times." -}, -"scope": { -"description": "The scope that the restriction applies to. - SCOPE_DIRECTION means the segment/sibling pair is restricted in the direction of the segment that contains this RestrictionProto. For segment/sibling pairs with pedestrian facilities (and thus side-of-road routing) the RestrictionProto restricts both facilities in the direction of the segment (assuming that the restriction applies to travel mode TRAVEL_PEDESTRIAN). - SCOPE_SIDE means the RestrictionProto applies only to the side of road that the containing segment represents. That sibling's pedestrian facility is restricted in both directions. Schema constraints: - SCOPE_SIDE must be set if and only if travel_mode == [TRAVEL_PEDESTRIAN] and the segment containing the restriction has the pedestrian_facility field set and is not set to PEDESTRIAN_FACILITY_UNKNOWN. Such restrictions must have no subpath. 
- All other restrictions must have this field set to SCOPE_DIRECTION (whether explicitly or implicitly). This distinction is necessary for cases such as pedestrian facility on one-way segment/sibling roads. NOTE: This field only makes sense in the context of segment restrictions, not lane restrictions.", -"enum": [ -"SCOPE_DIRECTION", -"SCOPE_SIDE" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"style": { -"description": "Restriction Style defines the semantics of the subpath field, as defined above in the documentation of subpath.", -"enum": [ -"STYLE_CONTIGUOUS", -"STYLE_SINGLE", -"STYLE_TURN", -"STYLE_IN_OUT" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"subpath": { -"description": "\"subpath\" specifies the GeoStore segments that this restriction applies to, according to the restriction_style field below. Segments that are referenced by this subpath field also refer to this feature back via the same subpath field. For all styles of restriction, all segments in the subpath must have identical copies of the restriction. In other words, restrictions are duplicated along every segment in the subpath. Note that subpaths of length 1 do not have any purpose and are disallowed. Note that it is possible to represent restrictions either using STYLE_CONTIGUOUS, or depending on the length of the subpath, one of the more specific STYLE_SINGLE, STYLE_TURN, or STYLE_IN_OUT. New code should use the more specific alternatives if possible, as they support instant updates. For restriction_style == STYLE_CONTIGUOUS (the default): \"subpath\" can either be empty, for a single-segment restriction, or it specifies exactly the sequence of segments which this restriction applies to. The subpath may be used to specify a turn restriction (a subpath of length 2) or to prohibit more complex maneuvers. 
For example, when merging onto a road from the right-hand side it may not be possible to make an immediate left turn due to insufficient time to cross the intervening lanes or the presence of a physical barrier. This would be indicated by a subpath restriction of length 3 or more. For restriction_style == STYLE_SINGLE: The subpath field of the Restriction must be empty. The restriction applies only to the segment it is attached to. There must not be an intersection group specified. For restriction_style == STYLE_TURN: The subpath field of the Restriction must contain exactly two segments. The first is called the \"in_segment\", the second is the \"out_segment\". They must be contiguous, i.e. the end intersection of the in_segment is the start intersection of the out_segment. The restriction applies only to a direct maneuver from the in_segment to the out_segment. Other paths from the in_segment to the out_segment are not restricted. There must not be an intersection group specified. For restriction_style == STYLE_IN_OUT: The subpath field of the Restriction must contain exactly two segments. The first is called the \"in_segment\", the second is the \"out_segment\". Note that the two segments define paths, but may not actually be one. The end intersection of the in_segment must be in an intersection group which also contains the start intersection of the out_segment. The in- and out-segments are not required to be adjacent, but may be. Either way, the restriction applies to any path from the in_segment to the out_segment through the intersection group, not just direct turns. The intersection_group must be specified. 
Note that clients which read restrictions and need to know which paths are restricted by a given IN_OUT restriction must expand the IN_OUT restriction by finding all paths through the intersection group from the in_segment to the out_segment.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a restriction. Never set in MapFacts." -}, -"travelMode": { -"description": "Restriction applies only to the given travel modes. This field should always be set, but may be missing in old data. WARNING: Restrictions with no travel modes are DEPRECATED. Historically, no travel modes has meant \"all travel modes\", except they didn't really even mean that, because Pathfinder would use a complex set of heuristics to interpret the \"correct\" travel modes. Pathfinder currently (last updated August 2013) has heuristics to cope with incomplete data that reduce or extend application of the specified restrictions to pedestrians or bicycles. We are actively working to remove these heuristics and replace them with explicit, correct travel modes in the data. See b/8746491.", -"items": { -"enum": [ -"TRAVEL_ANY", -"TRAVEL_MOTOR_VEHICLE", -"TRAVEL_AUTO", -"TRAVEL_CARPOOL", -"TRAVEL_MOTORCYCLE", -"TRAVEL_BUS", -"TRAVEL_TRUCK", -"TRAVEL_DELIVERY", -"TRAVEL_TAXI", -"TRAVEL_EMERGENCY", -"TRAVEL_THROUGH_TRAFFIC", -"TRAVEL_AUTONOMOUS_VEHICLE", -"TRAVEL_PEDESTRIAN", -"TRAVEL_BICYCLE" -], -"enumDescriptions": [ -"ABSTRACT", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"type": { -"description": "LINT.ThenChange(//depot/google3/google/geo/entities/v1/fields/segment.proto) clang-format on The type of restriction. This is not a condition, but rather tells you what kind of restriction it is. 
This field should always be set.", -"enum": [ -"RESTRICTION_TRAVEL_RESTRICTED", -"RESTRICTION_ILLEGAL", -"RESTRICTION_PHYSICAL", -"RESTRICTION_LOGICAL", -"RESTRICTION_GATE", -"RESTRICTION_CONSTRUCTION", -"RESTRICTION_SEASONAL_CLOSURE", -"RESTRICTION_PRIVATE", -"RESTRICTION_WRONG_WAY", -"RESTRICTION_TERMINAL", -"RESTRICTION_PAYMENT_REQUIRED", -"RESTRICTION_TOLL_BOOTH", -"RESTRICTION_USAGE_FEE_REQUIRED", -"RESTRICTION_ENTRANCE_FEE_REQUIRED", -"RESTRICTION_VIGNETTE_REQUIRED", -"RESTRICTION_ADVISORY", -"RESTRICTION_HIGH_CRIME", -"RESTRICTION_POLITICALLY_SENSITIVE", -"RESTRICTION_DISTURBED_BY_MAINTENANCE", -"RESTRICTION_CHECKPOINT", -"RESTRICTION_REGION_SPECIFIC" -], -"enumDescriptions": [ -"Travel or maneuver is prohibited or extremely limited. This type's value may be used for restrictions which do not fall into a more specific subcategory, but the effect is that travel or maneuver is prohibited.", -"Travel or maneuver is prohibited due to traffic regulations. Includes restrictions due to explicit road markings. E.g., U-turns prohibited in marked intersections. NOTE: In the event that a restriction by regulations is enforced by a physical barrier, use RESTRICTION_PHYSICAL instead.", -"A permanent physical barrier prevents maneuver. This restriction type may not be used for restrictions with empty subpaths.", -"Maneuver is prohibited based on logical inference from other restrictions on segments or lanes. This restriction type may not be used for restrictions with empty subpaths. DEPRECATED", -"A gate or other moveable barrier prevents direct access. NOTE: Travel may still be allowed in certain circumstances. Use RESTRICTION_PRIVATE in lieu of RESTRICTION_GATE for segments that may still be routed on in certain circumstances. DEPRECATED", -"Road is closed due to road maintenance (e.g. repaving, adding more lanes) and cannot be used for routing.", -"Travel is prohibited for the duration of some season. 
E.g., due to ice/snow in the winter.", -"Road has signage discouraging or prohibiting use by the general public. E.g., roads with signs that say \"Private\", or \"No trespassing.\" NOTE: Use this type if travel is still be allowed in certain circumstances (e.g., legitimate visits to a place reachable by restricted roads only). Use RESTRICTION_ILLEGAL if travel is forbidden under all circumstances. This restriction type may not be used for restrictions with non-empty subpaths.", -"Travel over this segment is prohibited because of signage indicating one-way directionality in the opposite direction. This restriction type may not be used for restrictions with non-empty subpaths. Travel over this segment should be avoided at all costs unless the destination is on the segment. This restriction type may be used only for restrictions of STYLE_SINGLE.", -"RESERVED", -"", -"This restriction type may be used only for restrictions of STYLE_TURN, or STYLE_CONTIGUOUS with a subpath of length 2. The intersection at which the turn is restricted is the place of the toll booth.", -"Traveling on the associated subpath requires a fee to be paid.", -"Similar to RESTRICTION_USAGE_FEE_REQUIRED but instead of a fee for the usage, the fee is required for entering a special area (e.g. an amusement park). Also in contrast to RESTRICTION_USAGE_FEE_REQUIRED this restriction is supposed to be applied only to the segments that enter into the isolated restricted area from the outer world. Therefore there are two important points to consider when modeling data this way: 1) Every path that (exactly once) enters the restricted area should have exactly one such restriction (it does not matter whether it is a segment or subpath restriction). 2) All entrances have to be modeled consistently. It is recommended that the restriction is consistently applied to paths entering the area and not applied to paths leaving the area. 
NOTE: The above points are important because an additive penalty is applied for each individual restriction that appear on a path.", -"Similar to RESTRICTION_USAGE_FEE_REQUIRED but models a specific type of road tax called a 'vignette', used in certain European countries. Traveling on the associated subpath requires the driver or vehicle to have paid the road tax, documented by receipt of a physical or electronic vignette.", -"Restrictions in the RESTRICTION_ADVISORY category refer to paths where travel is permitted, but may not be prudent or is cumbersome. Since this is often a matter of opinion, clients may choose not to make use of this information or not display it directly to the user.", -"", -"", -"Segments with RESTRICTION_DISTURBED_BY_MAINTENANCE indicate that the road undergoes maintenance, however (unlike RESTRICTION_CONSTRUCTION) the segment will be used for routing.", -"An inland location where travelling is expected to be interrupted by authorities to inspect passengers and/or goods. This restriction type may only be used for restrictions with style STYLE_SINGLE.", -"Restrictions with the RESTRICTION_REGION_SPECIFIC category signify restrictions that are unique to a particular region, cannot currently be modeled through RestrictionProto, and/or have semantics that differ if in a particular region (such as car emission restrictions). These restrictions are specified on their restriction group, a traffic zone. Evaluating applicability of these restrictions requires an additional library located in the geostore/base/internal/specialized_restriction/ directory. That library controls the application of the restriction and will generally disregard data on this RestrictionProto. For example, go/cabrio-restriction-schema specifies that this proto should set TRAVEL_MOTOR_VEHICLE because other values have no effect; likewise, TimeScheduleProto has no effect. In truth this restriction's only job is to carry a restriction_group reference." 
-], -"type": "string" -}, -"vehicleAttributeFilter": { -"$ref": "GeostoreVehicleAttributeFilterProto", -"description": "The restriction only applies to vehicles that meet all of the attributes defined here. If this is empty, it does not affect the scope of the restriction." -} -}, -"type": "object" -}, -"GeostoreRightsStatusProto": { -"description": "Proto used to represent rights for FeatureProto. See go/geo-rights for more details. NOTE: Use google3/geostore/provenance/public/rights.h or google3/java/com/google/geostore/provenance/rights/Rights.java instead of accessing this proto directly.", -"id": "GeostoreRightsStatusProto", -"properties": { -"fieldWithRights": { -"items": { -"$ref": "GeostoreFieldWithRightsProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRoadConditionalProto": { -"description": "A RoadConditionalProto defines conditions that affect when the road traversal information is applicable.", -"id": "GeostoreRoadConditionalProto", -"properties": { -"timeSchedule": { -"$ref": "GeostoreTimeScheduleProto", -"description": "Specifies what times the information is applicable. This can be specific times (3-5 PM) or days of the week (Mon - Fri), as well as more general times like school hours, dusk to dawn, etc. If no value is set, the restriction is applicable at all times." -}, -"vehicleAttribute": { -"$ref": "GeostoreVehicleAttributeFilterProto", -"description": "Additional attributes that apply to the applied vehicle types." -}, -"vehicleType": { -"description": "Restrictions applying to specific types of vehicles.", -"items": { -"enum": [ -"UNKNOWN", -"ANY", -"CAR", -"MOTORCYCLE", -"TRUCK", -"BUS" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in MapFacts.", -"", -"", -"", -"Note: The exact definition of a truck varies by city, but it usually refers to vehicles with three or more axles. This value does not necessarily apply to all commercial vehicles or vehicles colloquially referred to as trucks (eg. 
pickup trucks).", -"" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRoadMonitorProto": { -"description": "A road monitor is a device that observes traffic for road violations like speeding or running a red light. These are modeled within MapFacts so that navigation services can warn users when they drive along road segments that are monitored.", -"id": "GeostoreRoadMonitorProto", -"properties": { -"monitoredRoad": { -"description": "The TYPE_ROAD segment features that this road monitor may observe.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRoadSignComponentProto": { -"description": "Below is some horrible ASCII art and a description of the components of a road sign. +-------------------+ | A11 E50 Paris | | Chartres | +-------------------+ This sign would be composed of four components (all of them text components, the only option we support for now). The three in the first row would all have a \"major_position\" of zero. Their \"minor_position\" values would be zero for \"A11\", one for \"E50\", and two for \"Paris\". The component in the second row would have \"major_position\" value of one. This message provides the details of a single component of a road sign. A component defines its position within a sign, its type, and its content.", -"id": "GeostoreRoadSignComponentProto", -"properties": { -"featureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The id of the feature referred to by this component, typically the route or locality feature this sign points towards. In the ASCII art example above, this field would contain the id for the routes A11 and E50 and the localities Chartres and Paris in the corresponding component." -}, -"featureType": { -"description": "The type of the feature referred to by this component. 
If feature_id is specified type of that feature should be the same as this field.", -"format": "int32", -"type": "integer" -}, -"majorPosition": { -"description": "This is the \"major\" position of this component within the set of components that make up a sign. This number can be thought of as the \"row\" of the sign on which the component appears, but no guarantees are made that there is a one-to-one mapping between \"major_position\" and the rows of information on the actual sign being modeled. A \"major_position\" value of zero would indicate that the component is near the top of the sign.", -"format": "int32", -"type": "integer" -}, -"minorPosition": { -"description": "This is the position of a component within the components of a sign that share a common \"major_position\". It can be though of as the \"column\" of the component, but like \"major_position\", no guarantees are made regarding its mapping to reality. For data sources that don't provide enough information to determine a component's major and minor positions, major position should be populated and minor position should not be present. A \"minor_position\" value of zero would indicate that the component is near the \"beginning\" of the sign. In countries where signs are read from left to right, \"minor_position\" zero would be near the left side of the sign.", -"format": "int32", -"type": "integer" -}, -"routeDirection": { -"description": "The direction of traffic for the referenced TYPE_ROUTE feature.", -"enum": [ -"DIRECTION_NONE", -"DIRECTION_NORTH", -"DIRECTION_EAST", -"DIRECTION_SOUTH", -"DIRECTION_WEST", -"DIRECTION_NORTHEAST", -"DIRECTION_NORTHWEST", -"DIRECTION_SOUTHEAST", -"DIRECTION_SOUTHWEST", -"DIRECTION_INNER", -"DIRECTION_OUTER" -], -"enumDescriptions": [ -"DIRECTION_NONE value is reserved and only present there to avoid using a meaningful entry as the default value. RESERVED", -"", -"", -"", -"", -"", -"", -"", -"", -"Inner/Outer loop. Short names are not supported. 
The actual direction depends on the local driving rule (right-handed vs. left-handed). TODO(b/291302506) If you need an explicit loop direction or a road sign uses clockwise/counterclockwise, don't use INNER/OUTER but consider adding DIRECTION_CLOCKWISE and DIRECTION_COUNTERCLOCKWISE instead.", -"" -], -"type": "string" -}, -"semanticType": { -"description": "The semantic type of sign.", -"enum": [ -"ROAD_SIGN_SEMANTIC_TYPE_UNSPECIFIED", -"PRIORITY", -"PRIORITY_STOP", -"PRIORITY_STOP_AHEAD", -"PRIORITY_YIELD", -"PRIORITY_YIELD_AHEAD", -"SPEED_LIMIT", -"SPEED_LIMIT_START", -"SPEED_LIMIT_START_MAX", -"SPEED_LIMIT_START_MIN", -"SPEED_LIMIT_START_SCHOOL", -"SPEED_LIMIT_START_ZONE", -"SPEED_LIMIT_START_ADVISORY", -"SPEED_LIMIT_START_AHEAD", -"SPEED_LIMIT_END", -"SPEED_LIMIT_END_MAX", -"SPEED_LIMIT_END_MIN", -"SPEED_LIMIT_END_SCHOOL", -"SPEED_LIMIT_END_ZONE", -"RESTRICTION", -"RESTRICTION_TURN", -"RESTRICTION_TURN_NO_RIGHT", -"RESTRICTION_TURN_NO_LEFT", -"RESTRICTION_TURN_NO_U", -"RESTRICTION_TURN_NO_THRU", -"RESTRICTION_MOVEMENT", -"RESTRICTION_MOVEMENT_PASSING", -"RESTRICTION_ACCESS", -"RESTRICTION_ACCESS_NO_ENTRY", -"WARNING", -"WARNING_CROSSING", -"WARNING_CROSSING_CHILDREN", -"WARNING_CROSSING_PEDESTRIAN", -"WARNING_CROSSING_RAILROAD", -"WARNING_CROSSING_SCHOOL", -"WARNING_ROAD_WORK", -"INFO", -"INFO_BOUNDARY", -"INFO_BOUNDARY_BUILT_UP_AREA", -"INFO_BOUNDARY_BUILT_UP_AREA_START", -"INFO_BOUNDARY_BUILT_UP_AREA_END", -"INFO_BOUNDARY_RESIDENTIAL", -"INFO_BOUNDARY_RESIDENTIAL_START", -"INFO_BOUNDARY_RESIDENTIAL_END", -"INFO_TRANSIT", -"AUXILIARY", -"AUXILIARY_ANIMAL", -"AUXILIARY_DIRECTION", -"AUXILIARY_DISTANCE", -"AUXILIARY_TIME", -"AUXILIARY_WEATHER", -"AUXILIARY_WEIGHT", -"AUXILIARY_VEHICLE" -], -"enumDescriptions": [ -"clang-format off Default value.", -"Signs to control outbound traffic by yielding to other roads or stopping.", -"", -"", -"", -"", -"Speed limit signs indicating start and end of min, max, advisory speed.", -"", -"", -"", -"", -"", -"", -"", -"", 
-"", -"", -"", -"", -"Restriction signs to control traffic, turns, and other traffic related activities such as parking.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Various non-regulatory warning signs, usually used to slow traffic down for road work and other conditions like curved roads or crossings. Does not include speed limit related signs, which have their own category.", -"", -"", -"", -"", -"", -"", -"Signs providing information about the area, nearby POIs, or other items of interest to readers.", -"", -"Signs placed when entering/exiting built-up areas to inform people. Definition of build-up area can differ regionally, captured by TYPE_BUILT_UP_AREA features in MapFacts (go/buas for more details).", -"", -"", -"Signs placed entering/exiting dense residential neighborhoods where pedestrians and children might be present on or near the road often.", -"", -"", -"Transit signs that help users enter or navigate to transit locations inside buildings and potentially out on the roads too. These signs are more common inside transit stations, airports, and other complex multitenant buildings.", -"Auxiliary signs that might not mean much by themselves, but they provide context combined with other signs. They can be text or symbolic addendums. For example, a sign can have time info e.g. (9AM-5PM), meaning that the sign is only applicable in the aforementioned time window.", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"text": { -"$ref": "GeostoreNameProto", -"description": "If this sign component is of type \"TYPE_TEXT\", this field contains the text of the component. A NameProto is used to allow language and flags to be associated with the text." -} -}, -"type": "object" -}, -"GeostoreRoadSignProto": { -"description": "A RoadSignProto holds the details of a road sign. 
Currently this is simply a list of the items that appear on the sign and their relative position.", -"id": "GeostoreRoadSignProto", -"properties": { -"component": { -"description": "The list of components for a single road sign. A sign may be composed of multiple components, each with its own position and content.", -"items": { -"$ref": "GeostoreRoadSignComponentProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreRouteAssociationProto": { -"description": "This protocol buffer holds metadata about the association between a segment and a route.", -"id": "GeostoreRouteAssociationProto", -"properties": { -"displayPreference": { -"description": "clang-format on", -"enum": [ -"DISPLAY_PREFERRED", -"DISPLAY_BEST", -"DISPLAY_OK", -"DISPLAY_HIDE" -], -"enumDescriptions": [ -"These routes should be displayed along this segment.", -"Among the preferred routes, this is the best one to display. There can be at most one of these per segment.", -"These are also valid route names to display, but clients should choose PREFERRED routes if available.", -"These routes should not be displayed." -], -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the route association." -}, -"route": { -"$ref": "GeostoreFeatureIdProto", -"description": "Identifies the route feature to which this metadata applies. This is one of the routes the segment refers to via the SegmentProto.route field." -}, -"routeDirection": { -"description": "The direction of the TYPE_ROUTE feature in this route association. A small number of countries (mostly just the United States, Mexico, and Canada) use directional routes. 
For example, in the United States highway US-1 is referred to as US-1 North or US-1 South on the sides where flow of traffic moves in those directions.", -"enum": [ -"DIRECTION_NONE", -"DIRECTION_NORTH", -"DIRECTION_EAST", -"DIRECTION_SOUTH", -"DIRECTION_WEST", -"DIRECTION_NORTHEAST", -"DIRECTION_NORTHWEST", -"DIRECTION_SOUTHEAST", -"DIRECTION_SOUTHWEST", -"DIRECTION_INNER", -"DIRECTION_OUTER" -], -"enumDescriptions": [ -"DIRECTION_NONE value is reserved and only present there to avoid using a meaningful entry as the default value. RESERVED", -"", -"", -"", -"", -"", -"", -"", -"", -"Inner/Outer loop. Short names are not supported. The actual direction depends on the local driving rule (right-handed vs. left-handed). TODO(b/291302506) If you need an explicit loop direction or a road sign uses clockwise/counterclockwise, don't use INNER/OUTER but consider adding DIRECTION_CLOCKWISE and DIRECTION_COUNTERCLOCKWISE instead.", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreRouteProto": { -"description": "A route is a collection of segments that forms a logical group - usually a named road or highway. Segments can belong to more than one route, and the segments of one route may be a subset of the segments of another route (e.g. I-5 N is a subset of I-5). Segments in the collection that define the route do not need to constitute a single uninterrupted line, there can be disconnects. The standard feature properties are interpreted as follows: name - Routes should have one or more names. (While unnamed roads certainly exist in the real world, we choose not to create route features for such roads. Instead, the unnamed segments are merely not part of any route.) address - This should always be empty. type - Specifies a particular route subtype, see feature.proto. point - This should always be empty. polyline - This should always be empty. polygon - This should always be empty. 
child - The pairs of segments that belong to this route (a given route should always reference segments in both travel directions).", -"id": "GeostoreRouteProto", -"properties": { -"childType": { -"description": "The feature type of the route children. Should be set if and only if all children are of the same feature type.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreSchoolDistrictProto": { -"description": "This protocol buffer holds school district specific attributes for features of TYPE_SCHOOL_DISTRICT.", -"id": "GeostoreSchoolDistrictProto", -"properties": { -"type": { -"enum": [ -"TYPE_UNIFIED", -"TYPE_ELEMENTARY", -"TYPE_SECONDARY" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSegmentPathProto": { -"description": "A segment path describes a path through a short set of segments. The segment path can be used for any purpose. At the moment, only TYPE_ROAD_SIGN features can have associated segment paths: The segment path lists the segments that refer to the sign. These are the segments for which the sign is applicable. The sign's physical location is independent of the segments in the path.", -"id": "GeostoreSegmentPathProto", -"properties": { -"subpath": { -"description": "Specifies a sequence of feature ids of GeoStore segments. The feature ids are ordered. The path \"AB\" is not the same as the path \"BA\". The segments along the path are assumed to be connected via the appropriate intersections. 
The segment features that are referenced by this subpath refer to this feature back via the road_sign field in segment proto extension.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreSegmentProto": { -"description": "--------------------------------------------------------------------------- WARNING - if you add new fields to SegmentProto (or to other protos used by SegmentProto), you need to: - ensure that the ShortSegmentsMerger class (in geostore/tools/internal/mr-mergesegments.cc) is aware of them, otherwise the new fields will be discarded randomly. - consider whether they should be cleared in the ClearFeature() function (in maps/render/process-high-priority-roads.cc) if they are irrelevant for rendering high priority roads at far-out zoom levels. - update the test cases that ensure these two packages know all the SegmentProto fields in both mr-mergesegments_test.cc and maps/render/process-high-priority-roads_test.cc or you will break the VersaTile build. ---------------------------------------------------------------------------", -"id": "GeostoreSegmentProto", -"properties": { -"advisoryMaximumSpeed": { -"items": { -"$ref": "GeostoreAppliedSpeedLimitProto" -}, -"type": "array" -}, -"altitude": { -"description": "RESERVED", -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -}, -"avgSpeedKph": { -"description": "The average speed that should be expected along this route under normal conditions, in kilometers per hour. (Hopefully we'll replace this with something a lot more sophisticated.)", -"format": "float", -"type": "number" -}, -"avgSpeedKphMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the average speed." 
-}, -"barrier": { -"description": "clang-format on", -"enum": [ -"BARRIER_NONE", -"BARRIER_PRESENT", -"BARRIER_LEGAL", -"BARRIER_PHYSICAL" -], -"enumDescriptions": [ -"Some barrier which prevents turns in the middle of a segment, but the details are not known (or the tester doesn't care to distinguish between legal and physical barriers).", -"", -"An example of this in the United States would be a pair of double-yellow lines. This pattern indicates that all turns are legally prohibited.", -"A concrete strip, island, planter, or other physical barrier. This category is also used when we model the roadways are separated." -], -"type": "string" -}, -"barrierMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the barrier." -}, -"bicycleFacility": { -"description": "clang-format on", -"enum": [ -"BICYCLE_FACILITY_SEPARATE_TRAIL", -"BICYCLE_FACILITY_PEDESTRIAN_PATH", -"BICYCLE_FACILITY_BIKE_FRIENDLY_PEDESTRIAN_PATH", -"BICYCLE_FACILITY_SHARED_ROAD", -"BICYCLE_FACILITY_BIKE_LANE", -"BICYCLE_FACILITY_BIKE_LANE_WITH_PEDESTRIAN_PATH", -"BICYCLE_FACILITY_WIDE_SHOULDER", -"BICYCLE_FACILITY_SHARROW", -"BICYCLE_FACILITY_SHARED_ROAD_WITH_PEDESTRIAN_PATH" -], -"enumDescriptions": [ -"This segment has a detached trail or path running alongside it, suitable for the use of cyclists. When a segment has this attribute, there should never be a distinct segment for the trail, because the road and trail are part of the same logical segment. In other words, the trail is modeled by this attribute on its associated road, rather than by a separate segment.", -"This segment has a sidewalk or other pedestrian pathway that permits bicycles.", -"This segment has a bike-friendly (e.g., wide) sidewalk or other pedestrian pathway.", -"This segment allows bicycles to share the road with motor vehicles. 
It may or may not provide special accommodation for cyclists.", -"This segment has a striped bicycle lane.", -"This segment has a striped bicycle lane, and also has a sidewalk or other pedestrian pathway that permits bicycles.", -"This segment has a wide shoulder or curb lane.", -"This segment has pavement markings to make motorists aware of bicycles in a shared lane.", -"This segment allows bicycles to share the road with motor vehicles, and also has a sidewalk or other pedestrian pathway that permits bicycles." -], -"type": "string" -}, -"bicycleSafety": { -"enum": [ -"BICYCLE_SAFETY_RECOMMENDED", -"BICYCLE_SAFETY_NEUTRAL", -"BICYCLE_SAFETY_CAUTION" -], -"enumDescriptions": [ -"This segment is explicitly recommended as suitable for biking, for instance by a sign, by a city bike map, or by a user.", -"This segment is similarly suitable for biking as nearby segments with the same 'bicycle_facility'.", -"Cyclists should use extra caution on these segments, as they may be inferior in suitability for biking." -], -"type": "string" -}, -"condition": { -"enum": [ -"CONDITION_GOOD", -"CONDITION_POOR" -], -"enumDescriptions": [ -"", -"" -], -"type": "string" -}, -"conditionMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the condition." -}, -"constructionBeginDate": { -"$ref": "GeostoreDateTimeProto", -"description": "If known, the date that construction is scheduled to begin." -}, -"constructionEndDate": { -"$ref": "GeostoreDateTimeProto", -"description": "If known, the date that construction is scheduled to end." 
-}, -"constructionStatus": { -"enum": [ -"CONSTRUCTION_PLANNED", -"CONSTRUCTION_STARTED", -"CONSTRUCTION_COMPLETE", -"CONSTRUCTION_CLOSED_FOR_MAINTENANCE", -"CONSTRUCTION_DISTURBED_BY_MAINTENANCE" -], -"enumDeprecated": [ -false, -false, -false, -true, -true -], -"enumDescriptions": [ -"", -"", -"", -"", -"" -], -"type": "string" -}, -"constructionStatusMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the construction status." -}, -"covered": { -"description": "Whether the segment is covered by a roof etc. If this field is missing, the status is unknown.", -"type": "boolean" -}, -"distanceToEdge": { -"description": "Average distance between the segment's polyline and edge of the road on this side in meters. It need not be equal to the sum of width of all lanes in this direction. This width includes on-street bicycle lanes but excludes off-street lanes such as sidewalks. The edge of the road is the rightmost edge for segments in right side driving countries and leftmost edge for left side driving countries. Width of the road is sum of this and sibling's distance_to_edge.", -"format": "float", -"type": "number" -}, -"distanceToEdgeMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for distance_to_edge." -}, -"edgeFollowsSegmentBeginFraction": { -"description": "These indicate for what portion of the segment does the outer curb of the segment follow the segment polyline - i.e., where do the sweep curves connect along the outer curb. 
If unspecified, may be assumed to be equal to lane retraction, preferring outermost lane.", -"format": "float", -"type": "number" -}, -"edgeFollowsSegmentEndFraction": { -"format": "float", -"type": "number" -}, -"elevation": { -"description": "clang-format on", -"enum": [ -"ELEVATION_NORMAL", -"ELEVATION_BRIDGE", -"ELEVATION_TUNNEL", -"ELEVATION_SKYWAY", -"ELEVATION_STAIRWAY", -"ELEVATION_ESCALATOR", -"ELEVATION_ELEVATOR", -"ELEVATION_SLOPEWAY", -"ELEVATION_MOVING_WALKWAY" -], -"enumDescriptions": [ -"", -"", -"", -"A skyway is a road that is raised on supports for an extended length.", -"Sample path description: \"Go up/down the stairs.\" PedestrianGradeCategory provides information about \"up/down\".", -"Sample path description: \"Go up/down the escalator.\" PedestrianGradeCategory provides information about \"up/down\".", -"Sample path description: \"Take the elevator up/down to the xth floor.\" PedestrianGradeCategory provides information about \"up/down\".", -"Sample path description: \"Ascend/descend the slope\" PedestrianGradeCategory provides information about \"ascend/descend\".", -"Sample path description: \"Get on the moving walkway.\"" -], -"type": "string" -}, -"elevationMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the elevation." -}, -"endpoint": { -"description": "clang-format on", -"enum": [ -"ENDPOINT_UNKNOWN", -"ENDPOINT_UNRESTRICTED", -"ENDPOINT_UNCONTROLLED", -"ENDPOINT_STOP_SIGN", -"ENDPOINT_ALL_WAY_STOP", -"ENDPOINT_TRAFFIC_LIGHT", -"ENDPOINT_THREE_WAY", -"ENDPOINT_FLASHING_RED", -"ENDPOINT_FLASHING_YELLOW", -"ENDPOINT_YIELD", -"ENDPOINT_MERGE", -"ENDPOINT_ROUNDABOUT", -"ENDPOINT_RAILROAD_CROSSING", -"ENDPOINT_NO_EXIT", -"ENDPOINT_WRONG_WAY", -"ENDPOINT_TOLL_BOOTH" -], -"enumDescriptions": [ -"", -"This category is used in cases where traffic on this segment always has the right-of-way (e.g. 
passing a side street, fork in road).", -"", -"", -"", -"", -"Three-way in this direction", -"Flashing red light (stop)", -"Flashing yellow light (yield)", -"Traffic on this segment has to give way to oncoming traffic, but does not necessarily have to come to a full stop. This category is used where there exists a physical yield sign and/or road painting (triangles), but also where circumstances and applicable local traffic laws require traffic on such segment to give way (e.g. coming from a dirt road onto a paved road).", -"", -"This segment leads to a roundabout entrance. Note this category is not used for segments themselves inside a roundabout. See USAGE_ROUNDABOUT for these.", -"", -"This category denotes a dead-end. Useful to explicitly exclude connections to other data-sets, for example at country borders and building entrances in countries where the indoor and outdoor routing networks are provided by different providers.", -"DEPRECATED Use RESTRICTION_WRONG_WAY", -"This segment directly ends at a toll booth. The intersection capturing these segments represents the location of the toll booth. If it has a name, this is captured in the TYPE_INTERSECTION_GROUP that contains the intersection." -], -"type": "string" -}, -"endpointMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the endpoint." -}, -"gradeLevel": { -"description": "Detailed information about grade levels along the segment. If a GradeLevelProto is not present for any point (index) along the segment, the default grade level is zero. In between two points (indexes), the grade level of the segment is taken to be the max of the grade levels on either side of it. See gradelevel.proto for semantics of repeated indexes.", -"items": { -"$ref": "GeostoreGradeLevelProto" -}, -"type": "array" -}, -"internal": { -"$ref": "GeostoreInternalSegmentProto", -"description": "Internal-only data." 
-}, -"interpolationOffsetMeters": { -"description": "If specified, the perpendicular offset in meters from a road segment to an interpolated address along that road segment. See go/synthetic-address-positions.", -"format": "float", -"type": "number" -}, -"intersection": { -"$ref": "GeostoreFeatureIdProto", -"description": "The intersection feature corresponding to the destination of this segment. Intersections are used to represent the connectivity between segments. Each intersection stores the segment ids of all the incoming and outgoing segments that meet at that intersection. Turns can be made from this segment to any of the outgoing segments of its intersection, unless there is a restriction that explicitly disallows the turn (see below). Every segment has an intersection object, even if there are no other segments to connect to (i.e., a cul-de-sac or dead end)." -}, -"isMaxPermittedSpeedDerived": { -"description": "Specifies whether the max_permitted_speed_kph was derived from a heuristic as opposed to coming from an authoritative source.", -"type": "boolean" -}, -"lane": { -"description": "Detailed information about each lane in this direction, if available. Lanes are numbered from inside of the road outward, i.e. the lane next to the center line has lane_number 0. Note that lanes that are valid for travel in both directions appear in both segments of a segment pair (left turn lanes, one-lane roads, some passing lanes, reversing lanes). Some lanes may not be usable by cars, such as bike lanes. Also, some lanes may not exist along the entire segment, e.g. left- or right-turn lanes that appear just before the intersection.", -"items": { -"$ref": "GeostoreLaneProto" -}, -"type": "array" -}, -"legalMaximumSpeed": { -"description": "The legal maximum, legal minimum, and advisory (recommended but non-legally binding) maximum speed limits that are permitted on this segment. 
These should be the segment's legal limits; however, note that it may contain estimated values based on country-wide defaults and other heuristics (see 'AppliedSpeedLimitProto.trust_level'). Before exposing these fields to users as the legal speed limit please consult with Google lawyers.", -"items": { -"$ref": "GeostoreAppliedSpeedLimitProto" -}, -"type": "array" -}, -"legalMinimumSpeed": { -"description": "LINT.ThenChange(//depot/google3/geostore/base/internal/segment.cc:has_speed_limit)", -"items": { -"$ref": "GeostoreAppliedSpeedLimitProto" -}, -"type": "array" -}, -"maxPermittedSpeedKph": { -"description": "LINT.IfChange(speed_limits) The maximum speed that is permitted on this segment, in kilometers per hour. This should be the segment's legal speed limit; however, note that it may contain estimated values based on country-wide defaults and other heuristics (see 'is_max_permitted_speed_derived' below). Before exposing this field to users as the legal speed limit please consult with Google lawyers. ", -"format": "float", -"type": "number" -}, -"maxPermittedSpeedKphMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the maximum permitted speed." -}, -"onRight": { -"description": "Specifies whether this segment carries right-hand traffic (cars keep to the right side of the road) instead of left-hand traffic (cars keep to the left side). This is true for US roads and false for UK roads, for example. See go/wikip/Left-_and_right-hand_traffic.", -"type": "boolean" -}, -"pedestrianCrossing": { -"$ref": "GeostorePedestrianCrossingProto", -"description": "Defines the pedestrian crossing(s) between the end point of this segment and the start point of this segment's sibling." 
-}, -"pedestrianFacility": { -"description": "clang-format on", -"enum": [ -"PEDESTRIAN_FACILITY_UNKNOWN", -"PEDESTRIAN_FACILITY_NONE", -"PEDESTRIAN_FACILITY_PRESENT", -"PEDESTRIAN_FACILITY_SIDEWALK", -"PEDESTRIAN_FACILITY_WIDE_SHOULDER" -], -"enumDeprecated": [ -false, -false, -false, -true, -true -], -"enumDescriptions": [ -"No data about pedestrian facility is available.", -"No special pedestrian facilities are available on this segment.", -"The segment has some kind of pedestrian facility, like a sidewalk or wide shoulder, that separates pedestrians from motor vehicle traffic.", -"Deprecated. Sidewalks are represented via a TYPE_PEDESTRIAN lane.", -"Deprecated. Wide shoulder is implied when segment.pedestrian_facility = PEDESTRIAN_FACILITY_PRESENT and segment.lane does not include a TYPE_PEDESTRIAN lane." -], -"type": "string" -}, -"pedestrianGrade": { -"enum": [ -"PEDESTRIAN_GRADE_FLAT", -"PEDESTRIAN_GRADE_UP", -"PEDESTRIAN_GRADE_DOWN" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"priority": { -"description": "LINT.ThenChange(//depot/google3/maps/pathfinder/pgraph/pgraph-segment-categories.cc)", -"enum": [ -"PRIORITY_UNKNOWN", -"PRIORITY_NON_TRAFFIC", -"PRIORITY_TERMINAL", -"PRIORITY_LOCAL", -"PRIORITY_MINOR_ARTERIAL", -"PRIORITY_MAJOR_ARTERIAL", -"PRIORITY_SECONDARY_ROAD", -"PRIORITY_PRIMARY_HIGHWAY", -"PRIORITY_LIMITED_ACCESS", -"PRIORITY_CONTROLLED_ACCESS" -], -"enumDescriptions": [ -"The priority of the segment is unknown. This is not actually allowed to be set in MapFacts, but best-practice is to have a default 0 value for enums.", -"A non-traffic segment is not intended for normal vehicles, e.g. a pedestrian mall. 0x10", -"A terminal road is not intended to carry through traffic, e.g. a narrow residential street or an access road to a point of interest. 0x20", -"A small city street, typically for travel in a residential neighborhood, or a small rural road. This is the lowest priority suitable for through traffic. 
0x30", -"A moderate-capacity \"collector\" that funnels traffic from local roads towards arterial roads or business areas. 0x40", -"A high-capacity road that carries large volumes of traffic between different neighborhoods or towns. 0x50", -"Roads that act as conduits a) between primary highways (and above), or b) from major/minor arterial roads to primary highways (and above). They may be state highways, for example. 0x60", -"These highways are major preferred roads that connect between regions, but that do not have significant access restrictions. Most crossings are level grade (traffic light, stop sign) and there may be driveways and local roads that connect directly to the highway. NOTE: Where a dense network of limited/controlled access roads carries most of the traffic between regions, this priority may be infrequent. 0x70", -"Limited access highways have some access restrictions, but are not fully access controlled. Typically driveways and local roads connect to a separate \"frontage road\" that is connected to the highway via spaced traffic lights, interchanges, or stop signs. Limited access roads are sometimes called expressways. 0x80", -"Controlled access highways have grade-separated crossings and are accessed exclusively by ramps. They are usually called \"freeways\" or \"motorways\". 0x90" -], -"type": "string" -}, -"priorityMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the priority." -}, -"ramp": { -"$ref": "GeostoreSegmentProtoRampProto", -"description": "May only be set when the segment's usage is `USAGE_RAMP`." -}, -"restriction": { -"description": "The set of restrictions that apply to this segment. Restrictions may make a single segment, turn, or more complex maneuver along a set of segments unroutable for the specified travel modes, or may only add penalties or warnings, depending on the restriction type. Turn restrictions are one example of a restriction. 
By default, turns are allowed onto all outgoing segments from this segment's intersection (including the sibling of this segment, i.e. U-turns are allowed by default). If any of these turns are disallowed they will be listed as \"subpath restrictions\". A subpath restriction disallows travel on given sequence of segments. In the case of a disallowed turn, the subpath simply consists of the source and destination feature ids. There may also be restrictions that apply to all travel on this segment (e.g. chains required, or closed in winter), or restrictions that just apply to certain lanes (e.g. high occupancy vehicle lanes).", -"items": { -"$ref": "GeostoreRestrictionProto" -}, -"type": "array" -}, -"roadMonitor": { -"description": "The road monitors that monitor this segment for traffic violations.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"roadSign": { -"description": "The road sign(s) which this segment refers to. These are features of TYPE_ROAD_SIGN that are applicable to this segment. For example, a sign that says \"TO KIRKLAND\" might apply to several segments on a freeway off-ramp (until the end of the ramp). Note that this field makes it easy to find the signs for a given road segment. The feature for the sign lists the segments that refer to it.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"route": { -"description": "The route(s) to which this segment belongs.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"routeAssociation": { -"description": "Holds metadata about the associations between this segment and the route features listed in the route field. This metadata need not be present; the only consistency requirement is that every feature ID that appears inside 'route_association' must also appear in the repeated 'route' field. 
If a route does not appear in route_association, consumers should assume that it has a default initialized RouteAssociationProto.", -"items": { -"$ref": "GeostoreRouteAssociationProto" -}, -"type": "array" -}, -"separatedRoadways": { -"description": "Indicates whether the segment's opposing lanes of traffic are separated from this segment, and hence have been represented in a separate feature. This means that there are two pairs of siblings instead of one.", -"type": "boolean" -}, -"sibling": { -"$ref": "GeostoreFeatureIdProto", -"description": "The other segment of this segment pair (see above). The segment that is referenced by the sibling field refers to this feature back via the same sibling field. Both segment and sibling should have the same properties such as geometry, country code, elevation, level relation, priority etc. Since routes are required to have segment and sibling at the same time, the set of routes on a segment is same to that of the sibling." -}, -"slope": { -"description": "Each slope instance is tied to a point along the segment polyline (unrelated to the vertices in the segment's polyline) and represents the slope of the segment between that point and the point tied to the next slope istance, or the end of the segment if it's the last slope instance. 
A segment should have at least one slope.", -"items": { -"$ref": "GeostoreSlopeProto" -}, -"type": "array" -}, -"surface": { -"description": "clang-format on LINT.ThenChange(//depot/google3/geostore/base/proto/lane.proto) Specific lanes may override this segment-level surface type.", -"enum": [ -"SURFACE_UNKNOWN", -"SURFACE_PAVED", -"SURFACE_ASPHALT", -"SURFACE_CONCRETE", -"SURFACE_CHIPSEAL", -"SURFACE_BRICK", -"SURFACE_SETT", -"SURFACE_COBBLESTONE", -"SURFACE_UNPAVED", -"SURFACE_GRAVEL", -"SURFACE_DIRT", -"SURFACE_SAND" -], -"enumDescriptions": [ -"RESERVED", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"surfaceMetadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for the surface." -}, -"sweep": { -"description": "The geometric sweeps between this segment and nearby segments, used for real road width rendering. A sweep describes the surface that connects to segments.", -"items": { -"$ref": "GeostoreSweepProto" -}, -"type": "array" -}, -"tollRoad": { -"description": "If this segment is part of a toll road. It would be nice to have data about the toll cost, locations of toll booths, and so forth. 
Sadly, we don't have this data at this time.", -"type": "boolean" -}, -"usage": { -"description": "clang-format on LINT.ThenChange(//depot/google3/maps/pathfinder/pgraph/pgraph-segment-categories.cc)", -"enum": [ -"USAGE_ANY", -"USAGE_RAMP", -"USAGE_ON_RAMP", -"USAGE_OFF_RAMP", -"USAGE_ON_OFF_RAMP", -"USAGE_INTERCHANGE", -"USAGE_SPECIAL_TRAFFIC_FIGURE", -"USAGE_ROUNDABOUT", -"USAGE_ROUNDABOUT_BYPASS", -"USAGE_ROUNDABOUT_INTERNAL_BYPASS", -"USAGE_ROUNDABOUT_EXTERNAL_BYPASS", -"USAGE_ENCLOSED_TRAFFIC_AREA", -"USAGE_PEDESTRIAN_MALL", -"USAGE_MAJOR_PEDESTRIAN_MALL", -"USAGE_MINOR_PEDESTRIAN_MALL", -"USAGE_WALKWAY", -"USAGE_TRAIL", -"USAGE_STATION_PATH", -"USAGE_ACCESS_PATH", -"USAGE_CROSSING", -"USAGE_MARKED_CROSSING", -"USAGE_UNMARKED_CROSSING", -"USAGE_OVERPASS", -"USAGE_UNDERPASS", -"USAGE_HALLWAY", -"USAGE_TURN_SEGMENT", -"USAGE_INDOOR_CONNECTION_PATH" -], -"enumDescriptions": [ -"Not a true usage category--this value was added just to ease the transition from proto1 to proto2.", -"Ramps are connecting segments to or between limited access roads. They are often called \"slip roads\" in Europe.", -"", -"", -"", -"", -"This road segment is part of a special traffic figure, which is like a roundabout or traffic circle, but doesn't meet all of the requirements of USAGE_ROUNDABOUT. It has a round shape but, e.g., there may be higher priority roads within the interior of the figure (also known as a hamburger roundabout), or the figure doesn't complete an entire circuit.", -"This road segment forms part of a roundabout or traffic circle (these terms don't have the exact same meaning, but the schema doesn't distinguish and captures both). Roundabouts complete one entire circuit, and are one-way through their entire circuit. 
Sample wording in directions: \"At the roundabout, take the 2nd exit\", or \"Go through 1 roundabout\".", -"This road segment is part of a bypass associated with, but not part of, a roundabout or traffic circle, that shortcuts the outer ring.", -"This road segment is part of a special traffic figure that allows traffic to go straight through the roundabout without entering the circle part of the roundabout. Divides the roundabout into two halves.", -"Shortcut from the entrance of a roundabout to the same destination as the first exit from the roundabout relative to that entrance.", -"A parking lot or parking structure, possibly with multiple entrances that can be significant for routing.", -"A stretch of road closed to normal traffic (trams, buses, delivery vehicles may be allowed).", -"", -"", -"\"Non-traffic\" segments that are roughly associated with the road network but that are being modeled separately from the road network, e.g., a sidewalk or wide shoulder. In contrast, walkways modeled using segments that are part of the road network should be marked with pedestrian and bicycle facilities. NOTE: Separately modeled walkways are unusual and have issues; consult with oyster-team before using this in new data.", -"A non-road segment that can be described as a major path or a trail. A trail is often part of a dedicated network of named biking or walking trails, is not particularly short, and typically has scenic value or facilitates crossing rough terrain. USAGE_TRAIL implies that PRIORITY_NON_TRAFFIC must be set, but compared to a segment with no usage at all, is suitable for extra emphasis by clients as a more desirable segment to travel on. Minor paths, such as pedestrian shortcuts, walkways through building complexes, and short walkways through a city park should be modeled as PRIORITY_NON_TRAFFIC with no usage. Sample path description: \"Take the trail.\"", -"Segment between toll gate and station center which represents logical location trains stop at. 
The segment's AddressComponent should have a route name which includes appropriate station name whenever possible. This segment should usually be restricted by RESTRICTION_USAGE_FEE_REQUIRED in order to penalize walking directions that just go through this segment. Sample path description: \"Get in Shibuya station, and take the path to the platform.\" NOTE: This attribute is unusual; it should not be set in new data.", -"Alley in park, backyard of communal facilities such as museum, or kind of private properties open to public. The segment's AddressComponent should have a route name which includes the property's name whenever possible. This is different from trails as the purpose of the access path is more specific than trails. Sample path description: \"Go through the access path to the National Museum.\" NOTE: This attribute is unusual and not clearly defined; it should not be set in new data.", -"A segment that connects separately modeled walkways (or other non-traffic segments) across a road. NOTE: USAGE_CROSSING segments make sense only where pedestrian segments are modeled separately. They cannot be used in the common case of sidewalks modeled as pedestrian facilities on road. Using a USAGE_CROSSING segment without connection to other non-traffic segments only to mark the location of a crossing is wrong.", -"Crossing with mark. We assume zebra marking for now. Sample path description: \"Take the crosswalk.\"", -"Crossing with no mark Sample path description: \"Cross the road.\"", -"This is different from ELEVATION_SKYWAY as this implies \"non-traffic\". Sample path description: \"Take the pedestrian overpass.\"", -"Underground crossing paths and implies \"non-traffic\". 
Sample path description: \"Take the pedestrian underpass.\"", -"Sample path description: \"Go through the hallway.\" NOTE: This attribute is unusual and not clearly defined; it should not be set in new data.", -"Turn segments are connecting segments between lower priority roads that bypass the direct intersection of the roads. They are the preferred route, or perhaps the only valid route, for turns between the roads. For example, in the US this would be a segment that would allow a vehicle to take a free right-turn without having to go through the intersection. Similarly in the UK, this would be a segment that allows a free left-turn. Also, the small segment next to the island of a channelized turn lane would fall under this category.", -"Usage to explicitly indicate that the segment connects walking networks on indoor levels in different buildings." -], -"type": "string" -}, -"visibleLandmark": { -"description": "A collection of landmarks that are visible when traveling along this segment and useful for wayfinding to users following routes using this segment. The landmark need not be on the segment. Each segment in a pair of siblings specifies its landmarks independently. A landmark applicable to both appears in both.", -"items": { -"$ref": "GeostoreLandmarkReferenceProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreSegmentProtoRampProto": { -"description": "Encapsulates ramp-specific properties.", -"id": "GeostoreSegmentProtoRampProto", -"properties": { -"maxConnectedPriority": { -"description": "The highest priority of any TYPE_ROAD endpoint segment which is transitively connected to this ramp via other ramp segments. For instance, if we have two roads connected through a series of ramps (omitting intersections): Road(P=96)-> Ramp 1 -> Ramp 2 -> Ramp 3 -> Road(P=122) -> Road(P=144) The `max_connected_priority` of all three intermediary ramps is 122. It's not 144, since Road(P=144) is connected through another road, not a ramp. 
This differs from the usual `SegmentProto.priority` field, which contains the lowest priority across any connected road segment.", -"enum": [ -"PRIORITY_UNKNOWN", -"PRIORITY_NON_TRAFFIC", -"PRIORITY_TERMINAL", -"PRIORITY_LOCAL", -"PRIORITY_MINOR_ARTERIAL", -"PRIORITY_MAJOR_ARTERIAL", -"PRIORITY_SECONDARY_ROAD", -"PRIORITY_PRIMARY_HIGHWAY", -"PRIORITY_LIMITED_ACCESS", -"PRIORITY_CONTROLLED_ACCESS" -], -"enumDescriptions": [ -"The priority of the segment is unknown. This is not actually allowed to be set in MapFacts, but best-practice is to have a default 0 value for enums.", -"A non-traffic segment is not intended for normal vehicles, e.g. a pedestrian mall. 0x10", -"A terminal road is not intended to carry through traffic, e.g. a narrow residential street or an access road to a point of interest. 0x20", -"A small city street, typically for travel in a residential neighborhood, or a small rural road. This is the lowest priority suitable for through traffic. 0x30", -"A moderate-capacity \"collector\" that funnels traffic from local roads towards arterial roads or business areas. 0x40", -"A high-capacity road that carries large volumes of traffic between different neighborhoods or towns. 0x50", -"Roads that act as conduits a) between primary highways (and above), or b) from major/minor arterial roads to primary highways (and above). They may be state highways, for example. 0x60", -"These highways are major preferred roads that connect between regions, but that do not have significant access restrictions. Most crossings are level grade (traffic light, stop sign) and there may be driveways and local roads that connect directly to the highway. NOTE: Where a dense network of limited/controlled access roads carries most of the traffic between regions, this priority may be infrequent. 0x70", -"Limited access highways have some access restrictions, but are not fully access controlled. 
Typically driveways and local roads connect to a separate \"frontage road\" that is connected to the highway via spaced traffic lights, interchanges, or stop signs. Limited access roads are sometimes called expressways. 0x80", -"Controlled access highways have grade-separated crossings and are accessed exclusively by ramps. They are usually called \"freeways\" or \"motorways\". 0x90" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreServiceAreaProto": { -"description": "This proto represents the geographic area served by an establishment. WARNING: This proto is not meant to be used directly. Please use the provided libraries. http://google3/geostore/base/public/service_area.h http://google3/java/com/google/geostore/base/ServiceArea.java", -"id": "GeostoreServiceAreaProto", -"properties": { -"servedFeature": { -"description": "The features that make up the service area for this establishment. These features are subject to the following constraints applied by editing middleware (notably, not strictly enforced by lints in storage): 1. The following feature types (and their subtypes) may be used: + TYPE_ISLAND + TYPE_POLITICAL, except the following prohibited subtypes: - TYPE_CONSTITUENCY - TYPE_LAND_PARCEL + TYPE_POSTAL 2. There is a maximum limit (currently 20) to the number of areas which may be provided. This is due to serving efficiency limitations. 3. There are no additional geometry requirements for these features beyond the requirements based on the feature types above. In practice this means that these features will either have polygonal or point-based geometries. 4. 
These referenced features are generally required to have names, though this is not strictly enforced.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreServicedStopProto": { -"description": "Defines an ordered reference to a line variant\u2019s stop.", -"id": "GeostoreServicedStopProto", -"properties": { -"id": { -"$ref": "GeostoreFeatureIdProto", -"description": "Reference to a Transit POI feature (gcid:transit_station) or platform compound section (gcid:railway_platform) serviced by the line variant." -}, -"index": { -"description": "An index representing the order in which the above station is serviced by the line variant.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreSkiBoundaryProto": { -"description": "This protocol buffer holds attributes for features of TYPE_SKI_BOUNDARY.", -"id": "GeostoreSkiBoundaryProto", -"properties": { -"type": { -"enum": [ -"TYPE_ANY", -"TYPE_DANGER", -"TYPE_SKI_AREA", -"TYPE_SLOW_ZONE" -], -"enumDescriptions": [ -"ABSTRACT", -"The boundary line for a danger area. This is used to mark lines that should not be crossed for reasons of safety.", -"The boundary line for a ski area. This is generally used to specify the line at which the ski resort ends, rather than a line enclosing a single ski run.", -"An area signposted for slow skiing only." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSkiLiftProto": { -"description": "This protocol buffer holds attributes for features of TYPE_SKI_LIFT.", -"id": "GeostoreSkiLiftProto", -"properties": { -"type": { -"description": "clang-format on", -"enum": [ -"TYPE_ANY", -"TYPE_SURFACE", -"TYPE_T_BAR", -"TYPE_J_BAR", -"TYPE_ROPE_TOW", -"TYPE_POMA", -"TYPE_CARPET", -"TYPE_FUNICULAR", -"TYPE_GONDOLA", -"TYPE_CHAIR", -"TYPE_AERIAL", -"TYPE_TRAM" -], -"enumDescriptions": [ -"ABSTRACT", -"Surface lifts are those that pull or carry a skier along the ground; rope tows are a common example.", -"", -"", -"", -"", -"", -"", -"An aerial lift, such as a gondola or funitel, that moves enclosed cars along a continuously circulating set of cables.", -"An open-air chair lift.", -"Aerial lifts are those that run along cables suspended above the ground", -"An aerial tram system that shuttles back and forth along a fixed set of cables." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSkiTrailProto": { -"description": "This protocol buffer holds attributes for features of TYPE_SKI_TRAIL.", -"id": "GeostoreSkiTrailProto", -"properties": { -"difficulty": { -"enum": [ -"DIFFICULTY_EASIEST", -"DIFFICULTY_EASY", -"DIFFICULTY_INTERMEDIATE", -"DIFFICULTY_ADVANCED_INTERMEDIATE", -"DIFFICULTY_DIFFICULT", -"DIFFICULTY_ADVANCED_DIFFICULT" -], -"enumDescriptions": [ -"The easiest ski trail. In the US, this would be marked with double-green circles.", -"A novice ski trail. In the US, this would be marked with a single green circle.", -"An intermediate-level ski trail. In the US, this would be marked with a single blue square.", -"An advanced-intermediate level ski trail. In the US, this would be marked with double-blue squares.", -"A difficult-level ski trail. In the US, this would be marked with a single black diamond.", -"An advanced-difficult level ski trail. In the US, this would be marked with double-black diamonds." 
-], -"type": "string" -}, -"type": { -"enum": [ -"TYPE_ANY", -"TYPE_GLADE", -"TYPE_TRAIL_TERRAIN", -"TYPE_TRAIL", -"TYPE_RACE_COURSE", -"TYPE_BOWL" -], -"enumDescriptions": [ -"ABSTRACT", -"Glade trails are ski paths that traverse wooded areas; they may be well-defined or not, and groomed or ungroomed. Ideally, they should be rendered differently on map tiles to distinguish them from standard ski trails.", -"A terrain-park area, usually containing special features such as jumps or half-pipes.", -"A regular ski-trail.", -"A trail specifically used for ski races.", -"Bowls are large open areas, usually steep and often rocky, that are open for skiing but don't have defined trails. They are frequented mostly by expert skiers." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSlopeProto": { -"id": "GeostoreSlopeProto", -"properties": { -"slopeValue": { -"description": "Slope value as elevation change divided by horizontal distance, in the format of decimal, e.g., \u20180.1234\u2019 means a 12.34% slope. If a slope_value is unset, it indicates we don\u2019t have enough information to compute slope at this location.", -"format": "float", -"type": "number" -}, -"startPointFraction": { -"description": "Indicates how far along the segment this slope value starts to apply, in the format of decimal between 0 and 1.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreSocialReferenceProto": { -"description": "MapFacts GAIA ID assigned to this feature. These values are virtual GAIA IDs from MapFacts, and as such are not stored in Focus.", -"id": "GeostoreSocialReferenceProto", -"properties": { -"baseGaiaId": { -"description": "WARNING: Please do NOT introduce new uses; treat this field as if it were deprecated. ", -"format": "int64", -"type": "string" -}, -"claimedGaiaId": { -"description": "GAIA ID used when a business has been claimed. This value is a robot GAIA ID. 
Robots are a special type of GAIA account used to denote identity for a user or a group of users, but are not logged-in directly by a user.", -"format": "int64", -"type": "string" -}, -"gaiaIdForDisplay": { -"description": "WARNING: Please do NOT introduce new uses; treat this field as if it were deprecated. ", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSourceInfoProto": { -"description": "Source infos are the canonical way to establish data provenance. They can currently be set on features, edits, and issues. Every feature has a repeated list of SourceInfoProto messages to describe the source data that was used in building this feature. The data includes a feature id that points to additional data about the data source (version, copyright notice, etc), and optional \"raw data\" that is taken directly from the provider's format and has not been converted to a canonical form.", -"id": "GeostoreSourceInfoProto", -"properties": { -"attributionUrl": { -"description": "This is the URL of a page representing all the data from this source in this feature. It may have be the ultimate source of the data (in case of scraping) or merely the same data styled according the provider's taste. There is a similar field in DataSourceProto which is NOT cached in this field, since it has a different meaning.", -"items": { -"$ref": "GeostoreUrlProto" -}, -"type": "array" -}, -"collectionDate": { -"$ref": "GeostoreDateTimeProto", -"description": "The time that this particular piece of data was collected. If different attributes were collected on different dates, this is the date of the most recent edit." -}, -"cookie": { -"description": "A source info may have a magic cookie whose content and semantics are defined by the specific import process or third-party feed. 
For feeds that are processed by Distillery, the cookie, when set, should contain the unique identifier for the feature as provided by the feed.", -"type": "string" -}, -"dataset": { -"description": "The dataset from which this SourceInfoProto was created. The content of this string will be determined by the data provider (e.g. for MultiNet data, \"fra\" would indicate the dataset for France). This field is unnecessary for providers that deliver a single dataset per release (e.g. Basarsoft).", -"type": "string" -}, -"gaiaId": { -"deprecated": true, -"description": "The Gaia ID of the user who provided us with this data. This field should never be set on source infos present on features, but may be set on source infos present on edits. DEPRECATED: Most clients should use the \"user\" field instead where Gaia IDs are encrypted.", -"format": "int64", -"type": "string" -}, -"impersonationUser": { -"$ref": "GeostoreUserProto", -"description": "Information about an internal user or system that is operating on behalf of `user` by way of impersonation." -}, -"layer": { -"description": "The name of the layer from which this SourceInfoProto was created.", -"type": "string" -}, -"ogrFid": { -"description": "The OGR feature identifier from which this SourceInfoProto was created. This is an internal OGR record identifier and has nothing to do with any of the feature's fields or the FeatureIdProto for the FeatureProto containing this SourceInfoProto. This field is present only for debugging purposes and possible use in the match pattern of a FeatureChangeProto designed to fix bad source data very early in the importing process.", -"format": "int64", -"type": "string" -}, -"provider": { -"description": "The data provider from which this source info was generated. The value must be equal to the one on the TYPE_DATA_SOURCE feature referenced by this source info via the source_id reference (see above). 
", -"format": "int32", -"type": "integer" -}, -"rawData": { -"description": "A source info may optionally have a set of key-value pairs that provide \"raw data\" specific to that source. The types of raw data available will vary from one provider to another and should not be used in production code. Instead, new fields and/or protocol buffers should be defined to represent this information in a canonical form, and the relevant importers should be modified to populate these new fields.", -"items": { -"$ref": "GeostoreRawDataProto" -}, -"type": "array" -}, -"release": { -"description": "The data release from which this SourceInfoProto was created. The format for this string is provider-dependent (e.g. a MultiNet release would look like \"2008.01\").", -"type": "string" -}, -"sourceId": { -"$ref": "GeostoreFeatureIdProto", -"description": "A source info may have a corresponding TYPE_DATA_SOURCE feature that describes it (provider, copyright information, date of release, etc). In the context of edits and issues, this field should not be set." -}, -"temporaryData": { -"$ref": "Proto2BridgeMessageSet", -"description": "A place for clients to attach arbitrary data to a source info. Never set in MapFacts." -}, -"user": { -"$ref": "GeostoreUserProto", -"description": "RESERVED" -} -}, -"type": "object" -}, -"GeostoreSourceTrustProto": { -"description": "Trust related information about the input source (feed or user) to help feature summarization. Typically, the values in this proto are either based on source's previous observations (e.g., a blocked LBC user or a trusted feed) or their status (Google hired operator or admin user). The proto can later contain a more granular trust score or correctness probabilities. A higher enum value indicates a more trusted source. 
Leaving room in the value space for adding more granular enums, if they become necessary later.", -"id": "GeostoreSourceTrustProto", -"properties": { -"level": { -"description": "The level of trust for the source of the observation.", -"enum": [ -"UNKNOWN", -"BLOCKED", -"NOT_TRUSTED", -"YP_FEEDS", -"TRUSTED", -"SUPER_TRUSTED" -], -"enumDescriptions": [ -"Value not set", -"Should not be used (e.g. known spammer, 404 url)", -"Edits from external users.", -"Trusted feeds like IUSA etc. (See note below).", -"Internal operators, approved edits.", -"Internal admin operators" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSpeedLimitProto": { -"description": "A speed limit, containing both the limit and the conditions in which it applies.", -"id": "GeostoreSpeedLimitProto", -"properties": { -"category": { -"description": "The type of speed limit.", -"enum": [ -"SPEED_LIMIT_CATEGORY_UNKNOWN", -"NONE", -"SCHOOL", -"CONSTRUCTION" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in MapFacts.", -"Speed limits that apply outside of any particular category (contrast to the following categories.)", -"Speed limits that apply in a school zone.", -"Speed limits that apply in a construction zone." -], -"type": "string" -}, -"condition": { -"description": "The conditions under which this speed limit is applicable. If multiple conditions are set, at least one of them must be true.", -"items": { -"$ref": "GeostoreRoadConditionalProto" -}, -"type": "array" -}, -"sourceType": { -"description": "The source of the speed limit.", -"enum": [ -"SPEED_LIMIT_SOURCE_TYPE_UNKNOWN", -"EXPLICIT", -"IMPLICIT" -], -"enumDescriptions": [ -"Speed limit source is unknown for various reasons. Default value.", -"The speed limit was derived from an explicit observation (such as a speed limit sign).", -"The speed limit was derived implicitly by some policy or regulation (i.e. statuatory speed limit). 
An implicit speed limit value that is curated from statutory speed limit." -], -"type": "string" -}, -"speedWithUnit": { -"$ref": "GeostoreSpeedProto", -"description": "A constant speed limit." -}, -"unlimitedSpeed": { -"$ref": "GeostoreUnlimitedSpeedProto", -"description": "A speed limit with no limit value. When there is no speed limit in place." -}, -"variableSpeed": { -"$ref": "GeostoreVariableSpeedProto", -"description": "A dynamic speed limit that can vary within a range of values based on road conditions." -} -}, -"type": "object" -}, -"GeostoreSpeedProto": { -"description": "A speed value and unit.", -"id": "GeostoreSpeedProto", -"properties": { -"speed": { -"description": "All speed values are stored in kilometers per hour.", -"format": "float", -"type": "number" -}, -"unit": { -"description": "Mapfacts only allows unit to be KILOMETERS_PER_HOUR.", -"enum": [ -"UNIT_UNKNOWN", -"MILES_PER_HOUR", -"KILOMETERS_PER_HOUR" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in MapFacts.", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreStableFieldPathProto": { -"description": "Represents a way to traverse nested fields by referencing their token fields. Everything starts relative to a known root message, specified externally. E.g., suppose we have a feature proto that has a lane with token 0x123 which in turn has a lane connection with token 0x456 for which we want to assert something about flowline altitudes. The field path in that case will look like: field_path: { field_num: 31 # segment } field_path: { field_num: 6 # lane version_token: \"0x123\" } field_path: { field_num: 8 # lane_connection version_token: \"0x456\" } field_path: { field_num: 3 # flow } field_path: { field_num: 1 # track } field_path: { # Note: pose is repeated. By not specifying a token we refer to all poses # in a track. 
field_num: 2 # pose } field_path: { field_num: 4 # altitude } This path could also be represented succinctly in a more human-friendly form as something like: segment.lane[@0x123].lane_connection[@0x456].flow.track.pose[*].altitude", -"id": "GeostoreStableFieldPathProto", -"properties": { -"fieldPath": { -"description": "A sequence of field selectors to be traversed starting from the root message.", -"items": { -"$ref": "GeostoreStableFieldPathProtoStableFieldSelector" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreStableFieldPathProtoStableFieldSelector": { -"id": "GeostoreStableFieldPathProtoStableFieldSelector", -"properties": { -"fieldNum": { -"description": "Field number to select.", -"format": "int32", -"type": "integer" -}, -"versionToken": { -"description": "Select repeated field entry by its version token. If this is used, then the message referenced by field_num must have a token field annotated with the (version_token) field option. Must be omitted for leaf non-repeated fields. If unset for a repeated field, we consider this selector to apply equally to all descendants.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreSweepProto": { -"description": "This protocol buffer represents the 2D polygon connecting two segments at an intersection. Collectively, sweep polygons represent intersections for real road width rendering. Notes: - Sweeps represent geometry between the *end* of one segment and the *end* of the other segment (modulo retraction values). - Sweeps are strongly referenced, meaning geometry is stored on both segments involved in the sweep. For example, in the diagram below, the sweep between A and B would be stored on both segment A and segment B. | B | v --A--> - Sweeps are not strictly stored on adjacent segments. 
Disconnected segments (e.g., segments separated by an intersection group) may also contain sweeps.", -"id": "GeostoreSweepProto", -"properties": { -"otherSegmentFeatureId": { -"$ref": "GeostoreFeatureIdProto", -"description": "The segment feature connected to this segment via the sweep geometry." -}, -"polygon": { -"$ref": "GeostorePolygonProto", -"description": "Polygonal geometry representing the area between this segment and the other segment." -}, -"sweepCurve": { -"$ref": "GeostoreCurveConnectionProto", -"description": "Describes parameters for generating the edge of this sweep that starts at edge_follows_segment_end_fraction. The other side of the sweep should be described on the sweep present on the sibling pair." -}, -"sweepToken": { -"description": "A token that can be used to identify the version of the data about this sweep.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreTelephoneProto": { -"description": "This protocol buffer is used to represent telephone numbers and related information.", -"id": "GeostoreTelephoneProto", -"properties": { -"callRate": { -"description": "RESERVED", -"items": { -"$ref": "GeostorePriceRangeProto" -}, -"type": "array" -}, -"contactCategory": { -"description": "Disambiguates between the types of information or service a caller might seek when contacting this phone number.", -"enum": [ -"CONTACT_CATEGORY_UNSPECIFIED", -"CUSTOMER_SERVICE", -"RESERVATIONS", -"SALES" -], -"enumDescriptions": [ -"", -"Provides service to customers before, during, or after a purchase. /m/03w5xm", -"Allows customers to reserve a seat, ticket, room, etc. /m/0n9nm2h", -"Provides information about a purchase of a product or service. /m/014dgf" -], -"type": "string" -}, -"flag": { -"items": { -"enum": [ -"FLAG_NO_COLD_CALLS", -"FLAG_PREFERRED" -], -"enumDescriptions": [ -"DEPRECATED", -"This number is (one of) the preferred numbers to call. It should be displayed before other numbers in maps/placepage, for example. 
For each language served, there shall be at most 1 preferred phone (i.e., if there are two preferred phones, then the intersection between the \"language\" array should be empty). Note that the presence of a \"preferred\" phone doesn't state that we have no confidence on the other numbers of this establishment. The preference is based on the fact that this number is the primary customer service number, reservation hot line, etc." -], -"type": "string" -}, -"type": "array" -}, -"isSharedNumber": { -"description": "True if this phone number is not unique to this establishment and might be shared with other features. In case an establishment shares a phone number with a business chain of which it is a member, and the number canonically belongs to that chain, it should be marked as shared for the establishment but not shared for the chain.", -"type": "boolean" -}, -"label": { -"description": "RESERVED", -"items": { -"$ref": "GeostoreNameProto" -}, -"type": "array" -}, -"language": { -"description": "RESERVED", -"items": { -"type": "string" -}, -"type": "array" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this telephone number." -}, -"number": { -"$ref": "TelephoneNumber", -"deprecated": true, -"description": "** DEPRECATED ** This is deprecated in favor of phone_number below. An internationalized representation of a phone number. See //location/country/telephonenumber.proto" -}, -"phoneNumber": { -"$ref": "I18nPhonenumbersPhoneNumber", -"description": "An internationalized representation of a phone number. See //java/com/google/i18n/phonenumbers/phonenumber.proto" -}, -"serviceLocationFeature": { -"description": "The features from which this phone number can be called from. For instance, if a phone number can only be called from Europe, this field will contain a reference to the TYPE_CONTINENT feature of Europe. This field is analogous to http://kg/schema/common/phone_number/service_location. 
The only valid destination feature types are TYPE_CONTINENT and TYPE_POLITICAL. If empty, this phone number can be called from anywhere in Earth (this is the case for the majority of phones).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"type": { -"enum": [ -"VOICE", -"FAX", -"TDD", -"DATA", -"MOBILE", -"MESSAGING" -], -"enumDeprecated": [ -false, -false, -false, -true, -true, -false -], -"enumDescriptions": [ -"", -"", -"", -"", -"The MESSAGING type should not be used in FeatureProto, but will be used in the context of KG /common/phone_number/type.", -"RESERVED" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreTemporaryClosureProto": { -"description": "This protocol buffer stores information related to temporary closure of a feature. The only allowed precisions for a date is PRECISION_DAY. DateTimeProto.seconds should have the lowest legal value for the desired date/time and precision. E.g. for PRECISION_MONTH, 2019-02-15 21:10:30 is not valid, it should be 2019-02-01 00:00:00 instead. NOTE: Each date is stored in UTC but should be interpreted as being in the local timezone. So clients should convert the DateTimeProto to local (civil) time using UTC+0, and then treat the result as local to the feature.", -"id": "GeostoreTemporaryClosureProto", -"properties": { -"endAsOfDate": { -"$ref": "GeostoreDateTimeProto", -"description": "The latest when this closure may end, if the exact date is unknown. If set, the feature is operational again no later than this date." -}, -"endDate": { -"$ref": "GeostoreDateTimeProto", -"description": "RESERVED" -}, -"startAsOfDate": { -"$ref": "GeostoreDateTimeProto", -"description": "The latest when this closure may start, if the exact date is unknown. If set, the feature is temporarily closed starting no later than this date." 
-}, -"startDate": { -"$ref": "GeostoreDateTimeProto", -"description": "RESERVED" -} -}, -"type": "object" -}, -"GeostoreTextAffixProto": { -"description": "Represents text (with an associated language) that is affixed to the beginning and/or end of a primary text.", -"id": "GeostoreTextAffixProto", -"properties": { -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. These strings should be treated as opaque blobs. You can use LanguageCodeConverter::FromOther to convert the string to a LanguageCode reference. You can then call methods on the LanguageCode class to extract language/script/region subtags (if any). See also http://g3doc/i18n/identifiers/g3doc/using-iii. We place extra restrictions on languages in addition to what the III library requires. See http://go/geo-schema-reference/feature-properties/languages.md", -"type": "string" -}, -"prefix": { -"description": "Text to prepend to the primary text, including any necessary trailing whitespace. At least one of prefix or suffix is required.", -"type": "string" -}, -"suffix": { -"description": "Text to append to the end of the primary text, including any necessary leading whitespace. At least one of prefix or suffix is required.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreThreeDimensionalModelProto": { -"id": "GeostoreThreeDimensionalModelProto", -"properties": { -"pointIndices": { -"description": "Triangle vertex indices, each triple defines a triangle.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"points": { -"description": "We store a triangular mesh in indexed format. Points array.", -"items": { -"$ref": "GeostorePointWithHeightProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTimeBasedRateProto": { -"description": "A rate which applies based on the precise times of utilization. 
Defines a rate, as well as restrictions on the start and end times which must be satisfied in order to be eligible for the rate. See go/rate-schema for more details.", -"id": "GeostoreTimeBasedRateProto", -"properties": { -"durationBasedRate": { -"description": "The rates for this rule. Each duration_based_rate defines the costs associated with a particular duration of a stay. There must be at least one rate with range_start_seconds set to 0 and there cannot be gaps between durations (i.e. there should be no interval uncovered between 0 and the largest range_end_seconds of any duration-based rate).", -"items": { -"$ref": "GeostoreDurationBasedRateProto" -}, -"type": "array" -}, -"taxIncluded": { -"description": "If true, tax is included in the prices in this rate. If false, additional taxes may apply.", -"type": "boolean" -}, -"validEndWithin": { -"$ref": "GeostoreTimeScheduleProto" -}, -"validStartWithin": { -"$ref": "GeostoreTimeScheduleProto", -"description": "Time period during which utilization of this rate must start in order to be eligible for the rate. If not set, there is no restriction on the time when the utilization starts." -} -}, -"type": "object" -}, -"GeostoreTimeComponentProto": { -"id": "GeostoreTimeComponentProto", -"properties": { -"componentType": { -"enum": [ -"COMPONENT_TYPE_POSITIVE", -"COMPONENT_TYPE_MISSING_DATA" -], -"enumDescriptions": [ -"Indicates that this time component is set with regards to what the component is trying to model. For instance, if this time component models opening hours for a business then COMPONENT_TYPE_POSITIVE means open and if it models a road restriction COMPONENT_TYPE_POSITIVE means the road is closed.", -"Indicates that data for this time component is absent and that making assumptions about data over the time period this component represents may be unsafe. 
For instance, if this time component models opening hours for a business then COMPONENT_TYPE_MISSING_DATA can be used to explicitly record that we are missing data (eg. Sunday is not listed in the hours). NOTE: Not all uses of this proto support the COMPONENT_TYPE_MISSING_DATA component_type." -], -"type": "string" -}, -"interval": { -"description": "The time component is the intersection of these intervals", -"items": { -"$ref": "GeostoreTimeIntervalProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTimeEndpointProto": { -"description": "Not all combinations of optional fields in TimeEndpointProto are allowed. The granularity of time is a path along the directed graph with these edges: second -> minute minute -> hour hour -> day of week hour -> day of month hour -> day of year day of week -> week of month day of week -> week of year day of month -> month day of year -> year week of month -> month week of year -> year month -> year A TimeEndpointProto may not specify two fields that are unordered with respect to each other (\"day of year\" and \"day of week\", for instance). The absence of fields larger than any specified field indicates repetition (e.g. no year indicates that the interval occurs every year). The absence of units that are \"smaller\" than the largest specified unit indicates a default lowest value (no hour means midnight (0)). When intersecting time intervals, \"smaller\" units repeat within the \"larger\" unit as many times as necessary. For example, the intersection of the interval from hour 0 to hour 24 with the interval from second 0 to second 1 is equivalent to the union of the set of intervals which represents the first second of every minute of every hour of every day. 
----------------------------------------------------------------------------- WARNING - if you add new fields to TimeEndpointProto you need to: - ensure AreTimeEndpointsEquivalent considers the new fields (and update the corresponding tests) -----------------------------------------------------------------------------", -"id": "GeostoreTimeEndpointProto", -"properties": { -"day": { -"description": "Valid ranges are 0-7, 1-31, and 1-366 (see day_type below)", -"format": "int32", -"type": "integer" -}, -"dayType": { -"enum": [ -"DAY_OF_WEEK", -"DAY_OF_MONTH", -"DAY_OF_YEAR" -], -"enumDescriptions": [ -"Valid day range 0-7. (see enum DayOfWeek)", -"Valid day range 1-31, must be valid day in month", -"Eg day = 31 is not valid if month = September Valid day range 1-366" -], -"type": "string" -}, -"hour": { -"description": "Valid range is 0-24. Because it could be unclear what wrapping hours mean in relation to days, 24 is used to denote midnight at the end of a day.", -"format": "int32", -"type": "integer" -}, -"minute": { -"description": "Valid range is 0-59, except when a repetitive minute interval ends at the end of an hour, in which case 60 is a legal end value.", -"format": "int32", -"type": "integer" -}, -"month": { -"enum": [ -"JANUARY", -"FEBRUARY", -"MARCH", -"APRIL", -"MAY", -"JUNE", -"JULY", -"AUGUST", -"SEPTEMBER", -"OCTOBER", -"NOVEMBER", -"DECEMBER", -"NEXT_JANUARY" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"The following value is used to define the end of a recurring interval that ends at the end of the year. It can be used without a day to represent the end of an interval with month resolution (e.g. November through December) or with a day of month of 1 to represent the end of an interval with day resolution (e.g. November 15th through December 31th). This is similar to using hour 24 to represent the end of a recurring interval with hour resolution that ends at midnight. 
Also see http://g/pathfinder-team/NFS3S4L2lhs/tdMSGr0H9r4J for a real example." -], -"type": "string" -}, -"second": { -"description": "Valid range is 0-59, except when a repetitive second interval ends at the end of a minute, in which case 60 is a legal end value.", -"format": "int32", -"type": "integer" -}, -"week": { -"description": "Valid ranges are 0-5 and 1-53 (depending on the value of week_type, see below).", -"format": "int32", -"type": "integer" -}, -"weekType": { -"enum": [ -"WEEK_OF_MONTH", -"WEEK_OF_YEAR" -], -"enumDescriptions": [ -"Valid week range 0-5.", -"Note: Weeks of this type are numbered such that week 0 begins on the 1st of the month and extends for 7 days. This has the consequence that week 4 never has a full 7 days. For example, for a month with 31 days, dates 1-7 are in week 0, 8-14 in week 1, 15-21 in week 2, 22-28 in week 3, and 29-31 in week 4. Week 5 is considered the first week of next month, and much like using `24` for the hour field or `NEXT_JANUARY` for the month field, is meant only to denote that an interval includes week 4. Valid week range 1-53." -], -"type": "string" -}, -"year": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreTimeIntervalProto": { -"id": "GeostoreTimeIntervalProto", -"properties": { -"begin": { -"$ref": "GeostoreTimeEndpointProto", -"description": "Begin and end are used to specify a range of times: [begin, end). If one is present, the other must be present as well. Additionally, both must have matching time granularities - all fields specified in the begin TimeEndpointProto must be present in the end TimeEndpointProto and vice-versa. Hours are not allowed to wrap (begin.hour() <= end.hour())." -}, -"end": { -"$ref": "GeostoreTimeEndpointProto" -}, -"inverted": { -"description": "If true, then this interval actually encodes the complement of the specified occasion or range. For example, the following TimeIntervalProto encodes all times other than the month of May. 
TimeIntervalProto[ type = TYPE_RANGE inverted = true begin = TimeEndpointProto[ month = MAY ] end = TimeEndpointProto[ month = JUNE ] ]", -"type": "boolean" -}, -"occasion": { -"description": "clang-format on", -"enum": [ -"OCCASION_SEASON", -"OCCASION_SEASON_WINTER", -"OCCASION_SEASON_SUMMER", -"OCCASION_DAYS", -"OCCASION_DAYS_SCHOOL", -"OCCASION_DAYS_HOLIDAY", -"OCCASION_DAYS_PRE_HOLIDAY", -"OCCASION_HOURS", -"OCCASION_HOURS_PEAK", -"OCCASION_HOURS_SCHOOL", -"OCCASION_HOURS_MARKET", -"OCCASION_HOURS_BUSINESS", -"OCCASION_HOURS_DUSK_TO_DAWN", -"OCCASION_HOURS_HIGH_TIDE", -"OCCASION_CONDITIONS", -"OCCASION_CONDITIONS_HIGH_WATER", -"OCCASION_CONDITIONS_ADVERSE", -"OCCASION_CONDITIONS_ADVERSE_RAIN", -"OCCASION_CONDITIONS_ADVERSE_WET", -"OCCASION_CONDITIONS_ADVERSE_FOG", -"OCCASION_CONDITIONS_WINTERY", -"OCCASION_CONDITIONS_WINTERY_AVALANCHE", -"OCCASION_CONDITIONS_WINTERY_SNOW", -"OCCASION_CONDITIONS_WINTERY_ICE", -"OCCASION_CONDITIONS_EVENT", -"OCCASION_CONDITIONS_POLLUTION", -"OCCASION_CONDITIONS_LOW_WATER", -"OCCASION_UNDEFINED", -"OCCASION_UNDEFINED_REGULAR", -"OCCASION_UNDEFINED_SELDOM" -], -"enumDescriptions": [ -"NavTeq just uses \"seasonal closure = yes\".", -"", -"\"Ski season\" appears in Tele Atlas once. Use OCCASION_SEASON_WINTER instead.", -"", -"", -"", -"", -"", -"", -"OCCASION_HOURS_OFF_PEAK doesn't exist because it's the inverse of OCCASION_HOURS_PEAK.", -"", -"Business hours appears only three times.", -"", -"OCCASION_HOURS_DAWN_TO_DUSK doesn't exist because it's the inverse of OCCASION_HOURS_DUSK_TO_DAWN.", -"", -"", -"Adverse conditions include rain, storms, and fog.", -"", -"", -"", -"Wintery conditions include snow and ice.", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": { -"enum": [ -"TYPE_OCCASION", -"TYPE_RANGE" -], -"enumDescriptions": [ -"Indicates a fuzzy interval using one of the occasion constants.", -"Indicates a range of times between two endpoints, as specified in TimeEndpointProto. 
The endpoint is NOT inclusive." -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreTimeScheduleProto": { -"id": "GeostoreTimeScheduleProto", -"properties": { -"component": { -"description": "The schedule is the union of these components.", -"items": { -"$ref": "GeostoreTimeComponentProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTimezoneProto": { -"description": "A TimezoneProto holds information about a feature's related time zone.", -"id": "GeostoreTimezoneProto", -"properties": { -"id": { -"description": "i18n recognized time zone identifier. For the full list of identifiers, see google3/i18n/identifiers/data/timezones.txt.", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this relation." -} -}, -"type": "object" -}, -"GeostoreTollClusterProto": { -"description": "A collection of information that applies to a toll cluster.", -"id": "GeostoreTollClusterProto", -"properties": { -"intersection": { -"description": "The list of gcid:toll_intersection features that this toll cluster. A toll cluster can consist of either a single or a group of intersection points called toll intersections at the end of various road segments in MapFacts that represent one or more lanes passing through a toll fixture that all go to the same routing destination. This relationship is reciprocal, as a toll intersection also stores a reference to the toll cluster it belongs to. A toll cluster must have a reference to one or more toll interections (i.e. toll_cluster.intersection should always be populated).", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTollPathProto": { -"description": "A TollPathProto describes the sequential travel across one or more /geo/type/toll_cluster features. Traveling across a toll path may entail paying a fee, buying a toll pass, etc (although we don't model this in FeatureProto). 
To travel across a toll path, one must travel between toll clusters in the exact order specified. See go/geo-schema:toll-paths for more information.", -"id": "GeostoreTollPathProto", -"properties": { -"tollClusterSequence": { -"$ref": "GeostoreTollPathProtoTollClusterSequence", -"description": "Details the /geo/type/toll_cluster features which constitute this toll path." -} -}, -"type": "object" -}, -"GeostoreTollPathProtoIndexedTollCluster": { -"description": "A /geo/type/toll_cluster and its position along a toll path.", -"id": "GeostoreTollPathProtoIndexedTollCluster", -"properties": { -"cluster": { -"$ref": "GeostoreFeatureIdProto", -"description": "The /geo/type/toll_cluster feature at this position." -}, -"index": { -"description": "The position along the path where this cluster appears.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GeostoreTollPathProtoTollClusterSequence": { -"description": "The set of toll clusters in a toll path, along with their position in the path.", -"id": "GeostoreTollPathProtoTollClusterSequence", -"properties": { -"indexedTollClusters": { -"description": "There must be at least one toll cluster in a toll path, and there may not be duplicates. For ordering, one should rely on `IndexedTollClusterProto.index`, rather than the repeated field ordering (elements may be shuffled). This is a unidirectional reference - toll clusters do not reference the toll paths they're a part of. The toll clusters themselves may be part of multiple toll paths.", -"items": { -"$ref": "GeostoreTollPathProtoIndexedTollCluster" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTrackProto": { -"id": "GeostoreTrackProto", -"properties": { -"index": { -"description": "The index of this TrackProto in a list of TrackProtos.", -"format": "int32", -"type": "integer" -}, -"pose": { -"description": "The instantaneous pose of points along this track. 
The fields set inside each pose must be set consistently along the track.", -"items": { -"$ref": "GeostorePoseProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTransitLineProto": { -"description": "A transit line is a named set of transit trips that are advertised to passengers under a common name, and a number of attributes that are true for all those trips. There is no requirement for each trip to travel through the same list of stops or use the same legs, so a line can contain trips in opposite directions or with variations in the sequence of stops. See go/oysterpedia for an overview of the transit feature types. The standard feature properties are interpreted as follows: name - The names of this line, including both long and short names, if available. Short names like \"10\" or \"Blue\" should carry the FLAG_ABBREVIATED, long names like \"Dublin/Pleasanton line\" should not. The preferred name (one per language) for displaying the line on its own (e.g., as a search result for the line) should carry the FLAG_PREFERRED. website - The official web page describing this line. Repeated if multilingual. Line features have no geometry (neither points nor polylines nor polygons). Within Transit Oyster, geometry can be found in legs. Additional data only in Transit Oyster: child - The legs of this line, in no particular order. source_info - Specifies the GTFS routes that match this feature. Each is given as a PROVIDER_GOOGLE_TRANSIT source_info where dataset is the feed name and cookie is the route_id.", -"id": "GeostoreTransitLineProto", -"properties": { -"agency": { -"description": "The transit agencies responsible for operating this line. All lines should have at least one agency, and most will have exactly one. 
The following cases are reasons for multiple agencies: - Code share: Two or more agencies share trips - Alternations: Each trip is run by one of multiple agencies - Additional: All trips run by one agency, but a second one sells tickets In all cases the order has no meaning. Clarification comes from the trips.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"labelBackgroundColor": { -"description": "The background color of labels for that transit line. The encoding is like in HTML or CSS, eg. 0x11ff00 means a bit of red, full green, no blue, in sRGB color space. The most significant byte must be zero, i.e. no transparency.", -"format": "uint32", -"type": "integer" -}, -"labelTextColor": { -"description": "The text color of labels for that transit line. Encoding like label_background_color.", -"format": "uint32", -"type": "integer" -}, -"stations": { -"description": "The transit stations (establishment POIs with gcid:transit_station) which this transit line can go through, in no particular order. Usage note: The source of truth are the transit leg features in Transit Oyster. In MapFacts, that information is cached in two locations: in this field, and in transit station attachments on POIs. 
Do not assume these locations are always up to date and/or synchronized with each other.", -"items": { -"$ref": "GeostoreFeatureIdProto" -}, -"type": "array" -}, -"vehicleType": { -"description": "The type of vehicle that applies to all trips that use this line.", -"enum": [ -"VEHICLE_TYPE_ANY", -"VEHICLE_TYPE_RAIL", -"VEHICLE_TYPE_METRO_RAIL", -"VEHICLE_TYPE_SUBWAY", -"VEHICLE_TYPE_TRAM", -"VEHICLE_TYPE_MONORAIL", -"VEHICLE_TYPE_HEAVY_RAIL", -"VEHICLE_TYPE_COMMUTER_TRAIN", -"VEHICLE_TYPE_HIGH_SPEED_TRAIN", -"VEHICLE_TYPE_LONG_DISTANCE_TRAIN", -"VEHICLE_TYPE_BUS", -"VEHICLE_TYPE_INTERCITY_BUS", -"VEHICLE_TYPE_TROLLEYBUS", -"VEHICLE_TYPE_SHARE_TAXI", -"VEHICLE_TYPE_FERRY", -"VEHICLE_TYPE_CABLE_CAR", -"VEHICLE_TYPE_GONDOLA_LIFT", -"VEHICLE_TYPE_FUNICULAR", -"VEHICLE_TYPE_SPECIAL", -"VEHICLE_TYPE_HORSE_CARRIAGE", -"VEHICLE_TYPE_AIRPLANE" -], -"enumDescriptions": [ -"ABSTRACT", -"ABSTRACT Metropolitan railway transport, mostly for local transit.", -"ABSTRACT", -"Subways run mainly or completely underground.", -"Trams run mainly or completely along streets.", -"Monorails run on track consisting of a single rail, e.g. suspended monorail.", -"Heavy rail is rail transit that consists of larger vehicles, typically to transport more people across longer distances.", -"Commuter trains connect the city with the suburbs. They tend to stop often and run short distances.", -"High speed trains operate at much higher speed than normal trains. They can also run long distances. Typically with speeds upwards of 200 km/h. Examples are TGV, ICE, Shinkansen.", -"Long distance trains connect multiple cities/regions of a country or even multiple countries. They run faster, but not high-speed. 
Typically speeds under 200 km/h.", -"Buses cover mass transit over roads.", -"A bus that connects cities to each other.", -"A bus powered by electricity obtained from overhead wires by means of a pole.", -"Share taxi is sort of bus transport with ability to drop off and pick up passengers anywhere on it route. Generally share taxi uses minibus vehicles.", -"Ferries are a means of transit over water, often operated on regular, frequent, return services.", -"Cable cars are a form of transit used in the mountains.", -"An aerial lift (colloquially called \"gondola lift\") is a means of cable transport in which cabins, cars, gondolas or open chairs are hauled above the ground by means of one or more cables. Examples: gondola lift, aerial tramway. Funicular is a cable railway in which a pair of tram-like vehicles use each other as counter balance to ascend and descend.", -"", -"Used for easter eggs.", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GeostoreTransitLineVariantProto": { -"description": "A line variant is a specific instantiation of a line concept, denoted by the ordered set of stops and collection of segments that it traverses. Line variants are modeled as TYPE_ROUTE features with gcid:transit_line_variant. This proto stores line-variant-specific information that is not generally applicable to all routes. Schema Design Doc: go/transit-line-concepts-and-variants", -"id": "GeostoreTransitLineVariantProto", -"properties": { -"lineConcept": { -"$ref": "GeostoreFeatureIdProto", -"description": "Reference to the line variant\u2019s line concept." -}, -"stops": { -"description": "Ordered list of stations or platforms serviced by this line variant. 
The order is captured by the ServicedStopProto.index field.", -"items": { -"$ref": "GeostoreServicedStopProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTransitStationProto": { -"description": "Encapsulates information related to an individual transit station.", -"id": "GeostoreTransitStationProto", -"properties": { -"agencyAssociations": { -"description": "All the transit agencies which service this station. A station can be serviced by multiple stations. There may only be one agency association per transit agency. See go/transit-agency-relation-migration for more details.", -"items": { -"$ref": "GeostoreTransitStationProtoTransitAgencyAssociationProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreTransitStationProtoTransitAgencyAssociationProto": { -"description": "The association between this station and the agency which services this station.", -"id": "GeostoreTransitStationProtoTransitAgencyAssociationProto", -"properties": { -"agency": { -"$ref": "GeostoreFeatureIdProto", -"description": "The transit agency which services this station." -}, -"stationCode": { -"description": "A station code uniquely identifies a transit station within the transit agency's network.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreTrustSignalsProto": { -"id": "GeostoreTrustSignalsProto", -"properties": { -"sourceTrust": { -"$ref": "GeostoreSourceTrustProto", -"description": "Trust signals for the source of a given observation, typically based on historical evidences or status (like internal Google operator)." -} -}, -"type": "object" -}, -"GeostoreUnlimitedSpeedProto": { -"description": "A speed limit without a limit value. 
Used to indicate the absence of a speed limit.", -"id": "GeostoreUnlimitedSpeedProto", -"properties": {}, -"type": "object" -}, -"GeostoreUrlListProto": { -"description": "Hold a list of URLs, usually to contain translations of a single URL.", -"id": "GeostoreUrlListProto", -"properties": { -"url": { -"items": { -"$ref": "GeostoreUrlProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreUrlProto": { -"description": "A web location for a Feature. URLs should always be stored in repeated fields because some objects (eg. transit schedules in Brussels) have different URLs for different languages.", -"id": "GeostoreUrlProto", -"properties": { -"language": { -"description": "The external form of a Google International Identifiers Initiative (III) LanguageCode object. See google3/i18n/identifiers/languagecode.h for details. We place extra restrictions on languages in addition to what the III library requires. See http://go/geo-schema-reference/feature-properties/languages.md This field represents the language of the content of the web site. It may be missing if the web site is language-independent or if the language is unknown.", -"type": "string" -}, -"metadata": { -"$ref": "GeostoreFieldMetadataProto", -"description": "Field-level metadata for this URL. NOTE: there are multiple UrlProto fields in the Geo Schema. Metadata here is only expected to be present on FeatureProto.website[]." -}, -"pagerank": { -"deprecated": true, -"description": "** DEPRECATED ** The pagerank of this URL. Valid values [0, 65535] See http://wiki/Main/NearestSeeds for more information.", -"format": "int32", -"type": "integer" -}, -"url": { -"description": "The URL.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreUserProto": { -"description": "UserProto identifies a (human) user of Geo Data. Its primary use is in describing the source of pieces of data (e.g. edits). 
It could be a simple identifier, but isn't so that we can store it in the clear while still preventing correlation between a user's contribution.", -"id": "GeostoreUserProto", -"properties": { -"encryptedGaiaId": { -"description": "The user Gaia ID in encrypted form. Wipeout ids take value of \"\" in bytes.", -"format": "byte", -"type": "string" -}, -"encryptionKeyName": { -"description": "Required. The name of the key used to encrypt the Gaia ID.", -"type": "string" -}, -"keystoreConfigId": { -"description": "Required (valid default provided). The config ID of the owner of the above encryption_key_name. This field must be set if the encryption key name is *not* \"mapfacts_gaia_id_encryption_key\".", -"format": "int32", -"type": "integer" -}, -"username": { -"description": "If possible, writers should set this to a full user email, including the domain. Readers should not assume that this is a well-formed email address. This field may only be set by Atlas, Pushpin and OneRing because they are internal tools which have a PWG exception to store textual usernames in the clear.", -"type": "string" -} -}, -"type": "object" -}, -"GeostoreVariableSpeedProto": { -"description": "A speed limit whose value can change based on road, traffic, and weather conditions.", -"id": "GeostoreVariableSpeedProto", -"properties": {}, -"type": "object" -}, -"GeostoreVehicleAttributeFilterProto": { -"description": "A set of vehicle attribute conditionals (ex: weight >= 20T && num_trailers = 2) used to define a slice of all possible vehicles. This can be useful for filtering one or more vehicles by a predicate.", -"id": "GeostoreVehicleAttributeFilterProto", -"properties": { -"axleCount": { -"description": "A repeated value here is treated as an AND operation. 
This allows for ranges to be represented by two values (ex: \"count < 4\" AND \"count >= 2\" means \"2 <= count < 4\").", -"items": { -"$ref": "GeostoreCountComparisonProto" -}, -"type": "array" -}, -"hasTrailer": { -"description": "Whether the applied vehicle types have a trailer attached to them.", -"type": "boolean" -}, -"hazardousGoods": { -"description": "List of prohibited hazardous goods for a vehicle to carry. A repeated value here is treated as an OR operation, meaning that they may not carry ANY of the goods listed.", -"items": { -"enum": [ -"HAZARDOUS_GOODS_TYPE_UNSPECIFIED", -"EXPLOSIVES", -"GASES", -"FLAMMABLE", -"COMBUSTIBLE", -"ORGANIC", -"POISON", -"RADIOACTIVE", -"CORROSIVE", -"ASPIRATION_HAZARD", -"ENVIRONMENTAL_HAZARD", -"OTHER" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"type": "array" -}, -"numTrailers": { -"description": "A repeated value here is treated as an AND operation. This allows for ranges to be represented by two values (ex: \"count <= 4\" AND \"count > 2\" means \"2 < count <= 4\").", -"items": { -"$ref": "GeostoreCountComparisonProto" -}, -"type": "array" -}, -"trailerLength": { -"description": "A repeated value here is treated as an AND operation. This allows for ranges to be represented by two values (ex: \"length <= 53ft\" AND \"length > 48ft\" means \"48ft < length <= 53ft\").", -"items": { -"$ref": "GeostoreDimensionComparisonProto" -}, -"type": "array" -}, -"vehicleHeight": { -"description": "A repeated value here is treated as an AND operation. This allows for ranges to be represented by two values (ex: \"height > 3m\" AND \"height <= 5m\" means \"3m < height <= 5m\").", -"items": { -"$ref": "GeostoreDimensionComparisonProto" -}, -"type": "array" -}, -"vehicleLength": { -"description": "A repeated value here is treated as an AND operation. 
This allows for ranges to be represented by two values (ex: \"length <= 40m\" AND \"length > 35m\" means \"35m < length <= 40m\").", -"items": { -"$ref": "GeostoreDimensionComparisonProto" -}, -"type": "array" -}, -"vehicleWeight": { -"description": "A repeated value here is treated as an AND operation. This allows for ranges to be represented by two values (ex: \"weight < 8T\" AND \"weight >= 3T\" means \"3T <= weight < 8T\").", -"items": { -"$ref": "GeostoreWeightComparisonProto" -}, -"type": "array" -}, -"vehicleWidth": { -"description": "A repeated value here is treated as an AND operation. This allows for ranges to be represented by two values (ex: \"width < 4m\" AND \"width >= 2m\" means \"2m <= width < 4m\").", -"items": { -"$ref": "GeostoreDimensionComparisonProto" -}, -"type": "array" -} -}, -"type": "object" -}, -"GeostoreVerticalOrderingProto": { -"description": "A proto representing a vertical ordering of a feature. NOTE: This shouldn\u2019t be used if a more specific field can be used instead. E.g., for TYPE_SEGMENT features grade_level field should be preferred. For indoor features RELATION_ON_LEVEL should be preferred. See go/aboutgrades for comparison of various types of levels available.", -"id": "GeostoreVerticalOrderingProto", -"properties": { -"level": { -"description": "The level represents the relative vertical ordering of a feature among all overlapping features. For example, we may have features along freeway surface have level = 0, and features on an overpass have level = 1. 
NOTE: It\u2019s assumed that all features have level 0 by default, so that it\u2019s not necessary for all overlapping features to have this set.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GeostoreWeightComparisonProto": { -"description": "A weight value tagged with a comparison operator.", -"id": "GeostoreWeightComparisonProto", -"properties": { -"comparison": { -"enum": [ -"COMPARISONOPERATOR_UNKNOWN", -"LESS_THAN", -"GREATER_THAN" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in Mapfacts.", -"", -"" -], -"type": "string" -}, -"comparisonOperator": { -"enum": [ -"UNSPECIFIED", -"EQUAL", -"LESS_THAN", -"LESS_THAN_OR_EQUAL", -"GREATER_THAN", -"GREATER_THAN_OR_EQUAL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"weightWithUnit": { -"$ref": "GeostoreWeightProto" -} -}, -"type": "object" -}, -"GeostoreWeightProto": { -"description": "A weight with a numerical value and unit.", -"id": "GeostoreWeightProto", -"properties": { -"unit": { -"enum": [ -"UNIT_UNKNOWN", -"METRIC_TON", -"LONG_TON", -"SHORT_TON", -"POUND", -"KILOGRAM" -], -"enumDescriptions": [ -"Default proto value, this value will never be set in Mapfacts.", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"weight": { -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoodocAnchorLabel": { -"description": "An anchor label can be attached to any element to give it a reference address. LogicalEntity links (see goodoc-semantics.proto) may use anchor labels to point to goodoc elements (they may also use indices to locate the element, but the indices could become invalid if the goodocs are allowed to mutate). Multiple elements may have the same anchor.", -"id": "GoodocAnchorLabel", -"properties": { -"Anchor": { -"description": "There is a generic method for composing such strings. Please take a look at GoodocUtils::GenerateUniqueAnchorName(...) 
in ocr/goodoc/goodoc-utils.h.", -"type": "string" -}, -"anchorScope": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocBoundingBox": { -"description": "Bounding box for page structural elements: pictures, paragraphs, characters, etc.", -"id": "GoodocBoundingBox", -"properties": { -"Height": { -"format": "int32", -"type": "integer" -}, -"Label": { -"description": "Optional magic label, so objects can be sorted on bounding box dimensions easily", -"format": "int32", -"type": "integer" -}, -"Left": { -"description": "BoundingBox coordinates and sizes are expressed in pixels", -"format": "int32", -"type": "integer" -}, -"Top": { -"format": "int32", -"type": "integer" -}, -"Width": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocBoxPartitions": { -"description": "A way to specify a simple partitioning of a BoundingBox into a sequence of sub-boxes. +----------------------------------------------------+ | | | | | | | span(0) | (1) | (2) | (3) | (4) | | | | | | | +----------------------------------------------------+ This representation can, for example, be used to store coarse Symbol boundaries within a Word (see Word.CompactSymbolBoxes below) instead of per-Symbol BoundingBoxes, for saving space.", -"id": "GoodocBoxPartitions", -"properties": { -"direction": { -"format": "int32", -"type": "integer" -}, -"span": { -"description": "\"span\" is width or height, determined by \"direction\". 
If there are k partitions, then there are k - 1 \"span\" values, one for each except the last symbol (which is redundant).", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocBreakLabel": { -"description": "Break label", -"id": "GoodocBreakLabel", -"properties": { -"BreakLabelType": { -"format": "int32", -"type": "integer" -}, -"isPrefix": { -"description": "True if break prepends the element", -"type": "boolean" -} -}, -"type": "object" -}, -"GoodocCharLabel": { -"description": "Font label", -"id": "GoodocCharLabel", -"properties": { -"BaseLine": { -"description": "The shift of a character from the base line of the string in pixels", -"format": "int32", -"type": "integer" -}, -"CharacterHeight": { -"description": "Height of small characters in pixels on the source image", -"format": "int32", -"type": "integer" -}, -"Color": { -"description": "The foreground color of the symbol; the default color is 0 (black)", -"format": "int32", -"type": "integer" -}, -"Confidence": { -"description": "Symbol recognition confidence from OCR. Range depends upon OCR Engine.", -"format": "int32", -"type": "integer" -}, -"FontId": { -"description": "The font ID refers to the fonts table in the document header", -"format": "int32", -"type": "integer" -}, -"FontSize": { -"description": "Size in points (JFYI: point is 1/72\"). This is rounded to the nearest whole number.", -"format": "int32", -"type": "integer" -}, -"FontSizeFloat": { -"description": "Size in points represented as float.", -"format": "float", -"type": "number" -}, -"FontType": { -"format": "int32", -"type": "integer" -}, -"HasUncertainHeight": { -"description": "If CharacterHeight is defined uncertainly", -"type": "boolean" -}, -"HorizontalScale": { -"description": "The horizontal scaling for a character, in percents. 
The default value for this property is 100, which corresponds to no scaling.", -"format": "int32", -"type": "integer" -}, -"IsBold": { -"type": "boolean" -}, -"IsItalic": { -"type": "boolean" -}, -"IsSmallCaps": { -"type": "boolean" -}, -"IsStrikeout": { -"type": "boolean" -}, -"IsSubscript": { -"type": "boolean" -}, -"IsSuperscript": { -"type": "boolean" -}, -"IsSuspicious": { -"description": "If OCR Engine marked the character as \"suspicious\" (this character is likely to be recognized incorrectly).", -"type": "boolean" -}, -"IsUnderlined": { -"type": "boolean" -}, -"NotOcrablePerQA": { -"description": "True if a QA operator has marked this as not OCRable. This is used for complex equations, scripts that the operator can't type, or handwriting.", -"type": "boolean" -}, -"Penalty": { -"description": "Symbol-level penalty from the garbage text detector. Lower is better; range = [0,100].", -"format": "int32", -"type": "integer" -}, -"SerifProbability": { -"description": "The probability that a character is written with a Serif font", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocDocument": { -"description": "Top-level representation of OCRed document", -"id": "GoodocDocument", -"properties": { -"EditingHistory": { -"description": "Debug info, recording the history of any editing done through the interface in goodoc-editing.h. The strings look like \"MoveParagraph(page_index = 0, source_block_index = 3, ...);", -"items": { -"type": "string" -}, -"type": "array" -}, -"LogicalEntity": { -"description": "Logical entities are stored as blobs. Depending on the kind of thing this is a goodoc of, a separate .proto file is expected to define the logical entity structure. Hence we can still parse this as a goodoc for people who dont care about this, and people who care about this can parse it specifically. ocr/goodoc/logical-entity-utils.h has methods to read and write these. 
See Goodoc++ doc", -"items": { -"format": "byte", -"type": "string" -}, -"type": "array" -}, -"LogicalEntityMessageName": { -"description": "The names of the proto messages serialized in LogicalEntity, one for each LogicalEntity. The repetitions should number 0 to leave this unspecified, or they should equal the number of LogicalEntity strings.", -"items": { -"type": "string" -}, -"type": "array" -}, -"SubDocuments": { -"description": "For multi-goodoc documents", -"items": { -"$ref": "GoodocDocument" -}, -"type": "array" -}, -"header": { -"$ref": "GoodocDocumentHeader" -}, -"page": { -"items": { -"$ref": "GoodocDocumentPage" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocDocumentHeader": { -"id": "GoodocDocumentHeader", -"properties": { -"OcrEngineId": { -"type": "string" -}, -"OcrEngineVersion": { -"type": "string" -}, -"font": { -"items": { -"$ref": "GoodocDocumentHeaderFont" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocDocumentHeaderFont": { -"id": "GoodocDocumentHeaderFont", -"properties": { -"FontId": { -"format": "int32", -"type": "integer" -}, -"FontName": { -"type": "string" -} -}, -"type": "object" -}, -"GoodocDocumentPage": { -"id": "GoodocDocumentPage", -"properties": { -"GarbageDetectorChangeList": { -"description": "If the garbage text detector was run, the changelist that the binary was sync'ed to (or -1 if unknown), and whether the settings had their production values (or false if unknown).", -"format": "int32", -"type": "integer" -}, -"GarbageDetectorWasProduction": { -"type": "boolean" -}, -"Height": { -"description": "Height in pixels", -"format": "int32", -"type": "integer" -}, -"HorizontalDpi": { -"description": "Horizontal resolution in DPI.", -"format": "int32", -"type": "integer" -}, -"Label": { -"$ref": "GoodocLabel" -}, -"PornScore": { -"description": "Score of porn classifier from analyzing images on page. 
Note: This should be named porn_score, but we use PornScore as the name in order to be consistent with the rest of this proto.", -"format": "double", -"type": "number" -}, -"TextConfidence": { -"description": "Page text recognition confidence. Range depends on the algorithm but should be consistent in a given volume. 0 is bad, 100 is good.", -"format": "int32", -"type": "integer" -}, -"VerticalDpi": { -"description": "Vertical resolution in DPI.", -"format": "int32", -"type": "integer" -}, -"Width": { -"description": "Width in pixels", -"format": "int32", -"type": "integer" -}, -"block": { -"items": { -"$ref": "GoodocDocumentPageBlock" -}, -"type": "array" -}, -"mergedpageinfo": { -"items": { -"$ref": "GoodocDocumentPageMergedPageInfo" -}, -"type": "array" -}, -"postOcrConfidence": { -"description": "Whether page-level text confidences and other summary data were computed by PostOcrUtils instead of the now-obsolete GarbageTextDetector", -"type": "boolean" -}, -"stats": { -"$ref": "GoodocSummaryStats", -"description": "Page level stats (font size, line spacing, etc.)" -} -}, -"type": "object" -}, -"GoodocDocumentPageBlock": { -"id": "GoodocDocumentPageBlock", -"properties": { -"BlockType": { -"format": "int32", -"type": "integer" -}, -"Box": { -"$ref": "GoodocBoundingBox" -}, -"Label": { -"$ref": "GoodocLabel" -}, -"OrientationLabel": { -"$ref": "GoodocOrientationLabel", -"description": "Which way is upright for this block, and what is the reading order (applicable if there is text here)." -}, -"Paragraph": { -"items": { -"$ref": "GoodocParagraph" -}, -"type": "array" -}, -"RotatedBox": { -"$ref": "GoodocRotatedBoundingBox", -"description": "If RotatedBox is set, Box must be set as well. See RotatedBoundingBox." -}, -"TextConfidence": { -"description": "Block text recognition confidence. Range depends on the algorithm but should be consistent in a given volume. 
0 is bad, 100 is good.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocDocumentPageMergedPageInfo": { -"description": "If we have merged text from another goodoc into this one (for example, from a PDF text layer goodoc into an OCR'd goodoc), we record some source goodoc info here.", -"id": "GoodocDocumentPageMergedPageInfo", -"properties": { -"OcrEngineId": { -"type": "string" -}, -"OcrEngineVersion": { -"type": "string" -} -}, -"type": "object" -}, -"GoodocFontSizeStats": { -"description": "Statistics about a particular font size (from CharLabel.FontSize) aggregated over a range of symbols", -"id": "GoodocFontSizeStats", -"properties": { -"fontId": { -"description": "CharLabel.FontId and FontSize", -"format": "int32", -"type": "integer" -}, -"fontSize": { -"format": "int32", -"type": "integer" -}, -"medianHeight": { -"description": "The measurements are in pixels", -"format": "int32", -"type": "integer" -}, -"medianLineHeight": { -"description": "top to bottom", -"format": "int32", -"type": "integer" -}, -"medianLineSpace": { -"description": "bottom to next top in para", -"format": "int32", -"type": "integer" -}, -"medianLineSpan": { -"description": "top to next top in para", -"format": "int32", -"type": "integer" -}, -"medianWidth": { -"format": "int32", -"type": "integer" -}, -"numLineSpaces": { -"description": "Lines (out of num_lines) that have a successor line within their para", -"format": "int32", -"type": "integer" -}, -"numLines": { -"description": "Line stats for this font. \"top\" corresponds to the highest ascender and \"bottom\" to the lowest descender. 
num_lines = # lines with > 50% symbols having this font", -"format": "int32", -"type": "integer" -}, -"numSymbols": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocLabel": { -"description": "Label aggregates all kinds of optional characteristics of page elements.", -"id": "GoodocLabel", -"properties": { -"AnchorLabel": { -"description": "AnchorLabel identifies a link target.", -"items": { -"$ref": "GoodocAnchorLabel" -}, -"type": "array" -}, -"BreakLabel": { -"$ref": "GoodocBreakLabel" -}, -"CharLabel": { -"$ref": "GoodocCharLabel", -"description": "CharLabel is specifically intended for symbols" -}, -"LanguageLabel": { -"description": "Languages used in the element (page, block, paragraph or word). Ordered by dominant-language first. Note: content scanjobs processed by the garbage_text_detector before CL 9223538 (Dec. 2008) have LanguageLabels in arbitrary order (within Page and Block elements) -- the confidence value should be inspected to find the dominant language guess for these, rather than just taking the first.", -"items": { -"$ref": "GoodocLanguageLabel" -}, -"type": "array" -}, -"SemanticLabel": { -"$ref": "GoodocSemanticLabel", -"description": "SemanticLabel is defined in goodoc-semantics.proto, it allows rich annotation of content, identifying the nature of page elements." -} -}, -"type": "object" -}, -"GoodocLanguageCombinationLanguage": { -"description": "Weighted language", -"id": "GoodocLanguageCombinationLanguage", -"properties": { -"bcp47Tag": { -"description": "Bcp47 language code. Note, this is not the same as OceanCode used by goodoc::Document.", -"type": "string" -}, -"weight": { -"description": "Weight of language. This specifies how likely it is to see the language in the input text. 
The values don't have to add up to 1.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoodocLanguageLabel": { -"description": "Language label", -"id": "GoodocLanguageLabel", -"properties": { -"ClosestLanguageId": { -"description": "Closest id from i18n/languages/proto/languages.proto; caveat: may not accurately capture the language. GoodocLanguageCodeToLanguage() declared in ocr/goodoc/goodoc-utils.h may be used to convert a Language enum (i18n/languages/proto/languages.proto) to a string suitable for this field.", -"format": "int32", -"type": "integer" -}, -"Confidence": { -"description": "Confidence level on that language, between 0 and 100", -"format": "int32", -"type": "integer" -}, -"LanguageCode": { -"description": "Old (Ocean) Language Code Usage: The language code is inferred during the running of the Garbage Text Detector and gets set at the paragraph, block and page level. Language code is a string of 3 or more characters. The first 3 letters specify the language, according to ISO 639. Optionally, the 3-letter code can be extended with an underscore and a language variant specifier. Specifiers exist for regional variants or for different forms of language spelling. The regional variants are specified as 2-letter country code, according to ISO 3166. Some examples: Standard \"por\" - Portuguese, standard \"rus\" - Russian, standard Regional variants: \"por_br\" - Portuguese, Brazilian \"eng_us\" - English, United States Variants of spelling: \"rus_old\" - Russian, old spelling \"chi_tra\" - Chinese, traditional \"ger_new\" - German, new spelling LanguageToGoodocLanguageCode() declared in ocr/goodoc/goodoc-utils.h may be used to convert a Language enum (i18n/languages/proto/languages.proto) to a string suitable for this field. New Language Code Usage: Most of the usages described above were standardized in BCP 47, and these codes are the new stanadard to be used in this field. 
To load either new or old language codes to form LanguageCode objects, use the function FromOceanCode() in ocr/quality/lang_util.h Note that the function ocr::FromOceanCode is capable of transforming either version of the LanguageCode to a C++ i18n_identifiers::LanguageCode.", -"type": "string" -} -}, -"type": "object" -}, -"GoodocLogicalEntity": { -"description": "A logical entity in the abstract is just a group of links to the goodoc. Depending on the kind of item, a separate proto file should extend this to define the logical structure for that kind. For example. newspapers.proto defines the logical entity for newspapers. LogicalEntity is also used within some SemanticLabels, for example, for a table-of-contents link.", -"id": "GoodocLogicalEntity", -"properties": { -"Metadata": { -"type": "string" -}, -"link": { -"items": { -"$ref": "GoodocLogicalEntityLink" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocLogicalEntityLink": { -"description": "NOTE(gponcin) 2008/11 This is repeated for articles where we may have multiple links in one entity. From Vivek (Atlantis): \"The block segmenter outputs a list of headlines on a page as a single logical entity that we attach to the logicalentity(1) for the goodoc.\"", -"id": "GoodocLogicalEntityLink", -"properties": { -"Anchor": { -"description": "The preferred way to link to an element is to create an AnchorLabel in the target element and name it here. Multiple elements may contain the same Anchor string.", -"type": "string" -}, -"BlockId": { -"format": "int32", -"type": "integer" -}, -"DocId": { -"description": "Links may also specifically locate the target element with the following indices. 
Note that during the course of layout analysis, goodoc elements may move around, so such hard links should be created only very late (or not at all -- Anchors would be more reliable target addresses).", -"format": "int32", -"type": "integer" -}, -"PageId": { -"format": "int32", -"type": "integer" -}, -"ParagraphId": { -"format": "int32", -"type": "integer" -}, -"RouteId": { -"format": "int32", -"type": "integer" -}, -"SymbolId": { -"format": "int32", -"type": "integer" -}, -"Url": { -"description": "If not defined, link points to the current doc", -"type": "string" -}, -"WordId": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocOrdinal": { -"description": "An Ordinal message represents a single ordinal component of a page number. It encodes the printed or inferred numbering style (Roman, ASCII, etc.) and the ordinal value of the component. An optional set of variable is defined in order to express a sectioned ordinal. A sectioned ordinal may appear in certain page numbering styles, for example \"12-1\" where \"12\" identifies a chapter and \"1\" identifies the page within it. This case will be encoded with value 1 and section_value 12 both of type ASCII.", -"id": "GoodocOrdinal", -"properties": { -"implicit": { -"enum": [ -"UNKNOWN_IMPLICIT", -"IMPLICIT", -"EXPLICIT" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"sectionStringValue": { -"type": "string" -}, -"sectionValue": { -"format": "int32", -"type": "integer" -}, -"sectionValueType": { -"description": "The following vars describe the section component of an ordinal (if exists). They are used to express situation where a page number has a section component, usually denoating the chapter number. For example pages 5-14, 5-15 will both have the common section 5. (If exists). The semantcis of the section variables correspond to that of the primary part of the ordinal. 
(Described above).", -"enum": [ -"UNKNOWN_VALUE_TYPE", -"LEGACY_PRE_VALUE", -"ROMAN", -"ASCII", -"ALPHA", -"LEGACY_POST_VALUE", -"UNDEFINED_VALUE_TYPE", -"CHINESE", -"JAPANESE", -"KOREAN", -"ARABIC_ARABIC" -], -"enumDescriptions": [ -"", -"Not to be used", -"Roman numeral", -"ASCII numeral (i.e. 1,2,3)", -"Alphabetic, as in the first part of \"A-11\".", -"Not to be used", -"", -"", -"", -"", -"Arabic numeral as used in Arabic. (Not to be confused with the ASCII numeral a.k.a Arabic numberal.)" -], -"type": "string" -}, -"stringValue": { -"description": "The string page value.", -"type": "string" -}, -"value": { -"description": "The numeric page value.", -"format": "int32", -"type": "integer" -}, -"valueDelta": { -"description": "The delta in which the value increases between pages.", -"enum": [ -"DELTA_HALF", -"DELTA_ONE", -"DELTA_TWO" -], -"enumDescriptions": [ -"appears in typewritten documents", -"the usual numbering scenario", -"appears in documents with two ordinals per page" -], -"type": "string" -}, -"valueType": { -"description": "A value type from the Type enum above.", -"enum": [ -"UNKNOWN_VALUE_TYPE", -"LEGACY_PRE_VALUE", -"ROMAN", -"ASCII", -"ALPHA", -"LEGACY_POST_VALUE", -"UNDEFINED_VALUE_TYPE", -"CHINESE", -"JAPANESE", -"KOREAN", -"ARABIC_ARABIC" -], -"enumDescriptions": [ -"", -"Not to be used", -"Roman numeral", -"ASCII numeral (i.e. 1,2,3)", -"Alphabetic, as in the first part of \"A-11\".", -"Not to be used", -"", -"", -"", -"", -"Arabic numeral as used in Arabic. (Not to be confused with the ASCII numeral a.k.a Arabic numberal.)" -], -"type": "string" -} -}, -"type": "object" -}, -"GoodocOrientationLabel": { -"description": "OrientationLabel groups the details about orientation and reading order.", -"id": "GoodocOrientationLabel", -"properties": { -"deskewAngle": { -"description": "After rotating so that the text orientation is upright, how many radians does one have to rotate the block anti-clockwise for it to be level? 
We guarantee: -Pi/4 <= deskew_angle <= Pi/4", -"format": "float", -"type": "number" -}, -"mirrored": { -"description": "Whether a text line is mirrored (e.g. reflected in a shiny surface or seen through the opposite side of a storefront window). The intent is that this is a quality of the text line image. It needs to be reflected according to a vertical axis along the direction of upright characters to make it readable. This does not affect the shape of the bounding box. A mirrored line with top to bottom writing remains top to bottom. A mirrored horizontal line will flip left to right. However any child entities (symbols) will remain in the same order, and the writing direction imposed by the language (ltr or rtl) will remain the same.", -"type": "boolean" -}, -"orientation": { -"enum": [ -"ORIENTATION_PAGE_UP", -"ORIENTATION_PAGE_RIGHT", -"ORIENTATION_PAGE_DOWN", -"ORIENTATION_PAGE_LEFT" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"textlineOrder": { -"enum": [ -"TEXTLINE_ORDER_LEFT_TO_RIGHT", -"TEXTLINE_ORDER_RIGHT_TO_LEFT", -"TEXTLINE_ORDER_TOP_TO_BOTTOM" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"writingDirection": { -"enum": [ -"WRITING_DIRECTION_LEFT_TO_RIGHT", -"WRITING_DIRECTION_RIGHT_TO_LEFT", -"WRITING_DIRECTION_TOP_TO_BOTTOM" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GoodocOverrides": { -"description": "This message specifies structure \"overrides\" to apply: it can be used to force certain kinds of GoodocToHTML renderings of elements.", -"id": "GoodocOverrides", -"properties": { -"blockImagination": { -"description": "For text blocks only: do not allow this block to be turned into an image when rendering, even if your algorithms want to do so:", -"enum": [ -"LEAVE_ALONE", -"FORCE_TRUE", -"FORCE_FALSE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"doNotExpandGraphicBox": { -"description": "For graphic blocks, we often expand 
the block a bit for rendering, to compensate for bad image segmentation. do_not_expand_graphic_box forces this behavior to be turned off.", -"type": "boolean" -}, -"fullPageAsImage": { -"description": "For Pages only: explicitly specify whether or not this page should be rendered fully as an image", -"enum": [ -"LEAVE_ALONE", -"FORCE_TRUE", -"FORCE_FALSE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"fullPageLineated": { -"description": "For Pages only: explicitly specify whether or not all text on this page should be treated as \"LINEATED\"", -"enum": [ -"LEAVE_ALONE", -"FORCE_TRUE", -"FORCE_FALSE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"fullPageSkipped": { -"description": "For Pages only: explicitly specify whether or not this page should be skipped.", -"enum": [ -"LEAVE_ALONE", -"FORCE_TRUE", -"FORCE_FALSE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"needNotSuppressPhoto": { -"description": "This GRAPHIC block's image can be shown even when GoodocToHTMLOptions.suppress_photos_with_this is specified.", -"type": "boolean" -}, -"pageBreakBefore": { -"description": "For blocks: explicitly specify whether or not this block should get a page-break before it.", -"enum": [ -"LEAVE_ALONE", -"FORCE_TRUE", -"FORCE_FALSE" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -}, -"style": { -"items": { -"$ref": "GoodocOverridesStyle" -}, -"type": "array" -}, -"wordHtml": { -"description": "For Words only: replace the rendered HTML by this:", -"type": "string" -} -}, -"type": "object" -}, -"GoodocOverridesStyle": { -"description": "Extra css styles to apply", -"id": "GoodocOverridesStyle", -"properties": { -"name": { -"description": "css attribute name: \"margin-left\", for eg.", -"type": "string" -}, -"value": { -"description": "css attribute vale: \"1em\", for eg.", -"type": "string" -} -}, -"type": "object" -}, -"GoodocParagraph": { -"description": "Represents a paragraph of text in 
OCRed content.", -"id": "GoodocParagraph", -"properties": { -"Box": { -"$ref": "GoodocBoundingBox" -}, -"FirstLineIndent": { -"format": "int32", -"type": "integer" -}, -"Label": { -"$ref": "GoodocLabel" -}, -"LeftIndent": { -"format": "int32", -"type": "integer" -}, -"LineSpacing": { -"format": "int32", -"type": "integer" -}, -"OrientationLabel": { -"$ref": "GoodocOrientationLabel", -"description": "Which way is upright for this paragraph and what is the dominant reading order?" -}, -"RightIndent": { -"format": "int32", -"type": "integer" -}, -"RotatedBox": { -"$ref": "GoodocRotatedBoundingBox", -"description": "If RotatedBox is set, Box must be set as well. See RotatedBoundingBox." -}, -"SpaceAfter": { -"format": "int32", -"type": "integer" -}, -"SpaceBefore": { -"format": "int32", -"type": "integer" -}, -"SubsumedParagraphProperties": { -"description": "If we merge any paragraphs into this one (through the MergeParagraphWithNext() interface in goodoc-editing.h), then we append the properties of the merged paragraph here, for debugging and to avoid losing any info. Note that the SubsumedParagraphProperties Paragraphs do not contain Routes.", -"items": { -"$ref": "GoodocParagraph" -}, -"type": "array" -}, -"TextConfidence": { -"description": "Paragraph text recognition confidence. Range depends on the algorithm but should be consistent in a given volume. 
0 is bad, 100 is good.", -"format": "int32", -"type": "integer" -}, -"Width": { -"format": "int32", -"type": "integer" -}, -"alignment": { -"format": "int32", -"type": "integer" -}, -"droppedcap": { -"$ref": "GoodocParagraphDroppedCap" -}, -"route": { -"items": { -"$ref": "GoodocParagraphRoute" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocParagraphDroppedCap": { -"description": "Information about the paragraph's dropped capital letter", -"id": "GoodocParagraphDroppedCap", -"properties": { -"Box": { -"$ref": "GoodocBoundingBox" -}, -"LettersCount": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocParagraphRoute": { -"id": "GoodocParagraphRoute", -"properties": { -"EndPoint": { -"$ref": "GoodocRoutePoint", -"description": "Route end point" -}, -"StartPoint": { -"$ref": "GoodocRoutePoint", -"description": "Route start point" -}, -"Weight": { -"description": "Route weight, i.e. route", -"format": "int32", -"type": "integer" -}, -"Word": { -"description": "The array of words on this route", -"items": { -"$ref": "GoodocWord" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocRotatedBoundingBox": { -"description": "Similar to goodoc.BoundingBox, but containing an angle of rotation, thus able to represent non-axis-aligned boxes. RotatedBoundingBox can be used in combination with BoundingBox to better represent non-axis-aligned page structural elements. In such case, two bounding boxes can be used per element. A RotatedBoundingBox that is rotated to tightly encompass the element; embedded (as tightly as possible) inside an axis-aligned BoundingBox. Note that there is some amount of ambiguity regarding what angle and vertex to use. Consider a square with axis-aligned diagonals: B / \\ A C \\ / D This can either be represented as a -45 degree rotation around A, a 45 degree rotation around B, a 135 degree rotation around C, or a -135 degree rotation around D. 
Which one you use depends on your use case, but one recommendation is to use the vertex that would be top left if the reader was reading it in the 'natural' orientation.", -"id": "GoodocRotatedBoundingBox", -"properties": { -"Angle": { -"description": "Angle of rotation of the original non-rotated box around the top left corner of the original non-rotated box, in clockwise degrees from the horizontal.", -"format": "float", -"type": "number" -}, -"Height": { -"format": "int32", -"type": "integer" -}, -"Left": { -"description": "Coordinates and sizes are expressed in pixels, where the top-left pixel is (0, 0). The coordinates refer to the corner of the top-left vertex of the unrotated version of the box.", -"format": "int32", -"type": "integer" -}, -"Top": { -"format": "int32", -"type": "integer" -}, -"Width": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocRoutePoint": { -"id": "GoodocRoutePoint", -"properties": { -"RouteIndex": { -"description": "The sequential route number, starts at 0", -"format": "int32", -"type": "integer" -}, -"WordIndex": { -"description": "The sequential word number, starts at 0", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSemanticLabel": { -"description": "Label identifying a logical part of the page content. This applies mostly at Block level or Paragraph level (but can apply to Words or to arbitrary spans if needed).", -"id": "GoodocSemanticLabel", -"properties": { -"AlternateText": { -"description": "Alternate text for a sequence of the Goodoc, just for the element containing this label, or for a sequence starting from this element to the EndOfSpanningLabel. Typically this is inserted by automatic or manual OCR correction. We use text instead of editing the Goodoc directly since we dont usually have accurate symbol level bboxes for the alternate text. Also the original values from OCR are preserved. 
It is upto the application to do anything more intelligent like mapping words and finding potential symbol/word bboxes.", -"type": "string" -}, -"Attribute": { -"description": "Page elements can be given Attributes refining meaning/role. We keep this flexible by using strings instead of pre-determined enum values. But it is useful to list all such Attributes in use in ocr/goodoc/goodoc-semantics-attributes.h", -"items": { -"type": "string" -}, -"type": "array" -}, -"ChapterStart": { -"description": "Blocks that are at the beginning of chapters have this set:", -"type": "boolean" -}, -"CleanupAnnotation": { -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"ContinuesFromPreviousPage": { -"type": "boolean" -}, -"ContinuesFromPreviousPageHyphenated": { -"description": "When ContinuesFromPreviousPage=true, this bit can be set to note that the word fragment on the previous page ends in a hyphen.", -"type": "boolean" -}, -"ContinuesOnNextPage": { -"description": "Paragraphs that span across pages can be identified with the following flags. Note that flows just connect Blocks across pages. These continuation flags imply something more specific -- the case of a single logical paragraph split over pages. Only the last Paragraph in the last Block within a given FlowThread() on a page can have ContinuesOnNextPage set. Similarly, only the first Paragraph in the first Block with a given FlowThread() on a page may have ContinuesFromPreviousPage set.", -"type": "boolean" -}, -"EndOfSpanningLabel": { -"$ref": "GoodocLogicalEntity", -"description": "Normally, a SemanticLabel applies exactly to the goodoc element that it is contained in (usually Block or Paragraph, sometimes Word). Occasionally, we need a SemanticLabel to span across the boundary or end before the boundary. For example, a URL may just be a few words within a Paragraph. 
In such cases, the SemanticLabel is added to the first element of the span and contains this LogicalEntity pointing to the last element of the span:" -}, -"ExperimentalData": { -"$ref": "Proto2BridgeMessageSet", -"description": "Message set for experimental algorithm data. Use case: We keep a set of features that was computed for the unsupervised caption extraction and store it here. Agora question producer will consume this message set to be embedded in a question. The experimental feature set can then be used later to pair up with ground truth labels for designing a supervised algorithm. Currently holding: o ocean/analysis/content/caption_data.proto's TextualElement" -}, -"Flow": { -"description": "Flow identifies a single sequential unit of text (or other content). It is only set on Blocks -- a flow identifies a sequence of Blocks. The default, main flow is just the empty string. The \"FlowThread\" of a block is the flow (if non-empty), suffixed with the block appearance. This is computed by GoodocUtils::FlowThread(). Paragraphs may be split over blocks in the same FlowThread, across pages. The following table shows how FlowThread gets computed: ## Flow Appearance FlowThread (empty) UNSPECIFIED \"UNSPECIFIED\" foo BODY \"foo:BODY\" Please use lower-case strings for flows (such as article-33-box). One useful way to think of flows is this: A logical unit of interest in a a Document (for example, an article) would be identified by a starting block, an ending block, and a list of flows of interest within the [start, end) span. message Article { (page#, block#): article_start; (page#, block#): article_end; repeated string flows; } The reading order of blocks, paragraphs/etc within this article would be the same order as present in the goodoc itself. 
Some applications (such as rendering) may want to process the article by running over all the flows together, others (such as indexing) may want to deal with the FlowThreads one after the other.", -"type": "string" -}, -"ModificationRecord": { -"description": "This field can be used to record the steps by which AlternateText for a sequence of the Goodoc is generated.", -"type": "string" -}, -"PageNumberOrdinal": { -"$ref": "GoodocOrdinal", -"description": "If Appearence is PAGE_NUMBER:" -}, -"appearance": { -"format": "int32", -"type": "integer" -}, -"columndetails": { -"$ref": "GoodocSemanticLabelColumnDetails" -}, -"contentlink": { -"$ref": "GoodocSemanticLabelContentLink" -}, -"editcorrectioncandidate": { -"items": { -"$ref": "GoodocSemanticLabelEditCorrectionCandidate" -}, -"type": "array" -}, -"overrides": { -"$ref": "GoodocOverrides", -"description": "Structure overrides: typically manual corrections to goodoc renderings." -}, -"snippetfilter": { -"items": { -"$ref": "GoodocSemanticLabelSnippetFilter" -}, -"type": "array" -}, -"tablecelldetails": { -"$ref": "GoodocSemanticLabelTableCellDetails" -}, -"tabledetails": { -"$ref": "GoodocSemanticLabelTableDetails" -} -}, -"type": "object" -}, -"GoodocSemanticLabelColumnDetails": { -"description": "If Appearance is COLUMN:", -"id": "GoodocSemanticLabelColumnDetails", -"properties": { -"Column": { -"format": "int32", -"type": "integer" -}, -"Columns": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSemanticLabelContentLink": { -"description": "If the label is for something that links to another piece of content (in this volume, outside, a url, a citation, etc.).", -"id": "GoodocSemanticLabelContentLink", -"properties": { -"UrlTarget": { -"description": "For URL labels, we note the url here directly (it's also available by grabbing all text symbols within the labeled span). 
SCHOLARLY_CITATION labels or even CAPTION labels may occasionally contain URLs.", -"type": "string" -}, -"citationtarget": { -"$ref": "GoodocSemanticLabelContentLinkCitationTarget" -}, -"involumetarget": { -"$ref": "GoodocSemanticLabelContentLinkInVolumeTarget" -} -}, -"type": "object" -}, -"GoodocSemanticLabelContentLinkCitationTarget": { -"description": "For SCHOLARLY_CITATION labels:", -"id": "GoodocSemanticLabelContentLinkCitationTarget", -"properties": { -"Authors": { -"description": "separated by semicolons", -"type": "string" -}, -"BibKey": { -"type": "string" -}, -"Confidence": { -"format": "double", -"type": "number" -}, -"Title": { -"type": "string" -}, -"Year": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSemanticLabelContentLinkInVolumeTarget": { -"description": "For CAPTION or FOOTNOTE_POINTER or TOC_ENTRY or INDEX_ENTRY or CONTINUATION labels:", -"id": "GoodocSemanticLabelContentLinkInVolumeTarget", -"properties": { -"Confidence": { -"format": "double", -"type": "number" -}, -"LogicalEntity": { -"$ref": "GoodocLogicalEntity", -"description": "The CAPTION label typically targets the previous or the next Block. The FOOTNOTE_POINTER label typically targets a paragraph in a FOOTNOTE Block. TOC_ENTRY and INDEX_ENTRY labels are links that point to a different page within the volume. CONTINUATION labels also are links that point to a different page within the volume, or maybe even a particular block or paragraph." -} -}, -"type": "object" -}, -"GoodocSemanticLabelEditCorrectionCandidate": { -"description": "If there is more than one edit correction candidate, store all the candidates here. 
This helps a manual correction utility fire the right kind of question with the relevant options.", -"id": "GoodocSemanticLabelEditCorrectionCandidate", -"properties": { -"EditedWord": { -"type": "string" -}, -"Probability": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GoodocSemanticLabelSnippetFilter": { -"description": "Recording the output of the snippet filter. We run through a series of snippet filters and store all the conditions that this article passed. A condition is denoted by the \"badword_fraction_allowed\" in a running window of size - \"window_size\". If ARTICLE_SNIPPET_NOT_CLEAN annotation is set, this group has 0 items. It can be a part of article logicalentity, but keeping it here for consistency and coherence as SemanticLabel holds all other article metadata.", -"id": "GoodocSemanticLabelSnippetFilter", -"properties": { -"badwordFraction": { -"format": "double", -"type": "number" -}, -"windowSize": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSemanticLabelTableCellDetails": { -"description": "If Appearance is TABLE_CELL:", -"id": "GoodocSemanticLabelTableCellDetails", -"properties": { -"Column": { -"format": "int32", -"type": "integer" -}, -"ColumnSpan": { -"format": "int32", -"type": "integer" -}, -"Row": { -"description": "Row and Column are 0-based", -"format": "int32", -"type": "integer" -}, -"RowSpan": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSemanticLabelTableDetails": { -"description": "If Appearance is TABLE:", -"id": "GoodocSemanticLabelTableDetails", -"properties": { -"Columns": { -"format": "int32", -"type": "integer" -}, -"Rows": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSummaryStats": { -"description": "Goodoc stats for a range of elements, such as one page or a whole book. These stats can be computed using the SummaryStatsCollector class. 
Some range stats are pre-computed and stored in goodocs/volumes (eg., Page.stats below, and Ocean's CA_VolumeResult.goodoc_stats).", -"id": "GoodocSummaryStats", -"properties": { -"estimatedFontSizes": { -"description": "This flag is set if the histogram above has been derived by estimating font sizes from CharLabel.CharacterHeight; that happens if the FontSize field is constant, as has happened with Abbyy 9.", -"type": "boolean" -}, -"fontSizeHistogram": { -"description": "Symbol counts (and other attributes) for each distinct CharLabel.FontId and FontSize; histogram is in decreasing order of symbol count", -"items": { -"$ref": "GoodocFontSizeStats" -}, -"type": "array" -}, -"meanSymbolsPerBlock": { -"format": "int32", -"type": "integer" -}, -"meanSymbolsPerLine": { -"format": "int32", -"type": "integer" -}, -"meanSymbolsPerParagraph": { -"format": "int32", -"type": "integer" -}, -"meanSymbolsPerWord": { -"format": "int32", -"type": "integer" -}, -"meanWordsPerBlock": { -"format": "int32", -"type": "integer" -}, -"meanWordsPerLine": { -"format": "int32", -"type": "integer" -}, -"meanWordsPerParagraph": { -"format": "int32", -"type": "integer" -}, -"medianBlockSpace": { -"description": "bottom to next top in flow on page", -"format": "int32", -"type": "integer" -}, -"medianEvenPrintedBox": { -"$ref": "GoodocBoundingBox", -"description": "0,2,4.." 
-}, -"medianFullEvenPrintedBox": { -"$ref": "GoodocBoundingBox" -}, -"medianFullOddPrintedBox": { -"$ref": "GoodocBoundingBox" -}, -"medianFullPrintedBox": { -"$ref": "GoodocBoundingBox", -"description": "Each median_full*_printed_box includes page header/footer but still excludes all graphic blocks" -}, -"medianHeight": { -"format": "int32", -"type": "integer" -}, -"medianHorizontalDpi": { -"format": "int32", -"type": "integer" -}, -"medianLineHeight": { -"description": "top to bottom", -"format": "int32", -"type": "integer" -}, -"medianLineSpace": { -"description": "bottom to next top in para", -"format": "int32", -"type": "integer" -}, -"medianLineSpan": { -"description": "top to next top in para", -"format": "int32", -"type": "integer" -}, -"medianOddPrintedBox": { -"$ref": "GoodocBoundingBox", -"description": "1,3,5.." -}, -"medianParagraphIndent": { -"description": "leading space on first line", -"format": "int32", -"type": "integer" -}, -"medianParagraphSpace": { -"description": "bottom to next top in block", -"format": "int32", -"type": "integer" -}, -"medianPrintedBox": { -"$ref": "GoodocBoundingBox", -"description": "Each median*_printed_box excludes page header/footer and all graphic blocks" -}, -"medianSymbolsPerBlock": { -"format": "int32", -"type": "integer" -}, -"medianSymbolsPerLine": { -"format": "int32", -"type": "integer" -}, -"medianSymbolsPerParagraph": { -"format": "int32", -"type": "integer" -}, -"medianSymbolsPerWord": { -"format": "int32", -"type": "integer" -}, -"medianVerticalDpi": { -"format": "int32", -"type": "integer" -}, -"medianWidth": { -"format": "int32", -"type": "integer" -}, -"medianWordsPerBlock": { -"format": "int32", -"type": "integer" -}, -"medianWordsPerLine": { -"format": "int32", -"type": "integer" -}, -"medianWordsPerParagraph": { -"format": "int32", -"type": "integer" -}, -"numBlockSpaces": { -"description": "blocks that have a successor block within their flow on their page", -"format": "int32", -"type": "integer" -}, 
-"numBlocks": { -"description": "------ Block stats Median symbols and words omit junk, header and footer blocks; they are intended to be a measure of the typical \"content\" block. There can still be substantial differences between means and medians; however, block values will generally exceed paragraph values (not the case when headers and footers are included).", -"format": "int32", -"type": "integer" -}, -"numLineSpaces": { -"description": "Lines (out of num_lines) that have a successor line within their para", -"format": "int32", -"type": "integer" -}, -"numLines": { -"description": "------ Line stats \"top\" corresponds to the highest ascender and \"bottom\" to the lowest descender.", -"format": "int32", -"type": "integer" -}, -"numNonGraphicBlocks": { -"format": "int32", -"type": "integer" -}, -"numPages": { -"description": "------ Page stats.", -"format": "int32", -"type": "integer" -}, -"numParagraphSpaces": { -"description": "paras that have a successor para within their block", -"format": "int32", -"type": "integer" -}, -"numParagraphs": { -"description": "------ Paragraph stats Median symbols and words omit junk, header and footer blocks; they are intended to be a measure of the typical \"content\" paragraph. 
There can still be substantial differences between means and medians, particularly if a table is present (every cell is a paragraph).", -"format": "int32", -"type": "integer" -}, -"numSymbols": { -"description": "------ Symbol stats", -"format": "int32", -"type": "integer" -}, -"numWords": { -"description": "------ Word stats", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocSymbol": { -"description": "A single symbol representation", -"id": "GoodocSymbol", -"properties": { -"Box": { -"$ref": "GoodocBoundingBox" -}, -"Code": { -"description": "The unicode character code in UTF-32", -"format": "int32", -"type": "integer" -}, -"Label": { -"$ref": "GoodocLabel" -}, -"RotatedBox": { -"$ref": "GoodocRotatedBoundingBox", -"description": "If RotatedBox is set, Box must be set as well. See RotatedBoundingBox." -}, -"symbolvariant": { -"items": { -"$ref": "GoodocSymbolSymbolVariant" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocSymbolSymbolVariant": { -"id": "GoodocSymbolSymbolVariant", -"properties": { -"Code": { -"format": "int32", -"type": "integer" -}, -"Confidence": { -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoodocWord": { -"description": "A word representation", -"id": "GoodocWord", -"properties": { -"Baseline": { -"description": "The baseline's y-axis offset from the bottom of the word's bounding box, given in pixels. (A value of 2, for instance, indicates the baseline is 2px above the bottom of the box.)", -"format": "int32", -"type": "integer" -}, -"Box": { -"$ref": "GoodocBoundingBox" -}, -"Capline": { -"description": "The capline is the y-axis offset from the top of the word bounding box. 
A positive value n indicates that capline is n-pixels above the top of this word.", -"format": "int32", -"type": "integer" -}, -"CompactSymbolBoxes": { -"$ref": "GoodocBoxPartitions", -"description": "For space efficiency, we sometimes skip the detailed per-symbol bounding boxes in Symbol.Box, and use this coarser representation instead, where we just store Symbol boundaries within the Word box. Most client code should not have to worry directly about this, it should be handled in the deepest layers of writing/reading goodocs (for example, see Compress() and Uncompress() in ocean/goodoc/goovols-bigtable-volume.h). Note(viresh): I experimented with this compression, and here are some numbers for reference. If the zlib-compressed page goodoc string size was 100 to start with, then this compaction makes it 65. As a possible future relaxation to consider: if we add in, for each symbol, a \"top\" and \"bottom\" box offset then the size would be 75 (that's with \"repeated int32 top/bottom_offset\" fields inside BoxPartitions, instead of inside each symbol)." -}, -"Confidence": { -"description": "Word recognition confidence. Range depends upon OCR Engine.", -"format": "int32", -"type": "integer" -}, -"IsFromDictionary": { -"description": "word. The meaning and range depends on the OCR engine or subsequent processing. Specifies whether the word was found", -"type": "boolean" -}, -"IsIdentifier": { -"description": "a number True if word represents", -"type": "boolean" -}, -"IsLastInSentence": { -"description": "True if the word is the last word in any sub-paragraph unit that functions at the same level of granularity as a sentence. Examples: \"She hit the ball.\" (regular sentence) \"Dewey defeats Truman\" (heading) \"The more, the merrier.\" (no verb) Note: not currently used. 
Code to set this was introduced in CL 7038338 and removed in OCL=10678722.", -"type": "boolean" -}, -"IsNumeric": { -"description": "in the dictionary True if the word represents", -"type": "boolean" -}, -"Label": { -"$ref": "GoodocLabel" -}, -"Penalty": { -"description": "Penalty for discordance of characters in a", -"format": "int32", -"type": "integer" -}, -"RotatedBox": { -"$ref": "GoodocRotatedBoundingBox", -"description": "If RotatedBox is set, Box must be set as well. See RotatedBoundingBox." -}, -"Symbol": { -"description": "Word characters, the text may", -"items": { -"$ref": "GoodocSymbol" -}, -"type": "array" -}, -"alternates": { -"$ref": "GoodocWordAlternates" -}, -"text": { -"description": "As a shortcut, the content API provides the text of words instead of individual symbols (NOTE: this is experimental). This is UTF8. And the main font for the word is stored in Label.CharLabel.", -"type": "string" -}, -"writingDirection": { -"description": "Writing direction for this word.", -"enum": [ -"WRITING_DIRECTION_LEFT_TO_RIGHT", -"WRITING_DIRECTION_RIGHT_TO_LEFT", -"WRITING_DIRECTION_TOP_TO_BOTTOM" -], -"enumDescriptions": [ -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"GoodocWordAlternates": { -"description": "Alternate OCR words for Ptolemy OCR Correction. This is the output of the Ptolemy error estimator. See http://go/Ptolemy.", -"id": "GoodocWordAlternates", -"properties": { -"ErrorProbability": { -"description": "The probability that the main OCR engine (Abbyy) string is incorrect; range is 0 (definitely correct) to 100 (definitely incorrect).", -"format": "int32", -"type": "integer" -}, -"alternate": { -"items": { -"$ref": "GoodocWordAlternatesAlternate" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoodocWordAlternatesAlternate": { -"description": "An alternate word provided by another OCR engine, used for OCR Correction. 
This iteration only supports simple substitution errors (exhanging one word for another), but with minor modifications (e.g. adding a word count for each alternate), it could support repairing word segmentation and text detection errors.", -"id": "GoodocWordAlternatesAlternate", -"properties": { -"OcrEngineId": { -"description": "See Document.Header", -"type": "string" -}, -"OcrEngineVersion": { -"description": "See Document.Header", -"type": "string" -}, -"Word": { -"$ref": "GoodocWord", -"description": "In order to compile, this recursive message needs to be optional, even though it's within an optional group." -} -}, -"type": "object" -}, -"GoogleApiServiceconsumermanagementV1BillingConfig": { -"description": "Describes the billing configuration for a new tenant project.", -"id": "GoogleApiServiceconsumermanagementV1BillingConfig", -"properties": { -"billingAccount": { -"description": "Name of the billing account. For example `billingAccounts/012345-567890-ABCDEF`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleApiServiceconsumermanagementV1PolicyBinding": { -"description": "Translates to IAM Policy bindings (without auditing at this level)", -"id": "GoogleApiServiceconsumermanagementV1PolicyBinding", -"properties": { -"members": { -"description": "Uses the same format as in IAM policy. `member` must include both a prefix and ID. For example, `user:{emailId}`, `serviceAccount:{emailId}`, `group:{emailId}`.", -"items": { -"type": "string" -}, -"type": "array" -}, -"role": { -"description": "Role. 
(https://cloud.google.com/iam/docs/understanding-roles) For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1AudioOutConfig": { -"description": "Specifies the desired format for the server to use when it returns `audio_out` messages.", -"id": "GoogleAssistantAccessoryV1AudioOutConfig", -"properties": { -"audioMode": { -"description": "Current audio mode on the device while issuing the query.", -"enum": [ -"AUDIO_MODE_UNSPECIFIED", -"MUTED", -"SILENT", -"PLAYING" -], -"enumDescriptions": [ -"No audio mode specified. The Assistant may respond as if in `AUDIO_SILENT` mode.", -"Device's audio mute mode has been activated by user. The Assistant will typically not return an audio response.", -"No audio is currently playing, and device's audio mute mode has NOT been activated by user. The Assistant will typically return an audio response.", -"Audio is currently playing, such as a video or music. The Assistant will typically not return an audio response, but may for certain queries that are inherently audio (such as \"what sound does a cow make?\" or \"how do you say 'something' in Korean\". This mode should also typically be used when the screen is on and a screen-reader is activated for accessibility, as the screen-reader will read the contents of the visual response and manage navigation." -], -"type": "string" -}, -"audioRoutingMode": { -"description": "Current audio routing on the device while issuing the query.", -"enum": [ -"AUDIO_ROUTING_MODE_UNSPECIFIED", -"BLUETOOTH_HEADPHONES" -], -"enumDescriptions": [ -"No audio routing mode specified.", -"Audio routes to headphones connected over BLUETOOTH." -], -"type": "string" -}, -"encoding": { -"description": "*Required* The encoding of audio data to be returned in all `audio_out` messages.", -"enum": [ -"ENCODING_UNSPECIFIED", -"LINEAR16", -"MP3", -"OGG_OPUS", -"MULAW", -"OPUS_CONTAINERLESS" -], -"enumDescriptions": [ -"Not specified. 
Will return result google.rpc.Code.INVALID_ARGUMENT.", -"Uncompressed 16-bit signed little-endian samples (Linear PCM).", -"MP3 audio encoding. The sample rate is encoded in the payload.", -"Opus-encoded audio wrapped in an ogg container. The result will be a file which can be played natively on Android and in some browsers (such as Chrome). The quality of the encoding is considerably higher than MP3 while using the same bitrate. The sample rate is encoded in the payload.", -"Mulaw encoded 8-bit samples, typically used only by telephony. `sample_rate_hertz` must be 8000.", -"Opus encoded audio without a container." -], -"type": "string" -}, -"preferredBitrateBps": { -"description": "*Optional* Specifies preferred encoding bitrate (bits-per-second). Currently this is only implemented for OGG_OPUS for bitrates of 12000, 16000, 24000, 32000. If not specified, OGG_OPUS defaults to 32000.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1DeviceConfig": { -"description": "*Required* Fields that identify the device to the Assistant. See also: * [Register a Device - REST API](https: //developers.google.com/assistant/sdk/re // ference/device-registration/register-device-manual) * [Device Model and Instance Schemas](https: //developers.google.com/assistant/sdk/re // ference/device-registration/model-and-instance-schemas) * [Device Proto](https: //developers.google.com/assistant/sdk/re // ference/rpc/google.assistant.devices.v1#device)", -"id": "GoogleAssistantAccessoryV1DeviceConfig", -"properties": { -"deviceModelCapabilitiesOverride": { -"$ref": "GoogleAssistantEmbeddedV1DeviceModelCapabilitiesOverride", -"description": "Device model capabilities from client to override capabilities in the primary device model." 
-}, -"heterodyneToken": { -"description": "*Optional* An encrypted heterodyne_experiment_token containing the list of experiment_ids (go/ph-server-tokens).", -"type": "string" -}, -"surfaceIdentity": { -"$ref": "GoogleAssistantEmbeddedV1SurfaceIdentity", -"description": "*Required* Identifier for the device which sent the request." -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1DeviceState": { -"description": "Information about the state of the device. This contains any state that Assistant may need to know about in order to fulfill requests, for example which timers and alarms are set. Next ID: 13", -"id": "GoogleAssistantAccessoryV1DeviceState", -"properties": { -"alarmState": { -"$ref": "GoogleAssistantEmbeddedV1Alarms", -"description": "*Optional* Information about on-device alarms. For devices that support alarms, all on-device alarms must be sent up with the DeviceState in order for Assistant Server to be able to perform operations on them." -}, -"contextParams": { -"description": "Other context params to be sent to Assistant. This is a assistant.embedded.v1.ContextParams message in serialized binary proto format.", -"format": "byte", -"type": "string" -}, -"deviceTime": { -"description": "A timestamp of the current device time when the request was made. This field is required if your device supports alarms or timers. This ensures that requests are fulfilled relative to the current device time and regardless of any clock skew between the client and the server.", -"format": "google-datetime", -"type": "string" -}, -"deviceTimeZone": { -"$ref": "GoogleTypeTimeZone", -"description": "The time zone where the device is currently located. This helps the Assistant answer time-related queries relative to the device's time zone. Generally speaking, mobile devices that support alarms or timers should supply device_time_zone. This field is required if your device supports alarms or timers and the device's location cannot reliably be determined. 
(See the comment above google.assistant.embedded.v1.DeviceLocation for a description of how the device's location is determined.) If the time zone cannot be determined, some queries for creating or modifying timers or alarms may fail with a generic error such as, \"Sorry, I don't know how to help with that.\"" -}, -"doNotDisturb": { -"description": "Indicate whether do not disturb mode is turned on.", -"type": "boolean" -}, -"fitnessActivitiesState": { -"$ref": "GoogleAssistantEmbeddedV1FitnessActivities", -"description": "Information about on-device fitness activities. For devices that support fitness activities, all on-device fitness activities must be sent up with the DeviceState in order for Assistant Server to be able to perform operations on them." -}, -"installedApps": { -"$ref": "GoogleAssistantAccessoryV1DeviceStateInstalledAppsState", -"description": "Information about apps currently installed on device." -}, -"installedAppsZlib": { -"description": "This contains a zlib-compressed binary-encoded `InstalledAppsState` proto message.", -"format": "byte", -"type": "string" -}, -"timerState": { -"$ref": "GoogleAssistantEmbeddedV1Timers", -"description": "*Optional* Information about on-device timers. For devices that support timers, all on-device timers must be sent up with the DeviceState in order for Assistant Server to be able to perform operations on them." -}, -"unavailableSettings": { -"description": "This indicates which specific settings are currently unavailable for modification, despite being listed as a supported setting. 
Assistant can use this field to trigger unavailability messages, rather than claiming that a setting is entirely unsupported on device.", -"items": { -"enum": [ -"UNSPECIFIED", -"ABOUT_ME", -"ACCESSIBILITY", -"ACTIVE_EDGE", -"ACTIVE_EDGE_SENSITIVITY", -"ADAPTIVE_BATTERY", -"ADAPTIVE_BRIGHTNESS", -"ADAPTIVE_CHARGING", -"ADAPTIVE_CONNECTIVITY", -"ADAPTIVE_SOUND", -"ADD_ACCOUNT", -"ADD_BLUETOOTH_DEVICE", -"ADD_DEVICE", -"ADD_FINGERPRINT", -"ADS_TRACKING", -"AIRPLANE_MODE", -"ALARM_VOLUME", -"ALARM_SOUND", -"ALLOW_MULTIPLE_USERS", -"AMBIENT_DISPLAY_ALWAYS_ON", -"AMBIENT_DISPLAY_NEW_NOTIFICATION", -"ANDROID_AUTO", -"ANDROID_VERSION", -"APP_BATTERY_USAGE", -"APP_DATA_USAGE", -"APP_DETAILS", -"APP_SHORTCUT", -"APPS_NOTIFICATIONS", -"APPS_STORAGE", -"ASSISTANT_ACCOUNT", -"ASSISTANT_FACE_MATCH", -"ASSISTANT_LANGUAGE", -"ASSISTANT_VOICE_MATCH", -"AUTO_ROTATE", -"AUTO_ROTATE_FACE_DETECTION", -"BACKUP", -"BATTERY_HEALTH", -"BATTERY_LEVEL", -"BATTERY_LOW", -"BATTERY_PERCENTAGE", -"BATTERY_PRESENT", -"BATTERY_SAVER", -"BATTERY_SAVER_SCHEDULE", -"BATTERY_SHARE", -"BATTERY_USAGE", -"BIOMETRIC", -"BLUETOOTH", -"BLUETOOTH_NAME", -"BLUETOOTH_ADDRESS", -"BLUETOOTH_SETTINGS", -"BRIGHTNESS_LEVEL", -"BUBBLES", -"CALL_VOLUME", -"CAMERA_DOUBLE_TWIST", -"CAST", -"CAR_CRASH_DETECTION", -"COLOR_INVERSION", -"COLOR_CORRECTION", -"CONVERSATIONS", -"CHARGING_SOUNDS_AND_VIBRATION", -"CHARGING_STATE", -"CONNECTED_DEVICES", -"CONTACTLESS_PAYMENTS", -"DATA_ROAMING", -"DATA_SAVER", -"DATA_USAGE", -"DATA_LIMIT", -"DATA_LIMIT_LEVEL", -"DATA_WARNING", -"DATA_WARNING_LEVEL", -"DEFAULT_ALARM_SOUND", -"DEFAULT_NOTIFICATION_SOUND", -"DEFAULT_APPS", -"DEVELOPER_OPTIONS", -"DEVICE_ASSISTANT_APP", -"DEVICE_NAME", -"DISPLAY_OVER_OTHER_APPS", -"DISPLAY_SIZE", -"DO_NOT_DISTURB", -"DO_NOT_DISTURB_MESSAGES", -"DO_NOT_DISTURB_CALLS", -"DO_NOT_DISTURB_ALARMS", -"DO_NOT_DISTURB_SCHEDULES", -"DOUBLE_TAP_CHECK_PHONE", -"DRIVING_MODE", -"EARTHQUAKE_ALERTS", -"EMERGENCY", -"EMERGENCY_ALERTS", -"EMERGENCY_CONTACTS", 
-"EMERGENCY_INFORMATION", -"ETHERNET_TETHERING", -"EXTRA_DIM", -"EXTREME_BATTERY_SAVER", -"FACTORY_RESET", -"FIND_MY_DEVICE", -"FLASHLIGHT", -"FOCUS_MODE", -"FONT_SIZE", -"FREE_UP_SPACE", -"FINGERPRINT_MANAGER", -"GESTURES", -"HAPTIC_FEEDBACK_VIBRATION", -"HARD_KEYBOARD", -"HEADS_UP", -"HIGH_REFRESH_RATE", -"HOT_SPOT", -"HOTSPOT_TETHERING", -"HOT_WORD", -"HUB_MODE", -"IP_ADDRESS", -"IMPROVE_LOCATION_ACCURACY", -"JUMP_TO_CAMERA", -"KEYBOARD_SHORTCUTS", -"LIFT_CHECK_PHONE", -"LIVE_TRANSLATE", -"LOCATION", -"LOCATION_HISTORY", -"LOCATION_BLUETOOTH_SCANNING", -"LOCATION_WIFI_SCANNING", -"LOCK_SCREEN", -"LOCK_SCREEN_DEVICE_CONTROLS", -"LOCK_SCREEN_WALLET", -"MAC_ADDRESS", -"MAGNIFICATION", -"MAGNIFY_BUTTON", -"MAGNIFY_TRIPLE_TAP", -"MANIFY_BUTTON", -"MANIFY_TRIPLE_TAP", -"MEDIA", -"MEDIA_VOLUME", -"MICROPHONE_ACCESS", -"MOBILE", -"MOBILE_DATA", -"MUSIC", -"MUTE_MODE", -"NETWORK", -"NETWORK_RESET", -"NFC", -"NIGHT_LIGHT_INTENSITY", -"NIGHT_LIGHT_SWITCH", -"NIGHT_MODE", -"NOTIFICATION_BADGE", -"NOTIFICATION_SOUND", -"NOTIFICATION_ON_SCREEN", -"NOTIFICATION_HISTORY", -"NOTIFY_FOR_PUBLIC_NETWORKS", -"ONEHANDED_MODE", -"OS_VERSION", -"PASSWORD", -"PERMISSION_MANAGER", -"PERMISSION_USAGE", -"PERSONALIZATION", -"PRINTING", -"PHONE_NUMBER", -"PICTURE_IN_PICTURE", -"POINTER_SPEED", -"POWER_MENU", -"REMINDERS", -"REQUIRE_DEVICE_UNLOCK_FOR_NFC", -"RINGTONE", -"RING_VOLUME", -"NEARBY_DEVICES_SCANNING", -"NEARBY_SHARE", -"SCREEN_LOCKING_SOUND", -"SCREEN_MAGNIFICATION", -"SCREEN_TIMEOUT", -"SCREEN_LOCK", -"SCREEN_SAVER", -"SELECT_TO_SPEAK", -"SET_TIME_AUTOMATICALLY", -"SET_TIME_ZONE_AUTOMATICALLY", -"SETTINGS", -"SIM", -"SIM_MANAGER", -"SPEECH_RATE", -"STORAGE_USAGE", -"SWIPE_FOR_NOTIFICATION", -"SWITCH_ACCESS", -"SYSTEM_UPDATE", -"SYSTEM_UPDATES", -"SYSTEM_NAVIGATION", -"SYSTEM_NAVIGATION_GESTURES", -"SYSTEM_NAVIGATION_BUTTONS", -"TALKBACK_PASSWORDS", -"TEXT_TO_SPEECH", -"TIME_ZONE", -"UNUSED_APPS", -"USB", -"USB_TETHERING", -"VERBOSE_TTS", -"VIBRATE", -"VIBRATION", 
-"VIBRATION_MODE", -"VOICE", -"VOLUME_LEVEL", -"WAKE_SCREEN_FOR_NOTIFICATIONS", -"WALLPAPERS", -"WEBVIEW", -"WIFI", -"WIFI_ADD_NETWORK", -"WIFI_ADD_NETWORK_QR_CODE", -"WIFI_CALLING", -"WIFI_HOTSPOT", -"ACCESSIBILITY_SELECT_TO_SPEAK", -"CRISIS_ALERTS", -"REGULATORY_LABELS", -"SEND_FEEDBACK", -"OS_BUILD_NUMBER", -"ACCESSIBILITY_SHORTCUTS", -"ACCESSIBILITY_MENU", -"ACCESSIBILITY_SOUND_AMPLIFIER", -"ACCESSIBILITY_LIVE_TRANSCRIBE", -"ACCESSIBILITY_SOUND_NOTIFICATIONS", -"ACCESSIBILITY_TALKBACK", -"ACCESSIBILITY_TIMEOUT", -"CAMERA_SETTINGS", -"BATTERY_SAVER_TOGGLE", -"COVID_NOTIFICATIONS", -"APP_LOCATION", -"LOCATION_SERVICES", -"PRIVATE_DNS", -"UNRESTRICTED_DATA", -"PREFERRED_NETWORK", -"NETWORK_PREFERENCES", -"PASSWORDS_AND_ACCOUNTS", -"PRIVACY_DASHBOARD", -"MORE_SECURITY", -"APP_PINNING", -"CONFIRM_SIM_DELETION", -"ENCRYPT_PHONE", -"FACE_UNLOCK", -"INSTALL_UNKNOWN_APPS", -"NOW_PLAYING", -"STAY_AWAKE", -"ASSISTANT_VOICE", -"RESET_BLUETOOTH_WIFI", -"DEFAULT_PHONE_APP", -"GOOGLE_ACCOUNT", -"ACCESSIBILITY_CAPTION_SIZE_AND_STYLE", -"ACCESSIBILITY_CAPTION_STYLE", -"ACCESSIBILITY_CAPTION_TEXT", -"ACCESSIBILITY_CAPTION_PREFERENCES", -"ACCESSIBILITY_COLOR_AND_MOTION", -"ACCESSIBILITY_BOLD_TEXT", -"LIVE_CAPTION", -"POWER_BUTTON_ENDS_CALL", -"TOUCH_AND_HOLD_DELAY", -"ACCESSIBILITY_VOICE_ACCESS", -"SMS_PERMISSIONS", -"SPECIAL_APP_ACCESS", -"DARK_THEME_SCHEDULE", -"LOCK_SCREEN_TEXT", -"NIGHT_LIGHT_SCHEDULE", -"AUTOFILL", -"USAGE_AND_DIAGNOSTICS", -"SENSITIVE_NOTIFICATIONS", -"ENCRYPTION_AND_CREDENTIALS", -"SPACIAL_AUDIO", -"RESET_OPTIONS", -"QUICK_TAP", -"TIPS_AND_SUPPORT", -"SCREEN_ATTENTION", -"BLUETOOTH_TETHERING", -"ALL_APPS", -"EXTEND_UNLOCK", -"CLEAR_CALLING", -"GOOGLE_SETTINGS", -"APP_LANGUAGES", -"SIM_STATUS", -"MICROPHONE_PERMISSIONS", -"GOOGLE_PLAY_UPDATE", -"ADD_GOOGLE_ACCOUNT", -"ASSISTANT_SPOKEN_NOTIFICATIONS", -"ABOUT_PHONE", -"ACCOUNTS", -"APPLICATION", -"ASSISTANT", -"AUDIO", -"BATTERY", -"BELL_SCHEDULE", -"CONTINUED_CONVERSATION", -"DATE_TIME", -"DARK_THEME", 
-"DEVICE_INFO", -"DICTIONARY", -"DIGITAL_WELLBEING", -"DISPLAY", -"LANGUAGE", -"NIGHT_LIGHT", -"NOTIFICATION", -"NOTIFICATION_VOLUME", -"PHONE_RINGTONE", -"PRIVACY", -"ROAMING", -"ROUTINES", -"SEARCH", -"SECURITY", -"SOUND", -"SPELL_CHECKER", -"SYSTEM", -"STORAGE", -"VPN", -"AUTOCLICK", -"CARET_HIGHLIGHT", -"CHROMEVOX", -"CURSOR_HIGHLIGHT", -"DOCKED_MAGNIFIER", -"FOCUS_HIGHLIGHT", -"FULLSCREEN_MAGNIFIER", -"HIGH_CONTRAST_MODE", -"LARGE_CURSOR", -"MONO_AUDIO", -"STICKY_KEYS", -"TAP_DRAGGING", -"VIRTUAL_KEYBOARD", -"WEARABLE_AMBIENT", -"WEARABLE_NOISE_CANCELLATION", -"WEARABLE_TOUCH_CONTROLS", -"RAISE_TO_TALK", -"BEDTIME_MODE", -"THEATER_MODE", -"TOUCH_LOCK", -"PRESS_AND_HOLD", -"WATCH_FACE", -"NOTIFICATION_ANNOUNCEMENT" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -true, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, 
-false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Pair a bluetooth device.", -"Link a device to Assistant/Google Home.", -"", -"", -"", -"", -"Deprecated, use DEFAULT_ALARM_SOUND instead.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Denotes if the battery health is normal / reduced / unknown.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Bluetooth on/off toggle", -"", -"", -"Bluetooth preferences page", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Cellular carrier (non-wifi) data usage statistics & settings for the device.", 
-"Toggle to enable data use limit", -"The mobile data use after which mobile data is automatically disabled.", -"Toggle to enable data use warning", -"The mobile data use after which a warning is shown to the user.", -"", -"", -"", -"", -"Device's voice assistant app selection.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Special mode for using device hands-free while driving.", -"", -"Main emergency settings", -"", -"", -"", -"", -"Makes screen extra dark.", -"", -"", -"", -"", -"", -"", -"", -"", -"A page for device gestures", -"Slider that controls touch feedback vibration", -"", -"", -"", -"Toggle for internet sharing via WiFi.", -"Common settings page for internet sharing via various means.", -"", -"See go/hubmodesettings-PRD.", -"Show my IP address", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings page for what is visible on the lock screen.", -"", -"", -"Show my MAC address", -"", -"", -"", -"", -"", -"", -"", -"Microphone Access is a toggleable setting that controls if Microphone usage is allowed at the system level or not. It is not the per-App Microphone permission page. https://screenshot.googleplex.com/4hrskftPSur7hHh", -"Wireless internet settings, including WiFi, Cellular, etc.", -"Setting to enable/disable cellular data connection being used on the device. https://screenshot.googleplex.com/jMSRtW3Aq4o", -"", -"", -"", -"Restarts the device network stack, no settings are deleted.", -"", -"", -"", -"", -"", -"Deprecated, use DEFAULT_NOTIFICATION_SOUND instead.", -"", -"", -"", -"", -"", -"Password manager", -"Shows permissions and allows add/remove allowed apps.", -"Shows usage of each permission by app.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings page for screen locking security methods.", -"", -"", -"", -"", -"", -"Currently active/primary SIM.", -"", -"", -"", -"", -"", -"Deprecated, use SYSTEM_UPDATES instead.", -"", -"Settings page for configuring how navigation between apps is done. 
Specifically android has choice between Buttons of Gestures mode.", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Deprecated, use VIBRATION instead.", -"Vibration master toggle - controls all vibration functions. Corresponds to \"Vibration & haptics\" toggle in the Android settings app.", -"Controls whether the ringer mode will be silent or will vibrate. Details in https://developer.android.com/reference/android/media/AudioManager#RINGER_MODE_VIBRATE Different from VIBRATION from above.", -"deprecated, use ASSISTANT_VOICE", -"", -"", -"", -"", -"", -"", -"", -"", -"Duplicate, use HOT_SPOT instead", -"Accessibility > Select to Speak", -"Safety > Crisis alerts", -"Settings > About phone > Regulatory labels", -"Settings > About phone > Send feedback about this device", -"", -"", -"", -"Settings > Accessibility > Hearing > Sound Amplifier", -"", -"", -"Settings > Accessibility > TalkBack", -"Settings > Accessibility > Time to take action (Accessibility timeout)", -"", -"", -"Settings > Google > COVID-19 Exposure Notifications", -"", -"", -"", -"Settings > Network & Internet > Data Saver > Unrestricted data", -"", -"", -"", -"", -"", -"", -"", -"Settings > Security > Encrypt phone", -"", -"", -"", -"", -"", -"", -"", -"", -"Settings > Accessibility > Caption preferences > Caption size and style", -"Settings > Accessibility > Caption preferences > Caption size and style > Caption Style", -"Settings > Accessibility > Caption preferences > Caption size and style > Text size", -"Settings > Accessibility > Caption preferences", -"Settings > Accessibility > Color and motion", -"Settings > Accessibility > Display size and text > Bold text", -"", -"Settings > Accessibility > Power button ends call", -"", -"", -"", -"Settings > Apps & notifications > Advanced > Special app access", -"", -"Settings > Display > Lock screen > Add text on lock screen", -"", -"", -"", -"", -"Settings > Security > Advanced settings > Encryption & credentials", -"Settings > Sound & vibration > Spatial 
Audio", -"Settings > System > Advanced > Reset options", -"Settings > System > Gestures > Quick tap", -"Settings > Tips & support", -"Display > Screen timeout > Screen attention", -"", -"", -"", -"", -"", -"", -"", -"Permission Manager > Microphone", -"", -"", -"Settings for assistant to announce messages/notifications", -"Begin of NGA proto consistence", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"Duplicate, use NIGHT_MODE instead", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"End of NGA proto consistence", -"Chrome OS specific accessibility settings", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"End of Chrome OS specific accessibility settings", -"Wearable device settings", -"", -"", -"", -"", -"", -"", -"", -"Settings > Display > Change watch face End of Wearable device settings", -"Assistant Spoken Notification Setting" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1DeviceStateInstalledAppsState": { -"description": "State of the apps currently installed on the device.", -"id": "GoogleAssistantAccessoryV1DeviceStateInstalledAppsState", -"properties": { -"apps": { -"description": "List of apps currently installed on the device.", -"items": { -"$ref": "AssistantApiCoreTypesProvider" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1ResponseConfig": { -"description": "Configuration for the response. Next Id: 11", -"id": "GoogleAssistantAccessoryV1ResponseConfig", -"properties": { -"audioOutConfig": { -"$ref": "GoogleAssistantAccessoryV1AudioOutConfig", -"description": "Specifies the current audio mode on the device." -}, -"deviceConfig": { -"$ref": "GoogleAssistantAccessoryV1DeviceConfig", -"description": "Configuration related to a specific device." -}, -"deviceInteraction": { -"description": "The client interaction to be sent to Assistant. 
This is a assistant.embedded.v1.DeviceInteraction message in serialized binary proto format.", -"format": "byte", -"type": "string" -}, -"deviceState": { -"$ref": "GoogleAssistantAccessoryV1DeviceState", -"description": "Device state to pass to the Assistant server to use in calculating the response." -}, -"initialAudioBytes": { -"description": "Specifies the initial bytes of TTS audio to send.", -"format": "int32", -"type": "integer" -}, -"isNewConversation": { -"description": "If true, the server will treat the request as a new conversation and not use state from the prior request. Set this field to true when the conversation should be restarted, such as after a device reboot, or after a significant lapse of time since the prior query.", -"type": "boolean" -}, -"outputSampleRateHz": { -"description": "Specifies the desired audio sample rate of the output TTS stream in Hz.", -"format": "int32", -"type": "integer" -}, -"responseType": { -"description": "Specifies the requested response type.", -"enum": [ -"RESPONSE_TYPE_UNSPECIFIED", -"TEXT", -"TRANSCRIPTION" -], -"enumDescriptions": [ -"No response type specified. This is invalid and the host will send a state update indicating an error.", -"Requests an Assistant text response.", -"Requests a voice transcription only." -], -"type": "string" -}, -"screenOutConfig": { -"$ref": "GoogleAssistantAccessoryV1ScreenOutConfig", -"description": "Specifies the desired format to use when server returns a visual screen response." -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1ScreenOutConfig": { -"description": "Specifies the desired format for the server to use when it returns `screen_out` response.", -"id": "GoogleAssistantAccessoryV1ScreenOutConfig", -"properties": { -"dimensions": { -"$ref": "GoogleAssistantAccessoryV1ScreenOutConfigDimensions", -"description": "Device dimensions." 
-}, -"fontScaleFactor": { -"description": "The scale factor used to convert Scalable Pixel (SP) units to Density-independent Pixel (DP) units (DP = SP * scale factor). Fonts are measured in units of SP, and on some platforms such as Android the SP to DP scale factor can be affected by the font size a user selects in accessibility settings.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleAssistantAccessoryV1ScreenOutConfigDimensions": { -"description": "This contains physical and logical characteristics about the device (e.g. screen size and DPI, etc).", -"id": "GoogleAssistantAccessoryV1ScreenOutConfigDimensions", -"properties": { -"screenDpi": { -"description": "Dots (pixels) per inch of the screen.", -"format": "float", -"type": "number" -}, -"screenHeightPx": { -"description": "Height of the device's screen in pixels. If 0 or not specified, it's assumed to be the same as screen_width_px. For a square or round screen, it's recommended to leave this field empty as a bandwidth optimization.", -"format": "int32", -"type": "integer" -}, -"screenShape": { -"description": "The shape of the device's screen", -"enum": [ -"SCREEN_SHAPE_UNSPECIFIED", -"SCREEN_SHAPE_OVAL", -"SCREEN_SHAPE_RECT" -], -"enumDescriptions": [ -"Screen shape is undefined.", -"A round screen (typically found on most Wear devices).", -"Rectangular screens." -], -"type": "string" -}, -"screenWidthPx": { -"description": "Width of the device's screen in pixels.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1Alarm": { -"description": "Alarms are clocks that ring at a specified time on one or more days. The client schedules a time to ring based on the date/time pattern. When it rings, it may be rescheduled off the original time by snoozing or it may be replaced by the next occurrence.", -"id": "GoogleAssistantEmbeddedV1Alarm", -"properties": { -"alarmId": { -"description": "A string key used as an identifier to this alarm. 
This key needs to be unique amongst all alarms on the device. The client can choose a mechanism of its choice to ensure this. If the server suggests an alarm_id, the client can either use the suggestion or create a new unique alarm_id of its choosing.", -"type": "string" -}, -"alarmSource": { -"description": "The entity that created this alarm. Note this may be different from the device that reported this alarm. In particular, this field is meant for remote alarms which have been synced to the current device by the Clock app. Synced alarms exist in each device's Clock app and can be managed by either device; this field indicates their origin.", -"enum": [ -"ALARM_SOURCE_UNSPECIFIED", -"PHONE", -"WATCH" -], -"enumDescriptions": [ -"Alarm source unknown or not specified.", -"A smartphone device.", -"A smartwatch device." -], -"type": "string" -}, -"datePattern": { -"$ref": "GoogleTypeDate", -"description": "For single alarms: the one date the alarm should next be scheduled for." -}, -"label": { -"description": "A user-provided name for this alarm.", -"type": "string" -}, -"recurrencePattern": { -"$ref": "GoogleAssistantEmbeddedV1AlarmRecurrence", -"description": "For recurring alarms: a description of the dates when the alarm should recur." -}, -"scheduledTime": { -"description": "When SCHEDULED or SNOOZED, the absolute time the alarm will fire next. When SNOOZED, this time includes the additional time added by snoozing the alarm. When FIRING, the absolute time the alarm had been scheduled to fire. 
When DISABLED, this field is undefined and should be ignored.", -"format": "google-datetime", -"type": "string" -}, -"status": { -"description": "Describes the part of the lifecycle that an alarm is in.", -"enum": [ -"ALARM_STATUS_UNSPECIFIED", -"SCHEDULED", -"FIRING", -"SNOOZED", -"DISABLED" -], -"enumDescriptions": [ -"", -"The alarm is scheduled to fire at some point in the future.", -"The alarm is currently firing.", -"Like SCHEDULED; the alarm has been snoozed after firing.", -"Alarm is disabled, i.e., it won't ring. Scheduled time is undefined for disabled alarms and should be ignored." -], -"type": "string" -}, -"timePattern": { -"$ref": "GoogleTypeTimeOfDay", -"description": "The time of day the alarm should be scheduled for. This value does not change when an alarm enters the SNOOZED state; instead the scheduled_time field should be adjusted to the new alarm time." -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1AlarmRecurrence": { -"description": "A description of the dates when an alarm should recur.", -"id": "GoogleAssistantEmbeddedV1AlarmRecurrence", -"properties": { -"dayOfWeek": { -"description": "Specifies a weekly or daily recurrence. Constraint: The date falls on one of these days of the week, in 0...6 (Sunday...Saturday). 
Should not be empty.", -"items": { -"enum": [ -"DAY_OF_WEEK_UNSPECIFIED", -"MONDAY", -"TUESDAY", -"WEDNESDAY", -"THURSDAY", -"FRIDAY", -"SATURDAY", -"SUNDAY" -], -"enumDescriptions": [ -"The day of the week is unspecified.", -"Monday", -"Tuesday", -"Wednesday", -"Thursday", -"Friday", -"Saturday", -"Sunday" -], -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1Alarms": { -"description": "Contains information about on-device alarms for devices that support alarms.", -"id": "GoogleAssistantEmbeddedV1Alarms", -"properties": { -"alarms": { -"description": "Information about all on-device alarms.", -"items": { -"$ref": "GoogleAssistantEmbeddedV1Alarm" -}, -"type": "array" -}, -"snoozeDuration": { -"description": "The amount of time for which alarms should be snoozed. If not specified, the productivity vertical applies a default snooze duration, which may be seen here: http://google3/assistant/verticals/productivity/utils/alarm_utils.cc;l=2734;rcl=415933085", -"format": "google-duration", -"type": "string" -}, -"stateFetchError": { -"description": "Indicates if an error occurred while fetching alarm state. If this value is missing, it can be assumed that the state fetch was successful.", -"enum": [ -"STATE_FETCH_ERROR_UNSPECIFIED", -"STATE_FETCH_ERROR_TIMEOUT", -"STATE_FETCH_ERROR_UNSUPPORTED" -], -"enumDescriptions": [ -"Equivalent to no errors from fetching state (i.e. success).", -"The time taken to fetch state exceeded the allowed maximum time.", -"State fetch is not supported in Stateless Timer/Alarm Integration. This is expected for Stateless Integration." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1DeviceModelCapabilitiesOverride": { -"description": "Device model capabilities override from client.", -"id": "GoogleAssistantEmbeddedV1DeviceModelCapabilitiesOverride", -"properties": { -"deviceModelCapabilities": { -"description": "Device model capabilities from client.", -"format": "byte", -"type": "string" -}, -"updateMask": { -"description": "If present, overrides only fields specified in the mask. When doing so, selected message and repeated fields will be replaced rather than merged. Performs a regular proto MergeFrom if no mask is specified.", -"format": "google-fieldmask", -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1FitnessActivities": { -"description": "Contains information about on-device fitness activities for devices that support fitness.", -"id": "GoogleAssistantEmbeddedV1FitnessActivities", -"properties": { -"fitnessActivities": { -"description": "Information about all on-device activities.", -"items": { -"$ref": "GoogleAssistantEmbeddedV1FitnessActivity" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1FitnessActivity": { -"description": "Describes a particular fitness activity, its current state, and other data fields associated with that activity (e.g. elapsed time). LINT.IfChange", -"id": "GoogleAssistantEmbeddedV1FitnessActivity", -"properties": { -"activityId": { -"description": "Required A string key used as an identifier for this activity. This key needs to be unique amongst all activities on the device. The client can choose a mechanism of its choice to ensure this. 
If the server suggests an activity_id, the client can either use the suggestion or create a new unique activity_id of its choosing.", -"type": "string" -}, -"mostRecentStartTime": { -"deprecated": true, -"description": "DEPRECATED: The most recent time this activity was switched to the ACTIVE state.", -"format": "google-datetime", -"type": "string" -}, -"previouslyAccumulatedDuration": { -"deprecated": true, -"description": "DEPRECATED: The total amount of time this activity has spent in the ACTIVE state until the most recent start time. The total time spent active may be computed by summing (now - most_recent_start_time) with previously_accumulated_duration.", -"format": "google-duration", -"type": "string" -}, -"state": { -"description": "The current state of this activity.", -"enum": [ -"STATE_UNSPECIFIED", -"ACTIVE", -"PAUSED" -], -"enumDescriptions": [ -"The state is either unknown or unspecified.", -"Indicates that this activity is currently in progress.", -"Indicates that this activity has been paused (and may be resumed later). Activities in this state should not continue to increment 'elapsed_time' until moved back into the ACTIVE state." 
-], -"type": "string" -}, -"type": { -"description": "The type of activity being done.", -"enum": [ -"TYPE_UNSPECIFIED", -"WALK", -"RUN", -"ELLIPTICAL", -"SWIM", -"WEIGHTS", -"TREADMILL", -"BIKE", -"YOGA", -"WORKOUT", -"BOOT_CAMP", -"CIRCUIT_TRAINING", -"GOLF", -"HIKING", -"INTERVAL_TRAINING", -"KICKBOXING", -"MARTIAL_ARTS", -"PILATES", -"SPINNING", -"STAIR_CLIMBING", -"TENNIS", -"AEROBICS", -"CORE_TRAINING", -"DANCING", -"HIGH_INTENSITY_INTERVAL_TRAINING", -"KAYAKING", -"ROWING", -"SKIING", -"STANDUP_PADDLEBOARDING", -"STRENGTH_TRAINING", -"SNOWBOARDING" -], -"enumDescriptions": [ -"The type is unknown.", -"Represents a walking activity type.", -"Represents a running activity type.", -"Represents an elliptical activity type.", -"Represents a swimming activity type.", -"Represents a weightlifting activity type.", -"Represents a treadmill activity type (e.g. walking/running on a treadmill).", -"Represents a cycling activity type.", -"Represents a yoga activity type.", -"Represents a generic workout activity.", -"Represents a bootcamp activity type.", -"Represents a circuit training activity type.", -"Represents a golf activity type.", -"Represents a hiking activity type.", -"Represents an interval training activity type.", -"Represents a kickboxing activity type.", -"Represents a martial arts activity type.", -"Represents a pilates activity type.", -"Represents a spinning(indoor cycling, stationary biking) activity type.", -"Represents a stair climbing activity type.", -"Represents a tennis activity type.", -"Represents an aerobics activity type.", -"Represents a core training activity type.", -"Represents a dancing activity type.", -"Represents a HIIT activity type.", -"Represents a kayaking activity type.", -"Represents a rowing activity type.", -"Represents a skiing activity type.", -"Represents a standup paddleboading(SUP) activity type.", -"Represents a strength training activity type.", -"Represents a snowboarding activity type." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1SurfaceIdentity": { -"description": "Contains fields to identify the device which sent the request.", -"id": "GoogleAssistantEmbeddedV1SurfaceIdentity", -"properties": { -"surfaceType": { -"description": "Surface type that the device identifies as.", -"enum": [ -"UNKNOWN_TYPE", -"ACCL", -"AGSA", -"ANDROID", -"ANDROID_AUTO", -"ANDROID_LITE", -"ANDROID_PHONE", -"ANDROID_SCREENLESS", -"ANDROID_SMART_DISPLAY", -"ANDROID_TABLET", -"ANDROID_THINGS", -"ANDROID_THINGS_CUBE", -"ANDROID_THINGS_JASPER", -"ANDROID_TV", -"ANDROID_WEAR", -"ASSISTANT_KIT", -"ASSISTANT_SDK", -"AUTO", -"CAST_OS", -"CHROME_OS", -"CHROMECAST_MANHATTAN", -"CLOUD_DEVICE", -"CROS", -"FITBIT_OS_WATCH", -"FITBIT_OS_WATCH_ANDROID", -"FITBIT_OS_WATCH_IOS", -"GOOGLE_HOME", -"HEADPHONE", -"HEADPHONE_ANDROID", -"HEADPHONE_ANDROID_SCREENLESS", -"HEADPHONE_IOS", -"IOPA", -"IOS", -"IOS_SCREENLESS", -"IPAD", -"IPHONE", -"KAI_OS", -"KAI_OS_AMA", -"LIBASSISTANT", -"MATTER", -"PHONE", -"PIXEL", -"PIXEL5", -"PIXEL6", -"PIXEL7", -"PIXEL8", -"PIXEL_BUDS", -"PIXEL_BUDS_SCREENLESS", -"PIXEL_TABLET", -"PIXEL_TABLET_HUB_MODE", -"PIXEL_TABLET_PERSONAL_MODE", -"PIXEL_WATCH", -"SCREENLESS", -"SMART_DISPLAY", -"SPEAKER", -"TABLET", -"TELEPHONE", -"THING", -"WATCH", -"WEAR_OS", -"WEAR_OS_WATCH" -], -"enumDescriptions": [ -"Unknown surface type.", -"Android conversation client library (go/accl) is an Android library which implements the Conversation Protocol that allows a surface to have conversations with the Assistant server. OWNERS: surfaces-infra-core@", -"Android Google Search App (go/agsa). OWNERS: surfaces-infra-core@", -"Android. OWNERS: surfaces-infra-core@", -"Assistant on Android Auto (go/ast-auto). OWNERS: opa-android-leads@", -"Assistant on Android Go (not part of AGSA). OWNERS: assistantgo-eng@", -"Assistant on Android phones accessed through the Google App (velvet). This surface is formerly known as OPA_AGSA. 
OWNERS: opa-android-leads@", -"Assistant on Nexus with screen off/locked. Use go/bisto device to trigger. OWNERS: opa-android-leads@", -"Assistant on Titan hub mode (go/titan-hubmode-surface). This is OPA Android first party Smart Display devices. The UI experience is built on Opal (Gallium and Flutter) and native Android. OWNERS: opa-android-leads@", -"Assistant on Android tablet. OWNERS: opa-android-leads@", -"Android Things (go/things). OWNERS: surfaces-infra-core@", -"Lenovo Smart Clock v1, a stationary smart display device with a 4-inch screen, targeted for bedroom/bedside use cases like alarm, sleep tracking. Based on Android Things. See go/cube-home. OWNERS: cube-eng@", -"Former codename for 3P 8-inch and 10-inch Smart Displays from Lenovo, JBL and LG, targeted for use in the kitchen. Based on Android Things. See go/jasper-home. OWNERS: jasper-eng@", -"The Android platform that powers smart televisions, set-top-boxes, and over-the-top devices. See go/atv-eng and go/opa-tv. OWNERS: opa-tv-eng@", -"Legacy Wear OS Assistant. OWNERS: wear-assistant-eng@", -"AssistantKit (go/assistantkit) is a collection of iOS libraries allowing first party apps to integrate Google Assistant features. OWNERS: assistantkit-eng@", -"Standalone gRPC based client. See go/easi. OWNERS: assistant-sdk-service-eng@", -"Automobile. OWNERS: surfaces-infra-core@", -"Cast OS (go/opal-castos-docs#what-is-castos). OWNERS: surfaces-infra-core@", -"Chrome OS (go/chromsos) OWNERS: surfaces-infra-core@", -"For chromecast with assistant + screen, e.g. Newman. OWNERS: cast-eng-platform@", -"Virtual device for event-based triggering, e.g. executing time scheduled routines: go/scheduled-routines. OWNERS: surfaces-infra-core@", -"Assistant on native Chrome OS (go/croissant). OWNERS: croissant@", -"Assistant on Fitbit OS Watch. OWNERS: assistant-wearable-team@", -"Assistant on Fitbit Watch paired with AGSA. OWNERS: assistant-wearable-team@", -"Assistant on Fitbit Watch paired with iOPA. 
OWNERS: assistant-wearable-team@", -"Google home speaker, i.e., Chirp devices. See go/assistant-speakers. OWNERS: assistant-speaker-leads@", -"Headphone. OWNERS: surfaces-infra-core@", -"Assistant on headphones with APA. OWNERS: assistant-wearable-team@", -"Assistant on headphones with AGSA-C. OWNERS: assistant-wearable-team@", -"Assistant on headphones paired with iOPA. OWNERS: assistant-wearable-team@", -"Assistant on iOS devices (go/opa-ios). OWNERS: opa-ios-eng@", -"iOS. OWNERS: surfaces-infra-core@", -"Assistant on iOS with screen off/locked. Use go/bisto device to trigger. OWNERS: bisto-team@", -"iPad devices. OWNERS: surfaces-infra-core@", -"iPhone devices. OWNERS: surfaces-infra-core@", -"Kai OS (go/kaios). OWNERS: surfaces-infra-core@", -"Assistant on KaiOS (go/kaiosama). OWNERS: assistant-kaios-eng@", -"LibAssistant (go/libassistant) C++ SDK that powers Assistant experience on both 1P devices like Google Home and 3P devices like Sonos. OWNERS: libassistant-core@", -"Matter based clients. go/matter-prod. OWNERS: assistant-media@", -"A phone. OWNERS: surfaces-infra-core@", -"Pixel devices. OWNERS: surfaces-infra-core@", -"Pixel 5. OWNERS: surfaces-infra-core@", -"Pixel 6. OWNERS: surfaces-infra-core@", -"Pixel 7. OWNERS: surfaces-infra-core@", -"Pixel 8. OWNERS: surfaces-infra-core@", -"Assistant on Pixel Buds with APA. OWNERS: assistant-wearable-team@", -"Assistant on Pixel Buds with AGSA-C. OWNERS: assistant-wearable-team@", -"Assistant on Pixel Tablet/Tangor (a.k.a, TK). OWNERS: assistant-display-eng@", -"Assistant on Tangor running in Hub mode. OWNERS: assistant-display-eng@", -"Assistant on Tangor running in Personal mode. OWNERS: assistant-display-eng@", -"Assistant on Pixel Watch (a.k.a., Rohan). OWNERS: assistant-wearable-team@", -"Devices with screen off or locked (go/bisto). OWNERS: surfaces-infra-core@", -"Smart surface, such as Titan. OWNERS: surfaces-infra-core@", -"A speaker. OWNERS: surfaces-infra-core@", -"A tablet. 
OWNERS: surfaces-infra-core@", -"Assistant on a phone call (go/telephone). OWNERS: telephone-assistant-eng@", -"IoT device. OWNERS: surfaces-infra-core@", -"Watch. OWNERS: surfaces-infra-core@", -"Wear OS. OWNERS: surfaces-infra-core@", -"Assistant on Wear OS Watch (a.k.a., Edoras). OWNERS: assistant-wearable-team@" -], -"type": "string" -}, -"userAgentSuffix": { -"description": "* Fully formed user agent suffix string.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1Timer": { -"description": "Conceptually, timers are clocks that count down from an initial duration and ring when they reach 0. In practice, as a timer is running, it holds a stable expiration time and computes the remaining duration using the current time. When a timer is paused, it holds a stable remaining duration.", -"id": "GoogleAssistantEmbeddedV1Timer", -"properties": { -"expireTime": { -"description": "The time the timer is scheduled to expire. google.protobuf.Timestamp is a Unix epoch time with a granularity of 1 nanosecond.", -"format": "google-datetime", -"type": "string" -}, -"label": { -"description": "A user-provided name for this timer.", -"type": "string" -}, -"originalDuration": { -"description": "The duration of the timer when it was started. For the ADD_TIME action, this field contains the amount of time to add to the timer with the given timer_id.", -"format": "google-duration", -"type": "string" -}, -"remainingDuration": { -"description": "The remaining duration for the timer.", -"format": "google-duration", -"type": "string" -}, -"status": { -"description": "Describes the part of the lifecycle a timer is in.", -"enum": [ -"TIMER_STATUS_UNSPECIFIED", -"RUNNING", -"PAUSED", -"FIRING" -], -"enumDescriptions": [ -"", -"The timer is currently counting down.", -"The timer is currently paused.", -"The timer has expired and is ringing." -], -"type": "string" -}, -"timerId": { -"description": "A string key used as an identifier to this timer. 
This key needs to be unique amongst all timers on the device. The client can choose a mechanism of its choice to ensure this. If the server suggests a timer_id, the client can either use the suggestion or create a new unique timer_id of its choosing.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleAssistantEmbeddedV1Timers": { -"description": "Contains information about on-device timers for devices that support timers.", -"id": "GoogleAssistantEmbeddedV1Timers", -"properties": { -"stateFetchError": { -"description": "Indicates if an error occurred while fetching timer state. If this value is missing, it can be assumed that the state fetch was successful.", -"enum": [ -"STATE_FETCH_ERROR_UNSPECIFIED", -"STATE_FETCH_ERROR_TIMEOUT", -"STATE_FETCH_ERROR_UNSUPPORTED" -], -"enumDescriptions": [ -"Equivalent to no errors from fetching state (i.e. success).", -"The time taken to fetch state exceeded the allowed maximum time.", -"State fetch is not supported in Stateless Timer/Alarm Integration. This is expected for Stateless Integration." 
-], -"type": "string" -}, -"timers": { -"description": "Information about all on-device timers.", -"items": { -"$ref": "GoogleAssistantEmbeddedV1Timer" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1AccessControlAction": { -"description": "Represents the action responsible for access control list management operations.", -"id": "GoogleCloudContentwarehouseV1AccessControlAction", -"properties": { -"operationType": { -"description": "Identifies the type of operation.", -"enum": [ -"UNKNOWN", -"ADD_POLICY_BINDING", -"REMOVE_POLICY_BINDING", -"REPLACE_POLICY_BINDING" -], -"enumDescriptions": [ -"The unknown operation type.", -"Adds newly given policy bindings in the existing bindings list.", -"Removes newly given policy bindings from the existing bindings list.", -"Replaces existing policy bindings with the given policy binding list" -], -"type": "string" -}, -"policy": { -"$ref": "GoogleIamV1Policy", -"description": "Represents the new policy from which bindings are added, removed or replaced based on the type of the operation. the policy is limited to a few 10s of KB." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1Action": { -"description": "Represents the action triggered by Rule Engine when the rule is true.", -"id": "GoogleCloudContentwarehouseV1Action", -"properties": { -"accessControl": { -"$ref": "GoogleCloudContentwarehouseV1AccessControlAction", -"description": "Action triggering access control operations." -}, -"actionId": { -"description": "ID of the action. Managed internally.", -"type": "string" -}, -"addToFolder": { -"$ref": "GoogleCloudContentwarehouseV1AddToFolderAction", -"description": "Action triggering create document link operation." -}, -"dataUpdate": { -"$ref": "GoogleCloudContentwarehouseV1DataUpdateAction", -"description": "Action triggering data update operations." 
-}, -"dataValidation": { -"$ref": "GoogleCloudContentwarehouseV1DataValidationAction", -"description": "Action triggering data validation operations." -}, -"deleteDocumentAction": { -"$ref": "GoogleCloudContentwarehouseV1DeleteDocumentAction", -"description": "Action deleting the document." -}, -"publishToPubSub": { -"$ref": "GoogleCloudContentwarehouseV1PublishAction", -"description": "Action publish to Pub/Sub operation." -}, -"removeFromFolderAction": { -"$ref": "GoogleCloudContentwarehouseV1RemoveFromFolderAction", -"description": "Action removing a document from a folder." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ActionExecutorOutput": { -"description": "Represents the output of the Action Executor.", -"id": "GoogleCloudContentwarehouseV1ActionExecutorOutput", -"properties": { -"ruleActionsPairs": { -"description": "List of rule and corresponding actions result.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1RuleActionsPair" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ActionOutput": { -"description": "Represents the result of executing an action.", -"id": "GoogleCloudContentwarehouseV1ActionOutput", -"properties": { -"actionId": { -"description": "ID of the action.", -"type": "string" -}, -"actionState": { -"description": "State of an action.", -"enum": [ -"UNKNOWN", -"ACTION_SUCCEEDED", -"ACTION_FAILED", -"ACTION_TIMED_OUT", -"ACTION_PENDING" -], -"enumDescriptions": [ -"The unknown state.", -"State indicating action executed successfully.", -"State indicating action failed.", -"State indicating action timed out.", -"State indicating action is pending." 
-], -"type": "string" -}, -"outputMessage": { -"description": "Action execution output message.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1AddToFolderAction": { -"description": "Represents the action responsible for adding document under a folder.", -"id": "GoogleCloudContentwarehouseV1AddToFolderAction", -"properties": { -"folders": { -"description": "Names of the folder under which new document is to be added. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CloudAIDocumentOption": { -"description": "Request Option for processing Cloud AI Document in CW Document.", -"id": "GoogleCloudContentwarehouseV1CloudAIDocumentOption", -"properties": { -"customizedEntitiesPropertiesConversions": { -"additionalProperties": { -"type": "string" -}, -"description": "If set, only selected entities will be converted to properties.", -"type": "object" -}, -"enableEntitiesConversions": { -"description": "Whether to convert all the entities to properties.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CreateDocumentLinkRequest": { -"description": "Request message for DocumentLinkService.CreateDocumentLink.", -"id": "GoogleCloudContentwarehouseV1CreateDocumentLinkRequest", -"properties": { -"documentLink": { -"$ref": "GoogleCloudContentwarehouseV1DocumentLink", -"description": "Required. Document links associated with the source documents (source_document_id)." -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the document creator, used to enforce access control for the service." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CreateDocumentMetadata": { -"description": "Metadata object for CreateDocument request (currently empty).", -"id": "GoogleCloudContentwarehouseV1CreateDocumentMetadata", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CreateDocumentRequest": { -"description": "Request message for DocumentService.CreateDocument.", -"id": "GoogleCloudContentwarehouseV1CreateDocumentRequest", -"properties": { -"cloudAiDocumentOption": { -"$ref": "GoogleCloudContentwarehouseV1CloudAIDocumentOption", -"description": "Request Option for processing Cloud AI Document in Document Warehouse. This field offers limited support for mapping entities from Cloud AI Document to Warehouse Document. Please consult with product team before using this field and other available options." -}, -"createMask": { -"description": "Field mask for creating Document fields. If mask path is empty, it means all fields are masked. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.", -"format": "google-fieldmask", -"type": "string" -}, -"document": { -"$ref": "GoogleCloudContentwarehouseV1Document", -"description": "Required. The document to create." -}, -"policy": { -"$ref": "GoogleIamV1Policy", -"description": "Default document policy during creation. This refers to an Identity and Access (IAM) policy, which specifies access controls for the Document. Conditions defined in the policy will be ignored." -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CreateDocumentResponse": { -"description": "Response message for DocumentService.CreateDocument.", -"id": "GoogleCloudContentwarehouseV1CreateDocumentResponse", -"properties": { -"document": { -"$ref": "GoogleCloudContentwarehouseV1Document", -"description": "Document created after executing create request." -}, -"longRunningOperations": { -"description": "post-processing LROs", -"items": { -"$ref": "GoogleLongrunningOperation" -}, -"type": "array" -}, -"metadata": { -"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", -"description": "Additional information for the API invocation, such as the request tracking id." -}, -"ruleEngineOutput": { -"$ref": "GoogleCloudContentwarehouseV1RuleEngineOutput", -"description": "Output from Rule Engine recording the rule evaluator and action executor's output. Refer format in: google/cloud/contentwarehouse/v1/rule_engine.proto" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1CustomWeightsMetadata": { -"description": "To support the custom weighting across document schemas.", -"id": "GoogleCloudContentwarehouseV1CustomWeightsMetadata", -"properties": { -"weightedSchemaProperties": { -"description": "List of schema and property name. Allows a maximum of 10 schemas to be specified for relevance boosting.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1WeightedSchemaProperty" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DataUpdateAction": { -"description": "Represents the action responsible for properties update operations.", -"id": "GoogleCloudContentwarehouseV1DataUpdateAction", -"properties": { -"entries": { -"additionalProperties": { -"type": "string" -}, -"description": "Map of (K, V) -> (valid name of the field, new value of the field) E.g., (\"age\", \"60\") entry triggers update of field age with a value of 60. If the field is not present then new entry is added. 
During update action execution, value strings will be casted to appropriate types.", -"type": "object" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DataValidationAction": { -"description": "Represents the action responsible for data validation operations.", -"id": "GoogleCloudContentwarehouseV1DataValidationAction", -"properties": { -"conditions": { -"additionalProperties": { -"type": "string" -}, -"description": "Map of (K, V) -> (field, string condition to be evaluated on the field) E.g., (\"age\", \"age > 18 && age < 60\") entry triggers validation of field age with the given condition. Map entries will be ANDed during validation.", -"type": "object" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DateTimeArray": { -"description": "DateTime values.", -"id": "GoogleCloudContentwarehouseV1DateTimeArray", -"properties": { -"values": { -"description": "List of datetime values. Both OffsetDateTime and ZonedDateTime are supported.", -"items": { -"$ref": "GoogleTypeDateTime" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DateTimeTypeOptions": { -"description": "Configurations for a date time property.", -"id": "GoogleCloudContentwarehouseV1DateTimeTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DeleteDocumentAction": { -"description": "Represents the action responsible for deleting the document.", -"id": "GoogleCloudContentwarehouseV1DeleteDocumentAction", -"properties": { -"enableHardDelete": { -"description": "Boolean field to select between hard vs soft delete options. 
Set 'true' for 'hard delete' and 'false' for 'soft delete'.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DeleteDocumentLinkRequest": { -"description": "Request message for DocumentLinkService.DeleteDocumentLink.", -"id": "GoogleCloudContentwarehouseV1DeleteDocumentLinkRequest", -"properties": { -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the document creator, used to enforce access control for the service." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DeleteDocumentRequest": { -"description": "Request message for DocumentService.DeleteDocument.", -"id": "GoogleCloudContentwarehouseV1DeleteDocumentRequest", -"properties": { -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1Document": { -"description": "Defines the structure for content warehouse document proto.", -"id": "GoogleCloudContentwarehouseV1Document", -"properties": { -"cloudAiDocument": { -"$ref": "GoogleCloudDocumentaiV1Document", -"description": "Document AI format to save the structured content, including OCR." -}, -"contentCategory": { -"description": "Indicates the category (image, audio, video etc.) of the original content.", -"enum": [ -"CONTENT_CATEGORY_UNSPECIFIED", -"CONTENT_CATEGORY_IMAGE", -"CONTENT_CATEGORY_AUDIO", -"CONTENT_CATEGORY_VIDEO" -], -"enumDescriptions": [ -"No category is specified.", -"Content is of image type.", -"Content is of audio type.", -"Content is of video type." -], -"type": "string" -}, -"createTime": { -"description": "Output only. 
The time when the document is created.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"creator": { -"description": "The user who creates the document.", -"type": "string" -}, -"displayName": { -"description": "Required. Display name of the document given by the user. This name will be displayed in the UI. Customer can populate this field with the name of the document. This differs from the 'title' field as 'title' is optional and stores the top heading in the document.", -"type": "string" -}, -"displayUri": { -"description": "Uri to display the document, for example, in the UI.", -"type": "string" -}, -"dispositionTime": { -"description": "Output only. If linked to a Collection with RetentionPolicy, the date when the document becomes mutable.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"documentSchemaName": { -"description": "The Document schema name. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", -"type": "string" -}, -"inlineRawDocument": { -"description": "Raw document content.", -"format": "byte", -"type": "string" -}, -"legalHold": { -"description": "Output only. Indicates if the document has a legal hold on it.", -"readOnly": true, -"type": "boolean" -}, -"name": { -"description": "The resource name of the document. Format: projects/{project_number}/locations/{location}/documents/{document_id}. The name is ignored when creating a document.", -"type": "string" -}, -"plainText": { -"description": "Other document format, such as PPTX, XLXS", -"type": "string" -}, -"properties": { -"description": "List of values that are user supplied metadata.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Property" -}, -"type": "array" -}, -"rawDocumentFileType": { -"description": "This is used when DocAI was not used to load the document and parsing/ extracting is needed for the inline_raw_document. 
For example, if inline_raw_document is the byte representation of a PDF file, then this should be set to: RAW_DOCUMENT_FILE_TYPE_PDF.", -"enum": [ -"RAW_DOCUMENT_FILE_TYPE_UNSPECIFIED", -"RAW_DOCUMENT_FILE_TYPE_PDF", -"RAW_DOCUMENT_FILE_TYPE_DOCX", -"RAW_DOCUMENT_FILE_TYPE_XLSX", -"RAW_DOCUMENT_FILE_TYPE_PPTX", -"RAW_DOCUMENT_FILE_TYPE_TEXT", -"RAW_DOCUMENT_FILE_TYPE_TIFF" -], -"enumDescriptions": [ -"No raw document specified or it is non-parsable", -"Adobe PDF format", -"Microsoft Word format", -"Microsoft Excel format", -"Microsoft Powerpoint format", -"UTF-8 encoded text format", -"TIFF or TIF image file format" -], -"type": "string" -}, -"rawDocumentPath": { -"description": "Raw document file in Cloud Storage path.", -"type": "string" -}, -"referenceId": { -"description": "The reference ID set by customers. Must be unique per project and location.", -"type": "string" -}, -"textExtractionDisabled": { -"deprecated": true, -"description": "If true, text extraction will not be performed.", -"type": "boolean" -}, -"textExtractionEnabled": { -"description": "If true, text extraction will be performed.", -"type": "boolean" -}, -"title": { -"description": "Title that describes the document. This can be the top heading or text that describes the document.", -"type": "string" -}, -"updateTime": { -"description": "Output only. The time when the document is last updated.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"updater": { -"description": "The user who lastly updates the document.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DocumentLink": { -"description": "A document-link between source and target document.", -"id": "GoogleCloudContentwarehouseV1DocumentLink", -"properties": { -"createTime": { -"description": "Output only. 
The time when the documentLink is created.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"description": { -"description": "Description of this document-link.", -"type": "string" -}, -"name": { -"description": "Name of this document-link. It is required that the parent derived form the name to be consistent with the source document reference. Otherwise an exception will be thrown. Format: projects/{project_number}/locations/{location}/documents/{source_document_id}/documentLinks/{document_link_id}.", -"type": "string" -}, -"sourceDocumentReference": { -"$ref": "GoogleCloudContentwarehouseV1DocumentReference", -"description": "Document references of the source document." -}, -"state": { -"description": "The state of the documentlink. If target node has been deleted, the link is marked as invalid. Removing a source node will result in removal of all associated links.", -"enum": [ -"STATE_UNSPECIFIED", -"ACTIVE", -"SOFT_DELETED" -], -"enumDescriptions": [ -"Unknown state of documentlink.", -"The documentlink has both source and target documents detected.", -"Target document is deleted, and mark the documentlink as soft-deleted." -], -"type": "string" -}, -"targetDocumentReference": { -"$ref": "GoogleCloudContentwarehouseV1DocumentReference", -"description": "Document references of the target document." -}, -"updateTime": { -"description": "Output only. The time when the documentLink is last updated.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DocumentQuery": { -"id": "GoogleCloudContentwarehouseV1DocumentQuery", -"properties": { -"customPropertyFilter": { -"deprecated": true, -"description": "This filter specifies a structured syntax to match against the [PropertyDefinition].is_filterable marked as `true`. The syntax for this expression is a subset of SQL syntax. 
Supported operators are: `=`, `!=`, `<`, `<=`, `>`, and `>=` where the left of the operator is a property name and the right of the operator is a number or a quoted string. You must escape backslash (\\\\) and quote (\\\") characters. Supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Boolean expressions (AND/OR/NOT) are supported up to 3 levels of nesting (for example, \"((A AND B AND C) OR NOT D) AND E\"), a maximum of 100 comparisons or functions are allowed in the expression. The expression must be < 6000 bytes in length. Sample Query: `(LOWER(driving_license)=\"class \\\"a\\\"\" OR EMPTY(driving_license)) AND driving_years > 10`", -"type": "string" -}, -"customWeightsMetadata": { -"$ref": "GoogleCloudContentwarehouseV1CustomWeightsMetadata", -"description": "To support the custom weighting across document schemas, customers need to provide the properties to be used to boost the ranking in the search request. For a search query with CustomWeightsMetadata specified, only the RetrievalImportance for the properties in the CustomWeightsMetadata will be honored." -}, -"documentCreatorFilter": { -"description": "The exact creator(s) of the documents to search against. If a value isn't specified, documents within the search results are associated with any creator. If multiple values are specified, documents within the search results may be associated with any of the specified creators.", -"items": { -"type": "string" -}, -"type": "array" -}, -"documentNameFilter": { -"description": "Search the documents in the list. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"items": { -"type": "string" -}, -"type": "array" -}, -"documentSchemaNames": { -"description": "This filter specifies the exact document schema Document.document_schema_name of the documents to search against. 
If a value isn't specified, documents within the search results are associated with any schema. If multiple values are specified, documents within the search results may be associated with any of the specified schemas. At most 20 document schema names are allowed.", -"items": { -"type": "string" -}, -"type": "array" -}, -"fileTypeFilter": { -"$ref": "GoogleCloudContentwarehouseV1FileTypeFilter", -"description": "This filter specifies the types of files to return: ALL, FOLDER, or FILE. If FOLDER or FILE is specified, then only either folders or files will be returned, respectively. If ALL is specified, both folders and files will be returned. If no value is specified, ALL files will be returned." -}, -"folderNameFilter": { -"description": "Search all the documents under this specified folder. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"type": "string" -}, -"isNlQuery": { -"description": "Experimental, do not use. If the query is a natural language question. False by default. If true, then the question-answering feature will be used instead of search, and `result_count` in SearchDocumentsRequest must be set. In addition, all other input fields related to search (pagination, histograms, etc.) will be ignored.", -"type": "boolean" -}, -"propertyFilter": { -"description": "This filter specifies a structured syntax to match against the PropertyDefinition.is_filterable marked as `true`. The relationship between the PropertyFilters is OR.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1PropertyFilter" -}, -"type": "array" -}, -"query": { -"description": "The query string that matches against the full text of the document and the searchable properties. The query partially supports [Google AIP style syntax](https://google.aip.dev/160). Specifically, the query supports literals, logical operators, negation operators, comparison operators, and functions. 
Literals: A bare literal value (examples: \"42\", \"Hugo\") is a value to be matched against. It searches over the full text of the document and the searchable properties. Logical operators: \"AND\", \"and\", \"OR\", and \"or\" are binary logical operators (example: \"engineer OR developer\"). Negation operators: \"NOT\" and \"!\" are negation operators (example: \"NOT software\"). Comparison operators: support the binary comparison operators =, !=, <, >, <= and >= for string, numeric, enum, boolean. Also support like operator `~~` for string. It provides semantic search functionality by parsing, stemming and doing synonyms expansion against the input query. To specify a property in the query, the left hand side expression in the comparison must be the property ID including the parent. The right hand side must be literals. For example: \"\\\"projects/123/locations/us\\\".property_a < 1\" matches results whose \"property_a\" is less than 1 in project 123 and us location. The literals and comparison expression can be connected in a single query (example: \"software engineer \\\"projects/123/locations/us\\\".salary > 100\"). Functions: supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Support nested expressions connected using parenthesis and logical operators. The default logical operators is `AND` if there is no operators between expressions. The query can be used with other filters e.g. `time_filters` and `folder_name_filter`. They are connected with `AND` operator under the hood. The maximum number of allowed characters is 255.", -"type": "string" -}, -"queryContext": { -"description": "For custom synonyms. Customers provide the synonyms based on context. One customer can provide multiple set of synonyms based on different context. The search query will be expanded based on the custom synonyms of the query context set. 
By default, no custom synonyms wll be applied if no query context is provided. It is not supported for CMEK compliant deployment.", -"items": { -"type": "string" -}, -"type": "array" -}, -"timeFilters": { -"description": "Documents created/updated within a range specified by this filter are searched against.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1TimeFilter" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DocumentReference": { -"description": "References to the documents.", -"id": "GoogleCloudContentwarehouseV1DocumentReference", -"properties": { -"createTime": { -"description": "Output only. The time when the document is created.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"deleteTime": { -"description": "Output only. The time when the document is deleted.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"displayName": { -"description": "display_name of the referenced document; this name does not need to be consistent to the display_name in the Document proto, depending on the ACL constraint.", -"type": "string" -}, -"documentIsFolder": { -"description": "The document type of the document being referenced.", -"type": "boolean" -}, -"documentIsLegalHoldFolder": { -"description": "Document is a folder with legal hold.", -"type": "boolean" -}, -"documentIsRetentionFolder": { -"description": "Document is a folder with retention policy.", -"type": "boolean" -}, -"documentName": { -"description": "Required. Name of the referenced document.", -"type": "string" -}, -"snippet": { -"description": "Stores the subset of the referenced document's content. This is useful to allow user peek the information of the referenced document.", -"type": "string" -}, -"updateTime": { -"description": "Output only. 
The time when the document is last updated.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1DocumentSchema": { -"description": "A document schema used to define document structure.", -"id": "GoogleCloudContentwarehouseV1DocumentSchema", -"properties": { -"createTime": { -"description": "Output only. The time when the document schema is created.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -}, -"description": { -"description": "Schema description.", -"type": "string" -}, -"displayName": { -"description": "Required. Name of the schema given by the user. Must be unique per project.", -"type": "string" -}, -"documentIsFolder": { -"description": "Document Type, true refers the document is a folder, otherwise it is a typical document.", -"type": "boolean" -}, -"name": { -"description": "The resource name of the document schema. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}. The name is ignored when creating a document schema.", -"type": "string" -}, -"propertyDefinitions": { -"description": "Document details.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1PropertyDefinition" -}, -"type": "array" -}, -"updateTime": { -"description": "Output only. The time when the document schema is last updated.", -"format": "google-datetime", -"readOnly": true, -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1EnumArray": { -"description": "Enum values.", -"id": "GoogleCloudContentwarehouseV1EnumArray", -"properties": { -"values": { -"description": "List of enum values.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1EnumTypeOptions": { -"description": "Configurations for an enum/categorical property.", -"id": "GoogleCloudContentwarehouseV1EnumTypeOptions", -"properties": { -"possibleValues": { -"description": "Required. 
List of possible enum values.", -"items": { -"type": "string" -}, -"type": "array" -}, -"validationCheckDisabled": { -"description": "Make sure the Enum property value provided in the document is in the possile value list during document creation. The validation check runs by default.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1EnumValue": { -"description": "Represents the string value of the enum field.", -"id": "GoogleCloudContentwarehouseV1EnumValue", -"properties": { -"value": { -"description": "String value of the enum field. This must match defined set of enums in document schema using EnumTypeOptions.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ExportToCdwPipeline": { -"description": "The configuration of exporting documents from the Document Warehouse to CDW pipeline.", -"id": "GoogleCloudContentwarehouseV1ExportToCdwPipeline", -"properties": { -"docAiDataset": { -"description": "Optional. The CDW dataset resource name. This field is optional. If not set, the documents will be exported to Cloud Storage only. Format: projects/{project}/locations/{location}/processors/{processor}/dataset", -"type": "string" -}, -"documents": { -"description": "The list of all the resource names of the documents to be processed. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"items": { -"type": "string" -}, -"type": "array" -}, -"exportFolderPath": { -"description": "The Cloud Storage folder path used to store the exported documents before being sent to CDW. Format: `gs:///`.", -"type": "string" -}, -"trainingSplitRatio": { -"description": "Ratio of training dataset split. When importing into Document AI Workbench, documents will be automatically split into training and test split category with the specified ratio. 
This field is required if doc_ai_dataset is set.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1FetchAclRequest": { -"description": "Request message for DocumentService.FetchAcl", -"id": "GoogleCloudContentwarehouseV1FetchAclRequest", -"properties": { -"projectOwner": { -"description": "For Get Project ACL only. Authorization check for end user will be ignored when project_owner=true.", -"type": "boolean" -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1FetchAclResponse": { -"description": "Response message for DocumentService.FetchAcl.", -"id": "GoogleCloudContentwarehouseV1FetchAclResponse", -"properties": { -"metadata": { -"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", -"description": "Additional information for the API invocation, such as the request tracking id." -}, -"policy": { -"$ref": "GoogleIamV1Policy", -"description": "The IAM policy." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1FileTypeFilter": { -"description": "Filter for the specific types of documents returned.", -"id": "GoogleCloudContentwarehouseV1FileTypeFilter", -"properties": { -"fileType": { -"description": "The type of files to return.", -"enum": [ -"FILE_TYPE_UNSPECIFIED", -"ALL", -"FOLDER", -"DOCUMENT", -"ROOT_FOLDER" -], -"enumDescriptions": [ -"Default document type. 
If set, disables the filter.", -"Returns all document types, including folders.", -"Returns only folders.", -"Returns only non-folder documents.", -"Returns only root folders" -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1FloatArray": { -"description": "Float values.", -"id": "GoogleCloudContentwarehouseV1FloatArray", -"properties": { -"values": { -"description": "List of float values.", -"items": { -"format": "float", -"type": "number" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1FloatTypeOptions": { -"description": "Configurations for a float property.", -"id": "GoogleCloudContentwarehouseV1FloatTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1GcsIngestPipeline": { -"description": "The configuration of the Cloud Storage Ingestion pipeline.", -"id": "GoogleCloudContentwarehouseV1GcsIngestPipeline", -"properties": { -"inputPath": { -"description": "The input Cloud Storage folder. All files under this folder will be imported to Document Warehouse. Format: `gs:///`.", -"type": "string" -}, -"pipelineConfig": { -"$ref": "GoogleCloudContentwarehouseV1IngestPipelineConfig", -"description": "Optional. The config for the Cloud Storage Ingestion pipeline. It provides additional customization options to run the pipeline and can be skipped if it is not applicable." -}, -"processorType": { -"description": "The Doc AI processor type name. Only used when the format of ingested files is Doc AI Document proto format.", -"type": "string" -}, -"schemaName": { -"description": "The Document Warehouse schema resource name. All documents processed by this pipeline will use this schema. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", -"type": "string" -}, -"skipIngestedDocuments": { -"description": "The flag whether to skip ingested documents. 
If it is set to true, documents in Cloud Storage contains key \"status\" with value \"status=ingested\" in custom metadata will be skipped to ingest.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1GcsIngestWithDocAiProcessorsPipeline": { -"description": "The configuration of the Cloud Storage Ingestion with DocAI Processors pipeline.", -"id": "GoogleCloudContentwarehouseV1GcsIngestWithDocAiProcessorsPipeline", -"properties": { -"extractProcessorInfos": { -"description": "The extract processors information. One matched extract processor will be used to process documents based on the classify processor result. If no classify processor is specified, the first extract processor will be used.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo" -}, -"type": "array" -}, -"inputPath": { -"description": "The input Cloud Storage folder. All files under this folder will be imported to Document Warehouse. Format: `gs:///`.", -"type": "string" -}, -"pipelineConfig": { -"$ref": "GoogleCloudContentwarehouseV1IngestPipelineConfig", -"description": "Optional. The config for the Cloud Storage Ingestion with DocAI Processors pipeline. It provides additional customization options to run the pipeline and can be skipped if it is not applicable." -}, -"processorResultsFolderPath": { -"description": "The Cloud Storage folder path used to store the raw results from processors. Format: `gs:///`.", -"type": "string" -}, -"skipIngestedDocuments": { -"description": "The flag whether to skip ingested documents. If it is set to true, documents in Cloud Storage contains key \"status\" with value \"status=ingested\" in custom metadata will be skipped to ingest.", -"type": "boolean" -}, -"splitClassifyProcessorInfo": { -"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo", -"description": "The split and classify processor information. The split and classify result will be used to find a matched extract processor." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1GetDocumentRequest": { -"description": "Request message for DocumentService.GetDocument.", -"id": "GoogleCloudContentwarehouseV1GetDocumentRequest", -"properties": { -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1HistogramQuery": { -"description": "The histogram request.", -"id": "GoogleCloudContentwarehouseV1HistogramQuery", -"properties": { -"filters": { -"$ref": "GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter", -"description": "Optional. Filter the result of histogram query by the property names. It only works with histogram query count('FilterableProperties'). It is an optional. It will perform histogram on all the property names for all the document schemas. Setting this field will have a better performance." -}, -"histogramQuery": { -"description": "An expression specifies a histogram request against matching documents for searches. See SearchDocumentsRequest.histogram_queries for details about syntax.", -"type": "string" -}, -"requirePreciseResultSize": { -"description": "Controls if the histogram query requires the return of a precise count. Enable this flag may adversely impact performance. Defaults to true.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter": { -"id": "GoogleCloudContentwarehouseV1HistogramQueryPropertyNameFilter", -"properties": { -"documentSchemas": { -"description": "This filter specifies the exact document schema(s) Document.document_schema_name to run histogram query against. It is optional. It will perform histogram for property names for all the document schemas if it is not set. At most 10 document schema names are allowed. 
Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", -"items": { -"type": "string" -}, -"type": "array" -}, -"propertyNames": { -"description": "It is optional. It will perform histogram for all the property names if it is not set. The properties need to be defined with the is_filterable flag set to true and the name of the property should be in the format: \"schemaId.propertyName\". The property needs to be defined in the schema. Example: the schema id is abc. Then the name of property for property MORTGAGE_TYPE will be \"abc.MORTGAGE_TYPE\".", -"items": { -"type": "string" -}, -"type": "array" -}, -"yAxis": { -"description": "By default, the y_axis is HISTOGRAM_YAXIS_DOCUMENT if this field is not set.", -"enum": [ -"HISTOGRAM_YAXIS_DOCUMENT", -"HISTOGRAM_YAXIS_PROPERTY" -], -"enumDescriptions": [ -"Count the documents per property name.", -"Count the properties per property name." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1HistogramQueryResult": { -"description": "Histogram result that matches HistogramQuery specified in searches.", -"id": "GoogleCloudContentwarehouseV1HistogramQueryResult", -"properties": { -"histogram": { -"additionalProperties": { -"format": "int64", -"type": "string" -}, -"description": "A map from the values of the facet associated with distinct values to the number of matching entries with corresponding value. The key format is: * (for string histogram) string values stored in the field.", -"type": "object" -}, -"histogramQuery": { -"description": "Requested histogram expression.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1IngestPipelineConfig": { -"description": "The ingestion pipeline config.", -"id": "GoogleCloudContentwarehouseV1IngestPipelineConfig", -"properties": { -"cloudFunction": { -"description": "The Cloud Function resource name. 
The Cloud Function needs to live inside consumer project and is accessible to Document AI Warehouse P4SA. Only Cloud Functions V2 is supported. Cloud function execution should complete within 5 minutes or this file ingestion may fail due to timeout. Format: `https://{region}-{project_id}.cloudfunctions.net/{cloud_function}` The following keys are available the request json payload. * display_name * properties * plain_text * reference_id * document_schema_name * raw_document_path * raw_document_file_type The following keys from the cloud function json response payload will be ingested to the Document AI Warehouse as part of Document proto content and/or related information. The original values will be overridden if any key is present in the response. * display_name * properties * plain_text * document_acl_policy * folder", -"type": "string" -}, -"documentAclPolicy": { -"$ref": "GoogleIamV1Policy", -"description": "The document level acl policy config. This refers to an Identity and Access (IAM) policy, which specifies access controls for all documents ingested by the pipeline. The role and members under the policy needs to be specified. The following roles are supported for document level acl control: * roles/contentwarehouse.documentAdmin * roles/contentwarehouse.documentEditor * roles/contentwarehouse.documentViewer The following members are supported for document level acl control: * user:user-email@example.com * group:group-email@example.com Note that for documents searched with LLM, only single level user or group acl check is supported." -}, -"enableDocumentTextExtraction": { -"description": "The document text extraction enabled flag. If the flag is set to true, DWH will perform text extraction on the raw document.", -"type": "boolean" -}, -"folder": { -"description": "Optional. The name of the folder to which all ingested documents will be linked during ingestion process. 
Format is `projects/{project}/locations/{location}/documents/{folder_id}`", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1InitializeProjectRequest": { -"description": "Request message for projectService.InitializeProject", -"id": "GoogleCloudContentwarehouseV1InitializeProjectRequest", -"properties": { -"accessControlMode": { -"description": "Required. The access control mode for accessing the customer data", -"enum": [ -"ACL_MODE_UNKNOWN", -"ACL_MODE_UNIVERSAL_ACCESS", -"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_BYOID", -"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_GCI" -], -"enumDescriptions": [ -"This value is required by protobuf best practices", -"Universal Access: No document level access control.", -"Document level access control with customer own Identity Service.", -"Document level access control using Google Cloud Identity." -], -"type": "string" -}, -"databaseType": { -"description": "Required. The type of database used to store customer data", -"enum": [ -"DB_UNKNOWN", -"DB_INFRA_SPANNER", -"DB_CLOUD_SQL_POSTGRES" -], -"enumDeprecated": [ -false, -false, -true -], -"enumDescriptions": [ -"This value is required by protobuf best practices", -"Internal Spanner", -"Cloud Sql with a Postgres Sql instance" -], -"type": "string" -}, -"documentCreatorDefaultRole": { -"description": "Optional. The default role for the person who create a document.", -"enum": [ -"DOCUMENT_CREATOR_DEFAULT_ROLE_UNSPECIFIED", -"DOCUMENT_ADMIN", -"DOCUMENT_EDITOR", -"DOCUMENT_VIEWER" -], -"enumDescriptions": [ -"Unspecified, will be default to document admin role.", -"Document Admin, same as contentwarehouse.googleapis.com/documentAdmin.", -"Document Editor, same as contentwarehouse.googleapis.com/documentEditor.", -"Document Viewer, same as contentwarehouse.googleapis.com/documentViewer." -], -"type": "string" -}, -"enableCalUserEmailLogging": { -"description": "Optional. 
Whether to enable CAL user email logging.", -"type": "boolean" -}, -"kmsKey": { -"description": "Optional. The KMS key used for CMEK encryption. It is required that the kms key is in the same region as the endpoint. The same key will be used for all provisioned resources, if encryption is available. If the kms_key is left empty, no encryption will be enforced.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1InitializeProjectResponse": { -"description": "Response message for projectService.InitializeProject", -"id": "GoogleCloudContentwarehouseV1InitializeProjectResponse", -"properties": { -"message": { -"description": "The message of the project initialization process.", -"type": "string" -}, -"state": { -"description": "The state of the project initialization process.", -"enum": [ -"STATE_UNSPECIFIED", -"SUCCEEDED", -"FAILED", -"CANCELLED", -"RUNNING" -], -"enumDescriptions": [ -"Clients should never see this.", -"Finished project initialization without error.", -"Finished project initialization with an error.", -"Client canceled the LRO.", -"Ask the customer to check the operation for results." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1IntegerArray": { -"description": "Integer values.", -"id": "GoogleCloudContentwarehouseV1IntegerArray", -"properties": { -"values": { -"description": "List of integer values.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1IntegerTypeOptions": { -"description": "Configurations for an integer property.", -"id": "GoogleCloudContentwarehouseV1IntegerTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1InvalidRule": { -"description": "A triggered rule that failed the validation check(s) after parsing.", -"id": "GoogleCloudContentwarehouseV1InvalidRule", -"properties": { -"error": { -"description": "Validation error on a parsed expression.", -"type": "string" -}, -"rule": { -"$ref": "GoogleCloudContentwarehouseV1Rule", -"description": "Triggered rule." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListDocumentSchemasResponse": { -"description": "Response message for DocumentSchemaService.ListDocumentSchemas.", -"id": "GoogleCloudContentwarehouseV1ListDocumentSchemasResponse", -"properties": { -"documentSchemas": { -"description": "The document schemas from the specified parent.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1DocumentSchema" -}, -"type": "array" -}, -"nextPageToken": { -"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListLinkedSourcesRequest": { -"description": "Response message for DocumentLinkService.ListLinkedSources.", -"id": "GoogleCloudContentwarehouseV1ListLinkedSourcesRequest", -"properties": { -"pageSize": { -"description": "The maximum number of document-links to return. The service may return fewer than this value. 
If unspecified, at most 50 document-links will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", -"format": "int32", -"type": "integer" -}, -"pageToken": { -"description": "A page token, received from a previous `ListLinkedSources` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListLinkedSources` must match the call that provided the page token.", -"type": "string" -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the document creator, used to enforce access control for the service." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListLinkedSourcesResponse": { -"description": "Response message for DocumentLinkService.ListLinkedSources.", -"id": "GoogleCloudContentwarehouseV1ListLinkedSourcesResponse", -"properties": { -"documentLinks": { -"description": "Source document-links.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1DocumentLink" -}, -"type": "array" -}, -"nextPageToken": { -"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListLinkedTargetsRequest": { -"description": "Request message for DocumentLinkService.ListLinkedTargets.", -"id": "GoogleCloudContentwarehouseV1ListLinkedTargetsRequest", -"properties": { -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the document creator, used to enforce access control for the service." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListLinkedTargetsResponse": { -"description": "Response message for DocumentLinkService.ListLinkedTargets.", -"id": "GoogleCloudContentwarehouseV1ListLinkedTargetsResponse", -"properties": { -"documentLinks": { -"description": "Target document-links.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1DocumentLink" -}, -"type": "array" -}, -"nextPageToken": { -"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListRuleSetsResponse": { -"description": "Response message for RuleSetService.ListRuleSets.", -"id": "GoogleCloudContentwarehouseV1ListRuleSetsResponse", -"properties": { -"nextPageToken": { -"description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", -"type": "string" -}, -"ruleSets": { -"description": "The rule sets from the specified parent.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1RuleSet" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ListSynonymSetsResponse": { -"description": "Response message for SynonymSetService.ListSynonymSets.", -"id": "GoogleCloudContentwarehouseV1ListSynonymSetsResponse", -"properties": { -"nextPageToken": { -"description": "A page token, received from a previous `ListSynonymSets` call. 
Provide this to retrieve the subsequent page.", -"type": "string" -}, -"synonymSets": { -"description": "The synonymSets from the specified parent.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1SynonymSet" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1LockDocumentRequest": { -"description": "Request message for DocumentService.LockDocument.", -"id": "GoogleCloudContentwarehouseV1LockDocumentRequest", -"properties": { -"collectionId": { -"description": "The collection the document connects to.", -"type": "string" -}, -"lockingUser": { -"$ref": "GoogleCloudContentwarehouseV1UserInfo", -"description": "The user information who locks the document." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1MapProperty": { -"description": "Map property value. Represents a structured entries of key value pairs, consisting of field names which map to dynamically typed values.", -"id": "GoogleCloudContentwarehouseV1MapProperty", -"properties": { -"fields": { -"additionalProperties": { -"$ref": "GoogleCloudContentwarehouseV1Value" -}, -"description": "Unordered map of dynamically typed values.", -"type": "object" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1MapTypeOptions": { -"description": "Configurations for a Map property.", -"id": "GoogleCloudContentwarehouseV1MapTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1MergeFieldsOptions": { -"description": "Options for merging updated fields.", -"id": "GoogleCloudContentwarehouseV1MergeFieldsOptions", -"properties": { -"replaceMessageFields": { -"description": "When merging message fields, the default behavior is to merge the content of two message fields together. If you instead want to use the field from the source message to replace the corresponding field in the destination message, set this flag to true. 
When this flag is set, specified submessage fields that are missing in source will be cleared in destination.", -"type": "boolean" -}, -"replaceRepeatedFields": { -"description": "When merging repeated fields, the default behavior is to append entries from the source repeated field to the destination repeated field. If you instead want to keep only the entries from the source repeated field, set this flag to true. If you want to replace a repeated field within a message field on the destination message, you must set both replace_repeated_fields and replace_message_fields to true, otherwise the repeated fields will be appended.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ProcessWithDocAiPipeline": { -"description": "The configuration of processing documents in Document Warehouse with DocAi processors pipeline.", -"id": "GoogleCloudContentwarehouseV1ProcessWithDocAiPipeline", -"properties": { -"documents": { -"description": "The list of all the resource names of the documents to be processed. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"items": { -"type": "string" -}, -"type": "array" -}, -"exportFolderPath": { -"description": "The Cloud Storage folder path used to store the exported documents before being sent to CDW. Format: `gs:///`.", -"type": "string" -}, -"processorInfo": { -"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo", -"description": "The CDW processor information." -}, -"processorResultsFolderPath": { -"description": "The Cloud Storage folder path used to store the raw results from processors. 
Format: `gs:///`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ProcessorInfo": { -"description": "The DocAI processor information.", -"id": "GoogleCloudContentwarehouseV1ProcessorInfo", -"properties": { -"documentType": { -"description": "The processor will process the documents with this document type.", -"type": "string" -}, -"processorName": { -"description": "The processor resource name. Format is `projects/{project}/locations/{location}/processors/{processor}`, or `projects/{project}/locations/{location}/processors/{processor}/processorVersions/{processorVersion}`", -"type": "string" -}, -"schemaName": { -"description": "The Document schema resource name. All documents processed by this processor will use this schema. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ProjectStatus": { -"description": "Status of a project, including the project state, dbType, aclMode and etc.", -"id": "GoogleCloudContentwarehouseV1ProjectStatus", -"properties": { -"accessControlMode": { -"description": "Access control mode.", -"enum": [ -"ACL_MODE_UNKNOWN", -"ACL_MODE_UNIVERSAL_ACCESS", -"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_BYOID", -"ACL_MODE_DOCUMENT_LEVEL_ACCESS_CONTROL_GCI" -], -"enumDescriptions": [ -"This value is required by protobuf best practices", -"Universal Access: No document level access control.", -"Document level access control with customer own Identity Service.", -"Document level access control using Google Cloud Identity." 
-], -"type": "string" -}, -"databaseType": { -"description": "Database type.", -"enum": [ -"DB_UNKNOWN", -"DB_INFRA_SPANNER", -"DB_CLOUD_SQL_POSTGRES" -], -"enumDeprecated": [ -false, -false, -true -], -"enumDescriptions": [ -"This value is required by protobuf best practices", -"Internal Spanner", -"Cloud Sql with a Postgres Sql instance" -], -"type": "string" -}, -"documentCreatorDefaultRole": { -"description": "The default role for the person who create a document.", -"type": "string" -}, -"location": { -"description": "The location of the queried project.", -"type": "string" -}, -"qaEnabled": { -"description": "If the qa is enabled on this project.", -"type": "boolean" -}, -"state": { -"description": "State of the project.", -"enum": [ -"PROJECT_STATE_UNSPECIFIED", -"PROJECT_STATE_PENDING", -"PROJECT_STATE_COMPLETED", -"PROJECT_STATE_FAILED", -"PROJECT_STATE_DELETING", -"PROJECT_STATE_DELETING_FAILED", -"PROJECT_STATE_DELETED", -"PROJECT_STATE_NOT_FOUND" -], -"enumDescriptions": [ -"Default status, required by protobuf best practices.", -"The project is in the middle of a provision process.", -"All dependencies have been provisioned.", -"A provision process was previously initiated, but failed.", -"The project is in the middle of a deletion process.", -"A deleting process was initiated, but failed.", -"The project is deleted.", -"The project is not found." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1Property": { -"description": "Property of a document.", -"id": "GoogleCloudContentwarehouseV1Property", -"properties": { -"dateTimeValues": { -"$ref": "GoogleCloudContentwarehouseV1DateTimeArray", -"description": "Date time property values. It is not supported by CMEK compliant deployment." -}, -"enumValues": { -"$ref": "GoogleCloudContentwarehouseV1EnumArray", -"description": "Enum property values." -}, -"floatValues": { -"$ref": "GoogleCloudContentwarehouseV1FloatArray", -"description": "Float property values." 
-}, -"integerValues": { -"$ref": "GoogleCloudContentwarehouseV1IntegerArray", -"description": "Integer property values." -}, -"mapProperty": { -"$ref": "GoogleCloudContentwarehouseV1MapProperty", -"description": "Map property values." -}, -"name": { -"description": "Required. Must match the name of a PropertyDefinition in the DocumentSchema.", -"type": "string" -}, -"propertyValues": { -"$ref": "GoogleCloudContentwarehouseV1PropertyArray", -"description": "Nested structured data property values." -}, -"textValues": { -"$ref": "GoogleCloudContentwarehouseV1TextArray", -"description": "String/text property values." -}, -"timestampValues": { -"$ref": "GoogleCloudContentwarehouseV1TimestampArray", -"description": "Timestamp property values. It is not supported by CMEK compliant deployment." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PropertyArray": { -"description": "Property values.", -"id": "GoogleCloudContentwarehouseV1PropertyArray", -"properties": { -"properties": { -"description": "List of property values.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Property" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PropertyDefinition": { -"description": "Defines the metadata for a schema property.", -"id": "GoogleCloudContentwarehouseV1PropertyDefinition", -"properties": { -"dateTimeTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1DateTimeTypeOptions", -"description": "Date time property. It is not supported by CMEK compliant deployment." -}, -"displayName": { -"description": "The display-name for the property, used for front-end.", -"type": "string" -}, -"enumTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1EnumTypeOptions", -"description": "Enum/categorical property." -}, -"floatTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1FloatTypeOptions", -"description": "Float property." 
-}, -"integerTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1IntegerTypeOptions", -"description": "Integer property." -}, -"isFilterable": { -"description": "Whether the property can be filtered. If this is a sub-property, all the parent properties must be marked filterable.", -"type": "boolean" -}, -"isMetadata": { -"description": "Whether the property is user supplied metadata. This out-of-the box placeholder setting can be used to tag derived properties. Its value and interpretation logic should be implemented by API user.", -"type": "boolean" -}, -"isRepeatable": { -"description": "Whether the property can have multiple values.", -"type": "boolean" -}, -"isRequired": { -"description": "Whether the property is mandatory. Default is 'false', i.e. populating property value can be skipped. If 'true' then user must populate the value for this property.", -"type": "boolean" -}, -"isSearchable": { -"description": "Indicates that the property should be included in a global search.", -"type": "boolean" -}, -"mapTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1MapTypeOptions", -"description": "Map property." -}, -"name": { -"description": "Required. The name of the metadata property. Must be unique within a document schema and is case insensitive. Names must be non-blank, start with a letter, and can contain alphanumeric characters and: /, :, -, _, and .", -"type": "string" -}, -"propertyTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1PropertyTypeOptions", -"description": "Nested structured data property." -}, -"retrievalImportance": { -"description": "The retrieval importance of the property during search.", -"enum": [ -"RETRIEVAL_IMPORTANCE_UNSPECIFIED", -"HIGHEST", -"HIGHER", -"HIGH", -"MEDIUM", -"LOW", -"LOWEST" -], -"enumDescriptions": [ -"No importance specified. Default medium importance.", -"Highest importance.", -"Higher importance.", -"High importance.", -"Medium importance.", -"Low importance (negative).", -"Lowest importance (negative)." 
-], -"type": "string" -}, -"schemaSources": { -"description": "The mapping information between this property to another schema source.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1PropertyDefinitionSchemaSource" -}, -"type": "array" -}, -"textTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1TextTypeOptions", -"description": "Text/string property." -}, -"timestampTypeOptions": { -"$ref": "GoogleCloudContentwarehouseV1TimestampTypeOptions", -"description": "Timestamp property. It is not supported by CMEK compliant deployment." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PropertyDefinitionSchemaSource": { -"description": "The schema source information.", -"id": "GoogleCloudContentwarehouseV1PropertyDefinitionSchemaSource", -"properties": { -"name": { -"description": "The schema name in the source.", -"type": "string" -}, -"processorType": { -"description": "The Doc AI processor type name.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PropertyFilter": { -"id": "GoogleCloudContentwarehouseV1PropertyFilter", -"properties": { -"condition": { -"description": "The filter condition. The syntax for this expression is a subset of SQL syntax. Supported operators are: `=`, `!=`, `<`, `<=`, `>`, `>=`, and `~~` where the left of the operator is a property name and the right of the operator is a number or a quoted string. You must escape backslash (\\\\) and quote (\\\") characters. `~~` is the LIKE operator. The right of the operator must be a string. The only supported property data type for LIKE is text_values. It provides semantic search functionality by parsing, stemming and doing synonyms expansion against the input query. It matches if the property contains semantic similar content to the query. It is not regex matching or wildcard matching. 
For example, \"property.company ~~ \\\"google\\\"\" will match records whose property `property.compnay` have values like \"Google Inc.\", \"Google LLC\" or \"Google Company\". Supported functions are `LOWER([property_name])` to perform a case insensitive match and `EMPTY([property_name])` to filter on the existence of a key. Boolean expressions (AND/OR/NOT) are supported up to 3 levels of nesting (for example, \"((A AND B AND C) OR NOT D) AND E\"), a maximum of 100 comparisons or functions are allowed in the expression. The expression must be < 6000 bytes in length. Only properties that are marked filterable are allowed (PropertyDefinition.is_filterable). Property names do not need to be prefixed by the document schema id (as is the case with histograms), however property names will need to be prefixed by its parent hierarchy, if any. For example: top_property_name.sub_property_name. Sample Query: `(LOWER(driving_license)=\"class \\\"a\\\"\" OR EMPTY(driving_license)) AND driving_years > 10` CMEK compliant deployment only supports: * Operators: `=`, `<`, `<=`, `>`, and `>=`. * Boolean expressions: AND and OR.", -"type": "string" -}, -"documentSchemaName": { -"description": "The Document schema name Document.document_schema_name. Format: projects/{project_number}/locations/{location}/documentSchemas/{document_schema_id}.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PropertyTypeOptions": { -"description": "Configurations for a nested structured data property.", -"id": "GoogleCloudContentwarehouseV1PropertyTypeOptions", -"properties": { -"propertyDefinitions": { -"description": "Required. 
List of property definitions.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1PropertyDefinition" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1PublishAction": { -"description": "Represents the action responsible for publishing messages to a Pub/Sub topic.", -"id": "GoogleCloudContentwarehouseV1PublishAction", -"properties": { -"messages": { -"description": "Messages to be published.", -"items": { -"type": "string" -}, -"type": "array" -}, -"topicId": { -"description": "The topic id in the Pub/Sub service for which messages will be published to.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1QAResult": { -"description": "Additional result info for the question-answering feature.", -"id": "GoogleCloudContentwarehouseV1QAResult", -"properties": { -"confidenceScore": { -"description": "The calibrated confidence score for this document, in the range [0., 1.]. This represents the confidence level for whether the returned document and snippet answers the user's query.", -"format": "float", -"type": "number" -}, -"highlights": { -"description": "Highlighted sections in the snippet.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1QAResultHighlight" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1QAResultHighlight": { -"description": "A text span in the search text snippet that represents a highlighted section (answer context, highly relevant sentence, etc.).", -"id": "GoogleCloudContentwarehouseV1QAResultHighlight", -"properties": { -"endIndex": { -"description": "End index of the highlight, exclusive.", -"format": "int32", -"type": "integer" -}, -"startIndex": { -"description": "Start index of the highlight.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RemoveFromFolderAction": { -"description": "Represents the action responsible for remove a document from a specific folder.", -"id": 
"GoogleCloudContentwarehouseV1RemoveFromFolderAction", -"properties": { -"condition": { -"description": "Condition of the action to be executed.", -"type": "string" -}, -"folder": { -"description": "Name of the folder under which new document is to be added. Format: projects/{project_number}/locations/{location}/documents/{document_id}.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RequestMetadata": { -"description": "Meta information is used to improve the performance of the service.", -"id": "GoogleCloudContentwarehouseV1RequestMetadata", -"properties": { -"userInfo": { -"$ref": "GoogleCloudContentwarehouseV1UserInfo", -"description": "Provides user unique identification and groups information." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1ResponseMetadata": { -"description": "Additional information returned to client, such as debugging information.", -"id": "GoogleCloudContentwarehouseV1ResponseMetadata", -"properties": { -"requestId": { -"description": "A unique id associated with this call. This id is logged for tracking purpose.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1Rule": { -"description": "Represents the rule for a content warehouse trigger.", -"id": "GoogleCloudContentwarehouseV1Rule", -"properties": { -"actions": { -"description": "List of actions that are executed when the rule is satisfied.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Action" -}, -"type": "array" -}, -"condition": { -"description": "Represents the conditional expression to be evaluated. Expression should evaluate to a boolean result. When the condition is true actions are executed. Example: user_role = \"hsbc_role_1\" AND doc.salary > 20000", -"type": "string" -}, -"description": { -"description": "Short description of the rule and its context.", -"type": "string" -}, -"ruleId": { -"description": "ID of the rule. It has to be unique across all the examples. 
This is managed internally.", -"type": "string" -}, -"triggerType": { -"description": "Identifies the trigger type for running the policy.", -"enum": [ -"UNKNOWN", -"ON_CREATE", -"ON_UPDATE", -"ON_CREATE_LINK", -"ON_DELETE_LINK" -], -"enumDescriptions": [ -"Trigger for unknown action.", -"Trigger for create document action.", -"Trigger for update document action.", -"Trigger for create link action.", -"Trigger for delete link action." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RuleActionsPair": { -"description": "Represents a rule and outputs of associated actions.", -"id": "GoogleCloudContentwarehouseV1RuleActionsPair", -"properties": { -"actionOutputs": { -"description": "Outputs of executing the actions associated with the above rule.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1ActionOutput" -}, -"type": "array" -}, -"rule": { -"$ref": "GoogleCloudContentwarehouseV1Rule", -"description": "Represents the rule." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RuleEngineOutput": { -"description": "Records the output of Rule Engine including rule evaluation and actions result.", -"id": "GoogleCloudContentwarehouseV1RuleEngineOutput", -"properties": { -"actionExecutorOutput": { -"$ref": "GoogleCloudContentwarehouseV1ActionExecutorOutput", -"description": "Output from Action Executor containing rule and corresponding actions execution result." -}, -"documentName": { -"description": "Name of the document against which the rules and actions were evaluated.", -"type": "string" -}, -"ruleEvaluatorOutput": { -"$ref": "GoogleCloudContentwarehouseV1RuleEvaluatorOutput", -"description": "Output from Rule Evaluator containing matched, unmatched and invalid rules." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RuleEvaluatorOutput": { -"description": "Represents the output of the Rule Evaluator.", -"id": "GoogleCloudContentwarehouseV1RuleEvaluatorOutput", -"properties": { -"invalidRules": { -"description": "A subset of triggered rules that failed the validation check(s) after parsing.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1InvalidRule" -}, -"type": "array" -}, -"matchedRules": { -"description": "A subset of triggered rules that are evaluated true for a given request.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Rule" -}, -"type": "array" -}, -"triggeredRules": { -"description": "List of rules fetched from database for the given request trigger type.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Rule" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RuleSet": { -"description": "Represents a set of rules from a single customer.", -"id": "GoogleCloudContentwarehouseV1RuleSet", -"properties": { -"description": { -"description": "Short description of the rule-set.", -"type": "string" -}, -"name": { -"description": "The resource name of the rule set. Managed internally. Format: projects/{project_number}/locations/{location}/ruleSet/{rule_set_id}. The name is ignored when creating a rule set.", -"type": "string" -}, -"rules": { -"description": "List of rules given by the customer.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1Rule" -}, -"type": "array" -}, -"source": { -"description": "Source of the rules i.e., customer name.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineMetadata": { -"description": "Metadata message of RunPipeline method.", -"id": "GoogleCloudContentwarehouseV1RunPipelineMetadata", -"properties": { -"exportToCdwPipelineMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RunPipelineMetadataExportToCdwPipelineMetadata", -"description": "The pipeline metadata for Export-to-CDW pipeline." 
-}, -"failedFileCount": { -"description": "Number of files that have failed at some point in the pipeline.", -"format": "int32", -"type": "integer" -}, -"gcsIngestPipelineMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RunPipelineMetadataGcsIngestPipelineMetadata", -"description": "The pipeline metadata for GcsIngest pipeline." -}, -"individualDocumentStatuses": { -"description": "The list of response details of each document.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1RunPipelineMetadataIndividualDocumentStatus" -}, -"type": "array" -}, -"processWithDocAiPipelineMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RunPipelineMetadataProcessWithDocAiPipelineMetadata", -"description": "The pipeline metadata for Process-with-DocAi pipeline." -}, -"totalFileCount": { -"description": "Number of files that were processed by the pipeline.", -"format": "int32", -"type": "integer" -}, -"userInfo": { -"$ref": "GoogleCloudContentwarehouseV1UserInfo", -"description": "User unique identification and groups information." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineMetadataExportToCdwPipelineMetadata": { -"description": "The metadata message for Export-to-CDW pipeline.", -"id": "GoogleCloudContentwarehouseV1RunPipelineMetadataExportToCdwPipelineMetadata", -"properties": { -"docAiDataset": { -"description": "The output CDW dataset resource name.", -"type": "string" -}, -"documents": { -"description": "The input list of all the resource names of the documents to be exported.", -"items": { -"type": "string" -}, -"type": "array" -}, -"outputPath": { -"description": "The output Cloud Storage folder in this pipeline.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineMetadataGcsIngestPipelineMetadata": { -"description": "The metadata message for GcsIngest pipeline.", -"id": "GoogleCloudContentwarehouseV1RunPipelineMetadataGcsIngestPipelineMetadata", -"properties": { -"inputPath": { -"description": "The input Cloud Storage folder in this pipeline. Format: `gs:///`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineMetadataIndividualDocumentStatus": { -"description": "The status of processing a document.", -"id": "GoogleCloudContentwarehouseV1RunPipelineMetadataIndividualDocumentStatus", -"properties": { -"documentId": { -"description": "Document identifier of an existing document.", -"type": "string" -}, -"status": { -"$ref": "GoogleRpcStatus", -"description": "The status processing the document." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineMetadataProcessWithDocAiPipelineMetadata": { -"description": "The metadata message for Process-with-DocAi pipeline.", -"id": "GoogleCloudContentwarehouseV1RunPipelineMetadataProcessWithDocAiPipelineMetadata", -"properties": { -"documents": { -"description": "The input list of all the resource names of the documents to be processed.", -"items": { -"type": "string" -}, -"type": "array" -}, -"processorInfo": { -"$ref": "GoogleCloudContentwarehouseV1ProcessorInfo", -"description": "The DocAI processor to process the documents with." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1RunPipelineRequest": { -"description": "Request message for DocumentService.RunPipeline.", -"id": "GoogleCloudContentwarehouseV1RunPipelineRequest", -"properties": { -"exportCdwPipeline": { -"$ref": "GoogleCloudContentwarehouseV1ExportToCdwPipeline", -"description": "Export docuemnts from Document Warehouse to CDW for training purpose." -}, -"gcsIngestPipeline": { -"$ref": "GoogleCloudContentwarehouseV1GcsIngestPipeline", -"description": "Cloud Storage ingestion pipeline." -}, -"gcsIngestWithDocAiProcessorsPipeline": { -"$ref": "GoogleCloudContentwarehouseV1GcsIngestWithDocAiProcessorsPipeline", -"description": "Use DocAI processors to process documents in Cloud Storage and ingest them to Document Warehouse." -}, -"processWithDocAiPipeline": { -"$ref": "GoogleCloudContentwarehouseV1ProcessWithDocAiPipeline", -"description": "Use a DocAI processor to process documents in Document Warehouse, and re-ingest the updated results into Document Warehouse." -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SearchDocumentsRequest": { -"description": "Request message for DocumentService.SearchDocuments.", -"id": "GoogleCloudContentwarehouseV1SearchDocumentsRequest", -"properties": { -"documentQuery": { -"$ref": "GoogleCloudContentwarehouseV1DocumentQuery", -"description": "Query used to search against documents (keyword, filters, etc.)." -}, -"histogramQueries": { -"description": "An expression specifying a histogram request against matching documents. Expression syntax is an aggregation function call with histogram facets and other options. The following aggregation functions are supported: * `count(string_histogram_facet)`: Count the number of matching entities for each distinct attribute value. Data types: * Histogram facet (aka filterable properties): Facet names with format .. Facets will have the format of: `a-zA-Z`. If the facet is a child facet, then the parent hierarchy needs to be specified separated by dots in the prefix after the schema id. Thus, the format for a multi- level facet is: .. . Example: schema123.root_parent_facet.middle_facet.child_facet * DocumentSchemaId: (with no schema id prefix) to get histograms for each document type (returns the schema id path, e.g. projects/12345/locations/us-west/documentSchemas/abc123). Example expression: * Document type counts: count('DocumentSchemaId') * For schema id, abc123, get the counts for MORTGAGE_TYPE: count('abc123.MORTGAGE_TYPE')", -"items": { -"$ref": "GoogleCloudContentwarehouseV1HistogramQuery" -}, -"type": "array" -}, -"offset": { -"description": "An integer that specifies the current offset (that is, starting result location, amongst the documents deemed by the API as relevant) in search results. This field is only considered if page_token is unset. The maximum allowed value is 5000. Otherwise an error is thrown. For example, 0 means to return results starting from the first matching document, and 10 means to return from the 11th document. 
This can be used for pagination, (for example, pageSize = 10 and offset = 10 means to return from the second page).", -"format": "int32", -"type": "integer" -}, -"orderBy": { -"description": "The criteria determining how search results are sorted. For non-empty query, default is `\"relevance desc\"`. For empty query, default is `\"upload_date desc\"`. Supported options are: * `\"relevance desc\"`: By relevance descending, as determined by the API algorithms. * `\"upload_date desc\"`: By upload date descending. * `\"upload_date\"`: By upload date ascending. * `\"update_date desc\"`: By last updated date descending. * `\"update_date\"`: By last updated date ascending. * `\"retrieval_importance desc\"`: By retrieval importance of properties descending. This feature is still under development, please do not use unless otherwise instructed to do so.", -"type": "string" -}, -"pageSize": { -"description": "A limit on the number of documents returned in the search results. Increasing this value above the default value of 10 can increase search response time. The value can be between 1 and 100.", -"format": "int32", -"type": "integer" -}, -"pageToken": { -"description": "The token specifying the current offset within search results. See SearchDocumentsResponse.next_page_token for an explanation of how to obtain the next set of query results.", -"type": "string" -}, -"qaSizeLimit": { -"description": "Experimental, do not use. The limit on the number of documents returned for the question-answering feature. To enable the question-answering feature, set [DocumentQuery].is_nl_query to true.", -"format": "int32", -"type": "integer" -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control and improve the search quality of the service." 
-}, -"requireTotalSize": { -"description": "Controls if the search document request requires the return of a total size of matched documents. See SearchDocumentsResponse.total_size. Enabling this flag may adversely impact performance. Hint: If this is used with pagination, set this flag on the initial query but set this to false on subsequent page calls (keep the total count locally). Defaults to false.", -"type": "boolean" -}, -"totalResultSize": { -"description": "Controls if the search document request requires the return of a total size of matched documents. See SearchDocumentsResponse.total_size.", -"enum": [ -"TOTAL_RESULT_SIZE_UNSPECIFIED", -"ESTIMATED_SIZE", -"ACTUAL_SIZE" -], -"enumDescriptions": [ -"Total number calculation will be skipped.", -"Estimate total number. The total result size will be accurated up to 10,000. This option will add cost and latency to your request.", -"It may adversely impact performance. The limit is 1000,000." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SearchDocumentsResponse": { -"description": "Response message for DocumentService.SearchDocuments.", -"id": "GoogleCloudContentwarehouseV1SearchDocumentsResponse", -"properties": { -"histogramQueryResults": { -"description": "The histogram results that match with the specified SearchDocumentsRequest.histogram_queries.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1HistogramQueryResult" -}, -"type": "array" -}, -"matchingDocuments": { -"description": "The document entities that match the specified SearchDocumentsRequest.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1SearchDocumentsResponseMatchingDocument" -}, -"type": "array" -}, -"metadata": { -"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", -"description": "Additional information for the API invocation, such as the request tracking id." -}, -"nextPageToken": { -"description": "The token that specifies the starting position of the next page of results. 
This field is empty if there are no more results.", -"type": "string" -}, -"questionAnswer": { -"description": "Experimental. Question answer from the query against the document.", -"type": "string" -}, -"totalSize": { -"description": "The total number of matched documents which is available only if the client set SearchDocumentsRequest.require_total_size to `true` or set SearchDocumentsRequest.total_result_size to `ESTIMATED_SIZE` or `ACTUAL_SIZE`. Otherwise, the value will be `-1`. Typically a UI would handle this condition by displaying \"of many\", for example: \"Displaying 10 of many\".", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SearchDocumentsResponseMatchingDocument": { -"description": "Document entry with metadata inside SearchDocumentsResponse", -"id": "GoogleCloudContentwarehouseV1SearchDocumentsResponseMatchingDocument", -"properties": { -"document": { -"$ref": "GoogleCloudContentwarehouseV1Document", -"description": "Document that matches the specified SearchDocumentsRequest. This document only contains indexed metadata information." -}, -"matchedTokenPageIndices": { -"description": "Return the 1-based page indices where those pages have one or more matched tokens.", -"items": { -"format": "int64", -"type": "string" -}, -"type": "array" -}, -"qaResult": { -"$ref": "GoogleCloudContentwarehouseV1QAResult", -"description": "Experimental. Additional result info if the question-answering feature is enabled." -}, -"searchTextSnippet": { -"description": "Contains snippets of text from the document full raw text that most closely match a search query's keywords, if available. All HTML tags in the original fields are stripped when returned in this field, and matching query keywords are enclosed in HTML bold tags. If the question-answering feature is enabled, this field will instead contain a snippet that answers the user's natural-language query. 
No HTML bold tags will be present, and highlights in the answer snippet can be found in QAResult.highlights.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SetAclRequest": { -"description": "Request message for DocumentService.SetAcl.", -"id": "GoogleCloudContentwarehouseV1SetAclRequest", -"properties": { -"policy": { -"$ref": "GoogleIamV1Policy", -"description": "Required. REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. This refers to an Identity and Access (IAM) policy, which specifies access controls for the Document. You can set ACL with condition for projects only. Supported operators are: `=`, `!=`, `<`, `<=`, `>`, and `>=` where the left of the operator is `DocumentSchemaId` or property name and the right of the operator is a number or a quoted string. You must escape backslash (\\\\) and quote (\\\") characters. Boolean expressions (AND/OR) are supported up to 3 levels of nesting (for example, \"((A AND B AND C) OR D) AND E\"), a maximum of 10 comparisons are allowed in the expression. The expression must be < 6000 bytes in length. Sample condition: `\"DocumentSchemaId = \\\"some schema id\\\" OR SchemaId.floatPropertyName >= 10\"`" -}, -"projectOwner": { -"description": "For Set Project ACL only. Authorization check for end user will be ignored when project_owner=true.", -"type": "boolean" -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SetAclResponse": { -"description": "Response message for DocumentService.SetAcl.", -"id": "GoogleCloudContentwarehouseV1SetAclResponse", -"properties": { -"metadata": { -"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", -"description": "Additional information for the API invocation, such as the request tracking id." -}, -"policy": { -"$ref": "GoogleIamV1Policy", -"description": "The policy will be attached to a resource (e.g. projecct, document)." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SynonymSet": { -"description": "Represents a list of synonyms for a given context. For example a context \"sales\" could contain: Synonym 1: sale, invoice, bill, order Synonym 2: money, credit, finance, payment Synonym 3: shipping, freight, transport Each SynonymSets should be disjoint", -"id": "GoogleCloudContentwarehouseV1SynonymSet", -"properties": { -"context": { -"description": "This is a freeform field. Example contexts can be \"sales,\" \"engineering,\" \"real estate,\" \"accounting,\" etc. The context can be supplied during search requests.", -"type": "string" -}, -"name": { -"description": "The resource name of the SynonymSet This is mandatory for google.api.resource. 
Format: projects/{project_number}/locations/{location}/synonymSets/{context}.", -"type": "string" -}, -"synonyms": { -"description": "List of Synonyms for the context.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1SynonymSetSynonym" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1SynonymSetSynonym": { -"description": "Represents a list of words given by the customer All these words are synonyms of each other.", -"id": "GoogleCloudContentwarehouseV1SynonymSetSynonym", -"properties": { -"words": { -"description": "For example: sale, invoice, bill, order", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TextArray": { -"description": "String/text values.", -"id": "GoogleCloudContentwarehouseV1TextArray", -"properties": { -"values": { -"description": "List of text values.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TextTypeOptions": { -"description": "Configurations for a text property.", -"id": "GoogleCloudContentwarehouseV1TextTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TimeFilter": { -"description": "Filter on create timestamp or update timestamp of documents.", -"id": "GoogleCloudContentwarehouseV1TimeFilter", -"properties": { -"timeField": { -"description": "Specifies which time field to filter documents on. Defaults to TimeField.UPLOAD_TIME.", -"enum": [ -"TIME_FIELD_UNSPECIFIED", -"CREATE_TIME", -"UPDATE_TIME", -"DISPOSITION_TIME" -], -"enumDescriptions": [ -"Default value.", -"Earliest document create time.", -"Latest document update time.", -"Time when document becomes mutable again." 
-], -"type": "string" -}, -"timeRange": { -"$ref": "GoogleTypeInterval" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TimestampArray": { -"description": "Timestamp values.", -"id": "GoogleCloudContentwarehouseV1TimestampArray", -"properties": { -"values": { -"description": "List of timestamp values.", -"items": { -"$ref": "GoogleCloudContentwarehouseV1TimestampValue" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TimestampTypeOptions": { -"description": "Configurations for a timestamp property.", -"id": "GoogleCloudContentwarehouseV1TimestampTypeOptions", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1TimestampValue": { -"description": "Timestamp value type.", -"id": "GoogleCloudContentwarehouseV1TimestampValue", -"properties": { -"textValue": { -"description": "The string must represent a valid instant in UTC and is parsed using java.time.format.DateTimeFormatter.ISO_INSTANT. e.g. \"2013-09-29T18:46:19Z\"", -"type": "string" -}, -"timestampValue": { -"description": "Timestamp value", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateDocumentMetadata": { -"description": "Metadata object for UpdateDocument request (currently empty).", -"id": "GoogleCloudContentwarehouseV1UpdateDocumentMetadata", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateDocumentRequest": { -"description": "Request message for DocumentService.UpdateDocument.", -"id": "GoogleCloudContentwarehouseV1UpdateDocumentRequest", -"properties": { -"cloudAiDocumentOption": { -"$ref": "GoogleCloudContentwarehouseV1CloudAIDocumentOption", -"description": "Request Option for processing Cloud AI Document in Document Warehouse. This field offers limited support for mapping entities from Cloud AI Document to Warehouse Document. Please consult with product team before using this field and other available options." 
-}, -"document": { -"$ref": "GoogleCloudContentwarehouseV1Document", -"description": "Required. The document to update." -}, -"requestMetadata": { -"$ref": "GoogleCloudContentwarehouseV1RequestMetadata", -"description": "The meta information collected about the end user, used to enforce access control for the service." -}, -"updateOptions": { -"$ref": "GoogleCloudContentwarehouseV1UpdateOptions", -"description": "Options for the update operation." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateDocumentResponse": { -"description": "Response message for DocumentService.UpdateDocument.", -"id": "GoogleCloudContentwarehouseV1UpdateDocumentResponse", -"properties": { -"document": { -"$ref": "GoogleCloudContentwarehouseV1Document", -"description": "Updated document after executing update request." -}, -"metadata": { -"$ref": "GoogleCloudContentwarehouseV1ResponseMetadata", -"description": "Additional information for the API invocation, such as the request tracking id." -}, -"ruleEngineOutput": { -"$ref": "GoogleCloudContentwarehouseV1RuleEngineOutput", -"description": "Output from Rule Engine recording the rule evaluator and action executor's output. Refer format in: google/cloud/contentwarehouse/v1/rule_engine.proto" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateDocumentSchemaRequest": { -"description": "Request message for DocumentSchemaService.UpdateDocumentSchema.", -"id": "GoogleCloudContentwarehouseV1UpdateDocumentSchemaRequest", -"properties": { -"documentSchema": { -"$ref": "GoogleCloudContentwarehouseV1DocumentSchema", -"description": "Required. The document schema to update with." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateOptions": { -"description": "Options for Update operations.", -"id": "GoogleCloudContentwarehouseV1UpdateOptions", -"properties": { -"mergeFieldsOptions": { -"$ref": "GoogleCloudContentwarehouseV1MergeFieldsOptions", -"description": "Options for merging." 
-}, -"updateMask": { -"description": "Field mask for merging Document fields. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", -"format": "google-fieldmask", -"type": "string" -}, -"updateType": { -"description": "Type for update.", -"enum": [ -"UPDATE_TYPE_UNSPECIFIED", -"UPDATE_TYPE_REPLACE", -"UPDATE_TYPE_MERGE", -"UPDATE_TYPE_INSERT_PROPERTIES_BY_NAMES", -"UPDATE_TYPE_REPLACE_PROPERTIES_BY_NAMES", -"UPDATE_TYPE_DELETE_PROPERTIES_BY_NAMES", -"UPDATE_TYPE_MERGE_AND_REPLACE_OR_INSERT_PROPERTIES_BY_NAMES" -], -"enumDescriptions": [ -"Defaults to full replace behavior, ie. FULL_REPLACE.", -"Fully replace all the fields (including previously linked raw document). Any field masks will be ignored.", -"Merge the fields into the existing entities.", -"Inserts the properties by names.", -"Replace the properties by names.", -"Delete the properties by names.", -"For each of the property, replaces the property if the it exists, otherwise inserts a new property. And for the rest of the fields, merge them based on update mask and merge fields options." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UpdateRuleSetRequest": { -"description": "Request message for RuleSetService.UpdateRuleSet.", -"id": "GoogleCloudContentwarehouseV1UpdateRuleSetRequest", -"properties": { -"ruleSet": { -"$ref": "GoogleCloudContentwarehouseV1RuleSet", -"description": "Required. The rule set to update." -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1UserInfo": { -"description": "The user information.", -"id": "GoogleCloudContentwarehouseV1UserInfo", -"properties": { -"groupIds": { -"description": "The unique group identifications which the user is belong to. The format is \"group:yyyy@example.com\";", -"items": { -"type": "string" -}, -"type": "array" -}, -"id": { -"description": "A unique user identification string, as determined by the client. 
The maximum number of allowed characters is 255. Allowed characters include numbers 0 to 9, uppercase and lowercase letters, and restricted special symbols (:, @, +, -, _, ~) The format is \"user:xxxx@example.com\";", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1Value": { -"description": "`Value` represents a dynamically typed value which can be either be a float, a integer, a string, or a datetime value. A producer of value is expected to set one of these variants. Absence of any variant indicates an error.", -"id": "GoogleCloudContentwarehouseV1Value", -"properties": { -"booleanValue": { -"description": "Represents a boolean value.", -"type": "boolean" -}, -"datetimeValue": { -"$ref": "GoogleTypeDateTime", -"description": "Represents a datetime value." -}, -"enumValue": { -"$ref": "GoogleCloudContentwarehouseV1EnumValue", -"description": "Represents an enum value." -}, -"floatValue": { -"description": "Represents a float value.", -"format": "float", -"type": "number" -}, -"intValue": { -"description": "Represents a integer value.", -"format": "int32", -"type": "integer" -}, -"stringValue": { -"description": "Represents a string value.", -"type": "string" -}, -"timestampValue": { -"$ref": "GoogleCloudContentwarehouseV1TimestampValue", -"description": "Represents a timestamp value." 
-} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1WeightedSchemaProperty": { -"description": "Specifies the schema property name.", -"id": "GoogleCloudContentwarehouseV1WeightedSchemaProperty", -"properties": { -"documentSchemaName": { -"description": "The document schema name.", -"type": "string" -}, -"propertyNames": { -"description": "The property definition names in the schema.", -"items": { -"type": "string" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1beta1CreateDocumentMetadata": { -"description": "Metadata object for CreateDocument request (currently empty).", -"id": "GoogleCloudContentwarehouseV1beta1CreateDocumentMetadata", -"properties": {}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1beta1InitializeProjectResponse": { -"description": "Response message for projectService.InitializeProject", -"id": "GoogleCloudContentwarehouseV1beta1InitializeProjectResponse", -"properties": { -"message": { -"description": "The message of the project initialization process.", -"type": "string" -}, -"state": { -"description": "The state of the project initialization process.", -"enum": [ -"STATE_UNSPECIFIED", -"SUCCEEDED", -"FAILED", -"CANCELLED", -"RUNNING" -], -"enumDescriptions": [ -"Clients should never see this.", -"Finished project initialization without error.", -"Finished project initialization with an error.", -"Client canceled the LRO.", -"Ask the customer to check the operation for results." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudContentwarehouseV1beta1UpdateDocumentMetadata": { -"description": "Metadata object for UpdateDocument request (currently empty).", -"id": "GoogleCloudContentwarehouseV1beta1UpdateDocumentMetadata", -"properties": {}, -"type": "object" -}, -"GoogleCloudDocumentaiV1Barcode": { -"description": "Encodes the detailed information of a barcode.", -"id": "GoogleCloudDocumentaiV1Barcode", -"properties": { -"format": { -"description": "Format of a barcode. 
The supported formats are: - `CODE_128`: Code 128 type. - `CODE_39`: Code 39 type. - `CODE_93`: Code 93 type. - `CODABAR`: Codabar type. - `DATA_MATRIX`: 2D Data Matrix type. - `ITF`: ITF type. - `EAN_13`: EAN-13 type. - `EAN_8`: EAN-8 type. - `QR_CODE`: 2D QR code type. - `UPC_A`: UPC-A type. - `UPC_E`: UPC-E type. - `PDF417`: PDF417 type. - `AZTEC`: 2D Aztec code type. - `DATABAR`: GS1 DataBar code type.", -"type": "string" -}, -"rawValue": { -"description": "Raw value encoded in the barcode. For example: `'MEBKM:TITLE:Google;URL:https://www.google.com;;'`.", -"type": "string" -}, -"valueFormat": { -"description": "Value format describes the format of the value that a barcode encodes. The supported formats are: - `CONTACT_INFO`: Contact information. - `EMAIL`: Email address. - `ISBN`: ISBN identifier. - `PHONE`: Phone number. - `PRODUCT`: Product. - `SMS`: SMS message. - `TEXT`: Text string. - `URL`: URL address. - `WIFI`: Wifi information. - `GEO`: Geo-localization. - `CALENDAR_EVENT`: Calendar event. - `DRIVER_LICENSE`: Driver's license.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1BoundingPoly": { -"description": "A bounding polygon for the detected image annotation.", -"id": "GoogleCloudDocumentaiV1BoundingPoly", -"properties": { -"normalizedVertices": { -"description": "The bounding polygon normalized vertices.", -"items": { -"$ref": "GoogleCloudDocumentaiV1NormalizedVertex" -}, -"type": "array" -}, -"vertices": { -"description": "The bounding polygon vertices.", -"items": { -"$ref": "GoogleCloudDocumentaiV1Vertex" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1Document": { -"description": "Document represents the canonical document resource in Document AI. 
It is an interchange format that provides insights into documents and allows for collaboration between users and Document AI to iterate and optimize for quality.", -"id": "GoogleCloudDocumentaiV1Document", -"properties": { -"content": { -"description": "Optional. Inline document content, represented as a stream of bytes. Note: As with all `bytes` fields, protobuffers use a pure binary representation, whereas JSON representations use base64.", -"format": "byte", -"type": "string" -}, -"entities": { -"description": "A list of entities detected on Document.text. For document shards, entities in this list may cross shard boundaries.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentEntity" -}, -"type": "array" -}, -"entityRelations": { -"description": "Placeholder. Relationship among Document.entities.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentEntityRelation" -}, -"type": "array" -}, -"error": { -"$ref": "GoogleRpcStatus", -"description": "Any error that occurred while processing this document." -}, -"mimeType": { -"description": "An IANA published [media type (MIME type)](https://www.iana.org/assignments/media-types/media-types.xhtml).", -"type": "string" -}, -"pages": { -"description": "Visual page layout for the Document.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPage" -}, -"type": "array" -}, -"revisions": { -"description": "Placeholder. Revision history of this document.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentRevision" -}, -"type": "array" -}, -"shardInfo": { -"$ref": "GoogleCloudDocumentaiV1DocumentShardInfo", -"description": "Information about the sharding if this document is sharded part of a larger document. If the document is not sharded, this message is not specified." -}, -"text": { -"description": "Optional. UTF-8 encoded text in reading order from the document.", -"type": "string" -}, -"textChanges": { -"description": "Placeholder. A list of text corrections made to Document.text. 
This is usually used for annotating corrections to OCR mistakes. Text changes for a given revision may not overlap with each other.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextChange" -}, -"type": "array" -}, -"textStyles": { -"deprecated": true, -"description": "Styles for the Document.text.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentStyle" -}, -"type": "array" -}, -"uri": { -"description": "Optional. Currently supports Google Cloud Storage URI of the form `gs://bucket_name/object_name`. Object versioning is not supported. For more information, refer to [Google Cloud Storage Request URIs](https://cloud.google.com/storage/docs/reference-uris).", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentEntity": { -"description": "An entity that could be a phrase in the text or a property that belongs to the document. It is a known entity type, such as a person, an organization, or location.", -"id": "GoogleCloudDocumentaiV1DocumentEntity", -"properties": { -"confidence": { -"description": "Optional. Confidence of detected Schema entity. Range `[0, 1]`.", -"format": "float", -"type": "number" -}, -"id": { -"description": "Optional. Canonical id. This will be a unique value in the entity list for this document.", -"type": "string" -}, -"mentionId": { -"description": "Optional. Deprecated. Use `id` field instead.", -"type": "string" -}, -"mentionText": { -"description": "Optional. Text value of the entity e.g. `1600 Amphitheatre Pkwy`.", -"type": "string" -}, -"normalizedValue": { -"$ref": "GoogleCloudDocumentaiV1DocumentEntityNormalizedValue", -"description": "Optional. Normalized entity value. Absent if the extracted value could not be converted or the type (e.g. address) is not supported for certain parsers. This field is also only populated for certain supported document types." -}, -"pageAnchor": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageAnchor", -"description": "Optional. 
Represents the provenance of this entity wrt. the location on the page where it was found." -}, -"properties": { -"description": "Optional. Entities can be nested to form a hierarchical data structure representing the content in the document.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentEntity" -}, -"type": "array" -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"description": "Optional. The history of this annotation." -}, -"redacted": { -"description": "Optional. Whether the entity will be redacted for de-identification purposes.", -"type": "boolean" -}, -"textAnchor": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextAnchor", -"description": "Optional. Provenance of the entity. Text anchor indexing into the Document.text." -}, -"type": { -"description": "Required. Entity type from a schema e.g. `Address`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentEntityNormalizedValue": { -"description": "Parsed and normalized entity value.", -"id": "GoogleCloudDocumentaiV1DocumentEntityNormalizedValue", -"properties": { -"addressValue": { -"$ref": "GoogleTypePostalAddress", -"description": "Postal address. See also: https://github.com/googleapis/googleapis/blob/master/google/type/postal_address.proto" -}, -"booleanValue": { -"description": "Boolean value. Can be used for entities with binary values, or for checkboxes.", -"type": "boolean" -}, -"dateValue": { -"$ref": "GoogleTypeDate", -"description": "Date value. Includes year, month, day. See also: https://github.com/googleapis/googleapis/blob/master/google/type/date.proto" -}, -"datetimeValue": { -"$ref": "GoogleTypeDateTime", -"description": "DateTime value. Includes date, time, and timezone. 
See also: https://github.com/googleapis/googleapis/blob/master/google/type/datetime.proto" -}, -"floatValue": { -"description": "Float value.", -"format": "float", -"type": "number" -}, -"integerValue": { -"description": "Integer value.", -"format": "int32", -"type": "integer" -}, -"moneyValue": { -"$ref": "GoogleTypeMoney", -"description": "Money value. See also: https://github.com/googleapis/googleapis/blob/master/google/type/money.proto" -}, -"text": { -"description": "Optional. An optional field to store a normalized string. For some entity types, one of respective `structured_value` fields may also be populated. Also not all the types of `structured_value` will be normalized. For example, some processors may not generate `float` or `integer` normalized text by default. Below are sample formats mapped to structured values. - Money/Currency type (`money_value`) is in the ISO 4217 text format. - Date type (`date_value`) is in the ISO 8601 text format. - Datetime type (`datetime_value`) is in the ISO 8601 text format.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentEntityRelation": { -"description": "Relationship between Entities.", -"id": "GoogleCloudDocumentaiV1DocumentEntityRelation", -"properties": { -"objectId": { -"description": "Object entity id.", -"type": "string" -}, -"relation": { -"description": "Relationship description.", -"type": "string" -}, -"subjectId": { -"description": "Subject entity id.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPage": { -"description": "A page in a Document.", -"id": "GoogleCloudDocumentaiV1DocumentPage", -"properties": { -"blocks": { -"description": "A list of visually detected text blocks on the page. 
A block has a set of lines (collected into paragraphs) that have a common line-spacing and orientation.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageBlock" -}, -"type": "array" -}, -"detectedBarcodes": { -"description": "A list of detected barcodes.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedBarcode" -}, -"type": "array" -}, -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"dimension": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDimension", -"description": "Physical dimension of the page." -}, -"formFields": { -"description": "A list of visually detected form fields on the page.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageFormField" -}, -"type": "array" -}, -"image": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageImage", -"description": "Rendered image for this page. This image is preprocessed to remove any skew, rotation, and distortions such that the annotation bounding boxes can be upright and axis-aligned." -}, -"imageQualityScores": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageImageQualityScores", -"description": "Image quality scores." -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for the page." -}, -"lines": { -"description": "A list of visually detected text lines on the page. A collection of tokens that a human would perceive as a line.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLine" -}, -"type": "array" -}, -"pageNumber": { -"description": "1-based index for current Page in a parent Document. Useful when a page is taken out of a Document for individual processing.", -"format": "int32", -"type": "integer" -}, -"paragraphs": { -"description": "A list of visually detected text paragraphs on the page. 
A collection of lines that a human would perceive as a paragraph.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageParagraph" -}, -"type": "array" -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this page." -}, -"symbols": { -"description": "A list of visually detected symbols on the page.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageSymbol" -}, -"type": "array" -}, -"tables": { -"description": "A list of visually detected tables on the page.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTable" -}, -"type": "array" -}, -"tokens": { -"description": "A list of visually detected tokens on the page.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageToken" -}, -"type": "array" -}, -"transforms": { -"description": "Transformation matrices that were applied to the original document image to produce Page.image.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageMatrix" -}, -"type": "array" -}, -"visualElements": { -"description": "A list of detected non-text visual elements e.g. checkbox, signature etc. on the page.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageVisualElement" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageAnchor": { -"description": "Referencing the visual context of the entity in the Document.pages. 
Page anchors can be cross-page, consist of multiple bounding polygons and optionally reference specific layout element types.", -"id": "GoogleCloudDocumentaiV1DocumentPageAnchor", -"properties": { -"pageRefs": { -"description": "One or more references to visual page elements", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageAnchorPageRef" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageAnchorPageRef": { -"description": "Represents a weak reference to a page element within a document.", -"id": "GoogleCloudDocumentaiV1DocumentPageAnchorPageRef", -"properties": { -"boundingPoly": { -"$ref": "GoogleCloudDocumentaiV1BoundingPoly", -"description": "Optional. Identifies the bounding polygon of a layout element on the page. If `layout_type` is set, the bounding polygon must be exactly the same to the layout element it's referring to." -}, -"confidence": { -"description": "Optional. Confidence of detected page element, if applicable. Range `[0, 1]`.", -"format": "float", -"type": "number" -}, -"layoutId": { -"deprecated": true, -"description": "Optional. Deprecated. Use PageRef.bounding_poly instead.", -"type": "string" -}, -"layoutType": { -"description": "Optional. The type of the layout element that is being referenced if any.", -"enum": [ -"LAYOUT_TYPE_UNSPECIFIED", -"BLOCK", -"PARAGRAPH", -"LINE", -"TOKEN", -"VISUAL_ELEMENT", -"TABLE", -"FORM_FIELD" -], -"enumDescriptions": [ -"Layout Unspecified.", -"References a Page.blocks element.", -"References a Page.paragraphs element.", -"References a Page.lines element.", -"References a Page.tokens element.", -"References a Page.visual_elements element.", -"Refrrences a Page.tables element.", -"References a Page.form_fields element." -], -"type": "string" -}, -"page": { -"description": "Required. Index into the Document.pages element, for example using `Document.pages` to locate the related page element. This field is skipped when its value is the default `0`. 
See https://developers.google.com/protocol-buffers/docs/proto3#json.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageBlock": { -"description": "A block has a set of lines (collected into paragraphs) that have a common line-spacing and orientation.", -"id": "GoogleCloudDocumentaiV1DocumentPageBlock", -"properties": { -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Block." -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this annotation." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageDetectedBarcode": { -"description": "A detected barcode.", -"id": "GoogleCloudDocumentaiV1DocumentPageDetectedBarcode", -"properties": { -"barcode": { -"$ref": "GoogleCloudDocumentaiV1Barcode", -"description": "Detailed barcode information of the DetectedBarcode." -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for DetectedBarcode." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageDetectedLanguage": { -"description": "Detected language for a structural component.", -"id": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage", -"properties": { -"confidence": { -"description": "Confidence of detected language. 
Range `[0, 1]`.", -"format": "float", -"type": "number" -}, -"languageCode": { -"description": "The [BCP-47 language code](https://www.unicode.org/reports/tr35/#Unicode_locale_identifier), such as `en-US` or `sr-Latn`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageDimension": { -"description": "Dimension for the page.", -"id": "GoogleCloudDocumentaiV1DocumentPageDimension", -"properties": { -"height": { -"description": "Page height.", -"format": "float", -"type": "number" -}, -"unit": { -"description": "Dimension unit.", -"type": "string" -}, -"width": { -"description": "Page width.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageFormField": { -"description": "A form field detected on the page.", -"id": "GoogleCloudDocumentaiV1DocumentPageFormField", -"properties": { -"correctedKeyText": { -"description": "Created for Labeling UI to export key text. If corrections were made to the text identified by the `field_name.text_anchor`, this field will contain the correction.", -"type": "string" -}, -"correctedValueText": { -"description": "Created for Labeling UI to export value text. If corrections were made to the text identified by the `field_value.text_anchor`, this field will contain the correction.", -"type": "string" -}, -"fieldName": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for the FormField name. e.g. `Address`, `Email`, `Grand total`, `Phone number`, etc." -}, -"fieldValue": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for the FormField value." -}, -"nameDetectedLanguages": { -"description": "A list of detected languages for name together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"description": "The history of this annotation." 
-}, -"valueDetectedLanguages": { -"description": "A list of detected languages for value together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"valueType": { -"description": "If the value is non-textual, this field represents the type. Current valid values are: - blank (this indicates the `field_value` is normal text) - `unfilled_checkbox` - `filled_checkbox`", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageImage": { -"description": "Rendered image contents for this page.", -"id": "GoogleCloudDocumentaiV1DocumentPageImage", -"properties": { -"content": { -"description": "Raw byte content of the image.", -"format": "byte", -"type": "string" -}, -"height": { -"description": "Height of the image in pixels.", -"format": "int32", -"type": "integer" -}, -"mimeType": { -"description": "Encoding [media type (MIME type)](https://www.iana.org/assignments/media-types/media-types.xhtml) for the image.", -"type": "string" -}, -"width": { -"description": "Width of the image in pixels.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageImageQualityScores": { -"description": "Image quality scores for the page image.", -"id": "GoogleCloudDocumentaiV1DocumentPageImageQualityScores", -"properties": { -"detectedDefects": { -"description": "A list of detected defects.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageImageQualityScoresDetectedDefect" -}, -"type": "array" -}, -"qualityScore": { -"description": "The overall quality score. 
Range `[0, 1]` where `1` is perfect quality.", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageImageQualityScoresDetectedDefect": { -"description": "Image Quality Defects", -"id": "GoogleCloudDocumentaiV1DocumentPageImageQualityScoresDetectedDefect", -"properties": { -"confidence": { -"description": "Confidence of detected defect. Range `[0, 1]` where `1` indicates strong confidence that the defect exists.", -"format": "float", -"type": "number" -}, -"type": { -"description": "Name of the defect type. Supported values are: - `quality/defect_blurry` - `quality/defect_noisy` - `quality/defect_dark` - `quality/defect_faint` - `quality/defect_text_too_small` - `quality/defect_document_cutoff` - `quality/defect_text_cutoff` - `quality/defect_glare`", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageLayout": { -"description": "Visual element describing a layout unit on a page.", -"id": "GoogleCloudDocumentaiV1DocumentPageLayout", -"properties": { -"boundingPoly": { -"$ref": "GoogleCloudDocumentaiV1BoundingPoly", -"description": "The bounding polygon for the Layout." -}, -"confidence": { -"description": "Confidence of the current Layout within context of the object this layout is for. e.g. confidence can be for a single token, a table, a visual element, etc. depending on context. Range `[0, 1]`.", -"format": "float", -"type": "number" -}, -"orientation": { -"description": "Detected orientation for the Layout.", -"enum": [ -"ORIENTATION_UNSPECIFIED", -"PAGE_UP", -"PAGE_RIGHT", -"PAGE_DOWN", -"PAGE_LEFT" -], -"enumDescriptions": [ -"Unspecified orientation.", -"Orientation is aligned with page up.", -"Orientation is aligned with page right. Turn the head 90 degrees clockwise from upright to read.", -"Orientation is aligned with page down. Turn the head 180 degrees from upright to read.", -"Orientation is aligned with page left. 
Turn the head 90 degrees counterclockwise from upright to read." -], -"type": "string" -}, -"textAnchor": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextAnchor", -"description": "Text anchor indexing into the Document.text." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageLine": { -"description": "A collection of tokens that a human would perceive as a line. Does not cross column boundaries, can be horizontal, vertical, etc.", -"id": "GoogleCloudDocumentaiV1DocumentPageLine", -"properties": { -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Line." -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this annotation." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageMatrix": { -"description": "Representation for transformation matrix, intended to be compatible and used with OpenCV format for image manipulation.", -"id": "GoogleCloudDocumentaiV1DocumentPageMatrix", -"properties": { -"cols": { -"description": "Number of columns in the matrix.", -"format": "int32", -"type": "integer" -}, -"data": { -"description": "The matrix data.", -"format": "byte", -"type": "string" -}, -"rows": { -"description": "Number of rows in the matrix.", -"format": "int32", -"type": "integer" -}, -"type": { -"description": "This encodes information about what data type the matrix uses. For example, 0 (CV_8U) is an unsigned 8-bit image. 
For the full list of OpenCV primitive data types, please refer to https://docs.opencv.org/4.3.0/d1/d1b/group__core__hal__interface.html", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageParagraph": { -"description": "A collection of lines that a human would perceive as a paragraph.", -"id": "GoogleCloudDocumentaiV1DocumentPageParagraph", -"properties": { -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Paragraph." -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this annotation." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageSymbol": { -"description": "A detected symbol.", -"id": "GoogleCloudDocumentaiV1DocumentPageSymbol", -"properties": { -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Symbol." 
-} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageTable": { -"description": "A table representation similar to HTML table structure.", -"id": "GoogleCloudDocumentaiV1DocumentPageTable", -"properties": { -"bodyRows": { -"description": "Body rows of the table.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTableTableRow" -}, -"type": "array" -}, -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"headerRows": { -"description": "Header rows of the table.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTableTableRow" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Table." -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this table." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageTableTableCell": { -"description": "A cell representation inside the table.", -"id": "GoogleCloudDocumentaiV1DocumentPageTableTableCell", -"properties": { -"colSpan": { -"description": "How many columns this cell spans.", -"format": "int32", -"type": "integer" -}, -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for TableCell." 
-}, -"rowSpan": { -"description": "How many rows this cell spans.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageTableTableRow": { -"description": "A row of table cells.", -"id": "GoogleCloudDocumentaiV1DocumentPageTableTableRow", -"properties": { -"cells": { -"description": "Cells that make up this row.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTableTableCell" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageToken": { -"description": "A detected token.", -"id": "GoogleCloudDocumentaiV1DocumentPageToken", -"properties": { -"detectedBreak": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTokenDetectedBreak", -"description": "Detected break at the end of a Token." -}, -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for Token." -}, -"provenance": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance", -"deprecated": true, -"description": "The history of this annotation." -}, -"styleInfo": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageTokenStyleInfo", -"description": "Text style attributes." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageTokenDetectedBreak": { -"description": "Detected break at the end of a Token.", -"id": "GoogleCloudDocumentaiV1DocumentPageTokenDetectedBreak", -"properties": { -"type": { -"description": "Detected break type.", -"enum": [ -"TYPE_UNSPECIFIED", -"SPACE", -"WIDE_SPACE", -"HYPHEN" -], -"enumDescriptions": [ -"Unspecified break type.", -"A single whitespace.", -"A wider whitespace.", -"A hyphen that indicates that a token has been split across lines." 
-], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageTokenStyleInfo": { -"description": "Font and other text style attributes.", -"id": "GoogleCloudDocumentaiV1DocumentPageTokenStyleInfo", -"properties": { -"backgroundColor": { -"$ref": "GoogleTypeColor", -"description": "Color of the background." -}, -"bold": { -"description": "Whether the text is bold (equivalent to font_weight is at least `700`).", -"type": "boolean" -}, -"fontSize": { -"description": "Font size in points (`1` point is `\u00b9\u2044\u2087\u2082` inches).", -"format": "int32", -"type": "integer" -}, -"fontType": { -"description": "Name or style of the font.", -"type": "string" -}, -"fontWeight": { -"description": "TrueType weight on a scale `100` (thin) to `1000` (ultra-heavy). Normal is `400`, bold is `700`.", -"format": "int32", -"type": "integer" -}, -"handwritten": { -"description": "Whether the text is handwritten.", -"type": "boolean" -}, -"italic": { -"description": "Whether the text is italic.", -"type": "boolean" -}, -"letterSpacing": { -"description": "Letter spacing in points.", -"format": "double", -"type": "number" -}, -"pixelFontSize": { -"description": "Font size in pixels, equal to _unrounded font_size_ * _resolution_ \u00f7 `72.0`.", -"format": "double", -"type": "number" -}, -"smallcaps": { -"description": "Whether the text is in small caps. This feature is not supported yet.", -"type": "boolean" -}, -"strikeout": { -"description": "Whether the text is strikethrough. This feature is not supported yet.", -"type": "boolean" -}, -"subscript": { -"description": "Whether the text is a subscript. This feature is not supported yet.", -"type": "boolean" -}, -"superscript": { -"description": "Whether the text is a superscript. This feature is not supported yet.", -"type": "boolean" -}, -"textColor": { -"$ref": "GoogleTypeColor", -"description": "Color of the text." 
-}, -"underlined": { -"description": "Whether the text is underlined.", -"type": "boolean" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentPageVisualElement": { -"description": "Detected non-text visual elements e.g. checkbox, signature etc. on the page.", -"id": "GoogleCloudDocumentaiV1DocumentPageVisualElement", -"properties": { -"detectedLanguages": { -"description": "A list of detected languages together with confidence.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageDetectedLanguage" -}, -"type": "array" -}, -"layout": { -"$ref": "GoogleCloudDocumentaiV1DocumentPageLayout", -"description": "Layout for VisualElement." -}, -"type": { -"description": "Type of the VisualElement.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentProvenance": { -"description": "Structure to identify provenance relationships between annotations in different revisions.", -"id": "GoogleCloudDocumentaiV1DocumentProvenance", -"properties": { -"id": { -"deprecated": true, -"description": "The Id of this operation. Needs to be unique within the scope of the revision.", -"format": "int32", -"type": "integer" -}, -"parents": { -"description": "References to the original elements that are replaced.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenanceParent" -}, -"type": "array" -}, -"revision": { -"deprecated": true, -"description": "The index of the revision that produced this element.", -"format": "int32", -"type": "integer" -}, -"type": { -"description": "The type of provenance operation.", -"enum": [ -"OPERATION_TYPE_UNSPECIFIED", -"ADD", -"REMOVE", -"UPDATE", -"REPLACE", -"EVAL_REQUESTED", -"EVAL_APPROVED", -"EVAL_SKIPPED" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -true, -true, -true -], -"enumDescriptions": [ -"Operation type unspecified. 
If no operation is specified a provenance entry is simply used to match against a `parent`.", -"Add an element.", -"Remove an element identified by `parent`.", -"Updates any fields within the given provenance scope of the message. It overwrites the fields rather than replacing them. Use this when you want to update a field value of an entity without also updating all the child properties.", -"Currently unused. Replace an element identified by `parent`.", -"Deprecated. Request human review for the element identified by `parent`.", -"Deprecated. Element is reviewed and approved at human review, confidence will be set to 1.0.", -"Deprecated. Element is skipped in the validation process." -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentProvenanceParent": { -"description": "The parent element the current element is based on. Used for referencing/aligning, removal and replacement operations.", -"id": "GoogleCloudDocumentaiV1DocumentProvenanceParent", -"properties": { -"id": { -"deprecated": true, -"description": "The id of the parent provenance.", -"format": "int32", -"type": "integer" -}, -"index": { -"description": "The index of the parent item in the corresponding item list (eg. list of entities, properties within entities, etc.) 
in the parent revision.", -"format": "int32", -"type": "integer" -}, -"revision": { -"description": "The index of the index into current revision's parent_ids list.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentRevision": { -"description": "Contains past or forward revisions of this document.", -"id": "GoogleCloudDocumentaiV1DocumentRevision", -"properties": { -"agent": { -"description": "If the change was made by a person specify the name or id of that person.", -"type": "string" -}, -"createTime": { -"description": "The time that the revision was created, internally generated by doc proto storage at the time of create.", -"format": "google-datetime", -"type": "string" -}, -"humanReview": { -"$ref": "GoogleCloudDocumentaiV1DocumentRevisionHumanReview", -"description": "Human Review information of this revision." -}, -"id": { -"description": "Id of the revision, internally generated by doc proto storage. Unique within the context of the document.", -"type": "string" -}, -"parent": { -"deprecated": true, -"description": "The revisions that this revision is based on. This can include one or more parent (when documents are merged.) This field represents the index into the `revisions` field.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"parentIds": { -"description": "The revisions that this revision is based on. Must include all the ids that have anything to do with this revision - eg. 
there are `provenance.parent.revision` fields that index into this field.", -"items": { -"type": "string" -}, -"type": "array" -}, -"processor": { -"description": "If the annotation was made by processor identify the processor by its resource name.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentRevisionHumanReview": { -"description": "Human Review information of the document.", -"id": "GoogleCloudDocumentaiV1DocumentRevisionHumanReview", -"properties": { -"state": { -"description": "Human review state. e.g. `requested`, `succeeded`, `rejected`.", -"type": "string" -}, -"stateMessage": { -"description": "A message providing more details about the current state of processing. For example, the rejection reason when the state is `rejected`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentShardInfo": { -"description": "For a large document, sharding may be performed to produce several document shards. Each document shard contains this field to detail which shard it is.", -"id": "GoogleCloudDocumentaiV1DocumentShardInfo", -"properties": { -"shardCount": { -"description": "Total number of shards.", -"format": "int64", -"type": "string" -}, -"shardIndex": { -"description": "The 0-based index of this shard.", -"format": "int64", -"type": "string" -}, -"textOffset": { -"description": "The index of the first character in Document.text in the overall document global text.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentStyle": { -"description": "Annotation for common text style attributes. This adheres to CSS conventions as much as possible.", -"id": "GoogleCloudDocumentaiV1DocumentStyle", -"properties": { -"backgroundColor": { -"$ref": "GoogleTypeColor", -"description": "Text background color." -}, -"color": { -"$ref": "GoogleTypeColor", -"description": "Text color." -}, -"fontFamily": { -"description": "Font family such as `Arial`, `Times New Roman`. 
https://www.w3schools.com/cssref/pr_font_font-family.asp", -"type": "string" -}, -"fontSize": { -"$ref": "GoogleCloudDocumentaiV1DocumentStyleFontSize", -"description": "Font size." -}, -"fontWeight": { -"description": "[Font weight](https://www.w3schools.com/cssref/pr_font_weight.asp). Possible values are `normal`, `bold`, `bolder`, and `lighter`.", -"type": "string" -}, -"textAnchor": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextAnchor", -"description": "Text anchor indexing into the Document.text." -}, -"textDecoration": { -"description": "[Text decoration](https://www.w3schools.com/cssref/pr_text_text-decoration.asp). Follows CSS standard. ", -"type": "string" -}, -"textStyle": { -"description": "[Text style](https://www.w3schools.com/cssref/pr_font_font-style.asp). Possible values are `normal`, `italic`, and `oblique`.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentStyleFontSize": { -"description": "Font size with unit.", -"id": "GoogleCloudDocumentaiV1DocumentStyleFontSize", -"properties": { -"size": { -"description": "Font size for the text.", -"format": "float", -"type": "number" -}, -"unit": { -"description": "Unit for the font size. Follows CSS naming (such as `in`, `px`, and `pt`).", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentTextAnchor": { -"description": "Text reference indexing into the Document.text.", -"id": "GoogleCloudDocumentaiV1DocumentTextAnchor", -"properties": { -"content": { -"description": "Contains the content of the text span so that users do not have to look it up in the text_segments. It is always populated for formFields.", -"type": "string" -}, -"textSegments": { -"description": "The text segments from the Document.text.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextAnchorTextSegment" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentTextAnchorTextSegment": { -"description": "A text segment in the Document.text. 
The indices may be out of bounds which indicate that the text extends into another document shard for large sharded documents. See ShardInfo.text_offset", -"id": "GoogleCloudDocumentaiV1DocumentTextAnchorTextSegment", -"properties": { -"endIndex": { -"description": "TextSegment half open end UTF-8 char index in the Document.text.", -"format": "int64", -"type": "string" -}, -"startIndex": { -"description": "TextSegment start UTF-8 char index in the Document.text.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1DocumentTextChange": { -"description": "This message is used for text changes aka. OCR corrections.", -"id": "GoogleCloudDocumentaiV1DocumentTextChange", -"properties": { -"changedText": { -"description": "The text that replaces the text identified in the `text_anchor`.", -"type": "string" -}, -"provenance": { -"deprecated": true, -"description": "The history of this annotation.", -"items": { -"$ref": "GoogleCloudDocumentaiV1DocumentProvenance" -}, -"type": "array" -}, -"textAnchor": { -"$ref": "GoogleCloudDocumentaiV1DocumentTextAnchor", -"description": "Provenance of the correction. Text anchor indexing into the Document.text. There can only be a single `TextAnchor.text_segments` element. If the start and end index of the text segment are the same, the text change is inserted before that index." -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1NormalizedVertex": { -"description": "A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.", -"id": "GoogleCloudDocumentaiV1NormalizedVertex", -"properties": { -"x": { -"description": "X coordinate.", -"format": "float", -"type": "number" -}, -"y": { -"description": "Y coordinate (starts from the top of the image).", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleCloudDocumentaiV1Vertex": { -"description": "A vertex represents a 2D point in the image. 
NOTE: the vertex coordinates are in the same scale as the original image.", -"id": "GoogleCloudDocumentaiV1Vertex", -"properties": { -"x": { -"description": "X coordinate.", -"format": "int32", -"type": "integer" -}, -"y": { -"description": "Y coordinate (starts from the top of the image).", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleIamV1AuditConfig": { -"description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts `jose@example.com` from DATA_READ logging, and `aliya@example.com` from DATA_WRITE logging.", -"id": "GoogleIamV1AuditConfig", -"properties": { -"auditLogConfigs": { -"description": "The configuration for logging of each type of permission.", -"items": { -"$ref": "GoogleIamV1AuditLogConfig" -}, -"type": "array" -}, -"service": { -"description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleIamV1AuditLogConfig": { -"description": "Provides the configuration for logging a type of permissions. Example: { \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", -"id": "GoogleIamV1AuditLogConfig", -"properties": { -"exemptedMembers": { -"description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.", -"items": { -"type": "string" -}, -"type": "array" -}, -"logType": { -"description": "The log type that this config enables.", -"enum": [ -"LOG_TYPE_UNSPECIFIED", -"ADMIN_READ", -"DATA_WRITE", -"DATA_READ" -], -"enumDescriptions": [ -"Default case. Should never be this.", -"Admin reads. Example: CloudIAM getIamPolicy", -"Data writes. Example: CloudSQL Users create", -"Data reads. Example: CloudSQL Users list" -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleIamV1Binding": { -"description": "Associates `members`, or principals, with a `role`.", -"id": "GoogleIamV1Binding", -"properties": { -"condition": { -"$ref": "GoogleTypeExpr", -"description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." -}, -"members": { -"description": "Specifies the principals requesting access for a Google Cloud resource. 
`members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. 
* `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. 
For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.", -"items": { -"type": "string" -}, -"type": "array" -}, -"role": { -"description": "Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles).", -"type": "string" -} -}, -"type": "object" -}, -"GoogleIamV1Policy": { -"description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", -"id": "GoogleIamV1Policy", -"properties": { -"auditConfigs": { -"description": "Specifies cloud audit logging configuration for this policy.", -"items": { -"$ref": "GoogleIamV1AuditConfig" -}, -"type": "array" -}, -"bindings": { -"description": "Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. 
For example, if the `bindings` grant 50 different roles to `user:alice@example.com`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.", -"items": { -"$ref": "GoogleIamV1Binding" -}, -"type": "array" -}, -"etag": { -"description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.", -"format": "byte", -"type": "string" -}, -"version": { -"description": "Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. 
If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaAvailabilityPeriod": { -"description": "Represents the information about user's working hours during one day. Note that a period on Monday from 18:00 - 00:00 is represented as a triplet (1, 1080, 1440).", -"id": "GoogleInternalAppsWaldoV1alphaAvailabilityPeriod", -"properties": { -"dayOfWeek": { -"description": "Day of week, 0 for Sunday, 1 for Monday, ...", -"format": "int32", -"type": "integer" -}, -"periodEndMinutes": { -"description": "Period end, in minutes from the start of the day, exclusive.", -"format": "int32", -"type": "integer" -}, -"periodStartMinutes": { -"description": "Period start, in minutes from the start of the day, inclusive.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaCalendarBusy": { -"description": "The status indicating the user is temporarily busy and there is not a more specific status derived from calendar that applies (e.g., InMeeting or DoNotDisturb).", -"id": "GoogleInternalAppsWaldoV1alphaCalendarBusy", -"properties": { -"committedUntil": { -"description": "The time when the user will either stop being committed or change commitment type (i.e. InMeeting, DoNotDisturb, Busy or OutOfOffice < Xh). 
Note that the goal of this field is to provide information to help users decide how to communicate with a user (see also http://shortn/_wXYXtZScgh).", -"format": "google-datetime", -"type": "string" -}, -"eventSummary": { -"description": "The summary of the corresponding event in Calendar.", -"type": "string" -}, -"nextAvailable": { -"description": "The next time when the user will be available, i.e., when their status will be neither InMeeting, CalendarBusy, DoNotDisturb, OutsideWorkingHours, nor OutOfOffice.", -"format": "google-datetime", -"type": "string" -}, -"occupiedUntil": { -"description": "The time when the user will stop being occupied, i.e., when their status will be neither inMeeting, Busy nor DoNotDisturb.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaCustomLocation": { -"description": "Custom location specified by the user.", -"id": "GoogleInternalAppsWaldoV1alphaCustomLocation", -"properties": { -"geoCoordinates": { -"$ref": "GoogleTypeLatLng", -"description": "Geographic location as geo coordinates." -}, -"label": { -"description": "The custom location label as a string entered manually by the user.", -"type": "string" -}, -"location": { -"description": "Geographic location as free-form text.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaDoNotDisturb": { -"description": "The status indicating the user should not be disturbed.", -"id": "GoogleInternalAppsWaldoV1alphaDoNotDisturb", -"properties": { -"committedUntil": { -"description": "The time when the user will either stop being committed or change commitment type (i.e. InMeeting, DoNotDisturb, Busy or OutOfOffice < Xh). 
Note that the goal of this field is to provide information to help users decide how to communicate with a user (see also http://shortn/_wXYXtZScgh).", -"format": "google-datetime", -"type": "string" -}, -"nextAvailable": { -"description": "The next time when the user will be available, i.e., when their status will be neither InMeeting, CalendarBusy, DoNotDisturb, OutsideWorkingHours, nor OutOfOffice.", -"format": "google-datetime", -"type": "string" -}, -"occupiedUntil": { -"description": "The time when the user will stop being occupied, i.e., when their status will be neither inMeeting, Busy nor DoNotDisturb.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaHomeLocation": { -"description": "Home location.", -"id": "GoogleInternalAppsWaldoV1alphaHomeLocation", -"properties": {}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaInMeeting": { -"description": "The status indicating the user is in a meeting.", -"id": "GoogleInternalAppsWaldoV1alphaInMeeting", -"properties": { -"committedUntil": { -"description": "The time when the user will either stop being committed or change commitment type (i.e. InMeeting, DoNotDisturb, Busy or OutOfOffice < Xh). 
Note that the goal of this field is to provide information to help users decide how to communicate with a user (see also http://shortn/_wXYXtZScgh).", -"format": "google-datetime", -"type": "string" -}, -"eventSummary": { -"description": "The summary of the corresponding event in Calendar.", -"type": "string" -}, -"inMeetingsUntil": { -"description": "The time when the user will stop being in a meeting.", -"format": "google-datetime", -"type": "string" -}, -"nextAvailable": { -"description": "The next time when the user will be available, i.e., when their status will be neither InMeeting, CalendarBusy, DoNotDisturb, OutsideWorkingHours, nor OutOfOffice.", -"format": "google-datetime", -"type": "string" -}, -"occupiedUntil": { -"description": "The time when the user will stop being occupied, i.e., when their status will be neither InMeeting, Busy nor DoNotDisturb.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaInactive": { -"description": "The status indicating that no other status applies.", -"id": "GoogleInternalAppsWaldoV1alphaInactive", -"properties": {}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaLocalTimeContext": { -"description": "Context which helps to determine the user's local time.", -"id": "GoogleInternalAppsWaldoV1alphaLocalTimeContext", -"properties": { -"timeZone": { -"description": "The current time zone of the user. Represented as a valid time zone ID from Olson database, like \"Europe/Zurich\" (see http://google3/i18n/identifiers/data/timezones.txt).", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaOfficeLocation": { -"description": "Office location.", -"id": "GoogleInternalAppsWaldoV1alphaOfficeLocation", -"properties": { -"experimentalBuildingId": { -"description": "Experimental. Can change or disappear without warning or notice. 
References a building from http://google3/ccc/hosted/api/rosy/resources/calendar/building.proto For example \"US-NYC-9TH\".", -"type": "string" -}, -"experimentalDeskId": { -"description": "Experimental. Can change or disappear without warning or notice. The desk id. For example \"11E358K\".", -"type": "string" -}, -"experimentalFloorId": { -"description": "Experimental. Can change or disappear without warning or notice. The floor id. For example \"11\".", -"type": "string" -}, -"label": { -"description": "The display label of this office location. For example a building name.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaOutOfOffice": { -"description": "The status indicating the user is out of office.", -"id": "GoogleInternalAppsWaldoV1alphaOutOfOffice", -"properties": { -"comeBackTime": { -"description": "The closest time when the user will be available after this OOO block. This might be different from the end of the OOO block in Calendar, since the OOO block might end on Friday evening, and then the user is outside working hours.", -"format": "google-datetime", -"type": "string" -}, -"committedUntil": { -"description": "The time when the user will either stop being committed or change commitment type (i.e. InMeeting, DoNotDisturb, Busy or OutOfOffice < Xh). Note that the goal of this field is to provide information to help users decide how to communicate with a user (see also http://shortn/_wXYXtZScgh). Note that if this OOO block is large (>=Xh), committed_until is not set.", -"format": "google-datetime", -"type": "string" -}, -"eventSummary": { -"description": "The summary of the corresponding OOO block in Calendar. 
This is entered by the user, so we return it \"as is\" - no i18n.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaOutsideWorkingHours": { -"description": "The status indicating the user doesn't work at this time.", -"id": "GoogleInternalAppsWaldoV1alphaOutsideWorkingHours", -"properties": { -"comeBackTime": { -"description": "The closest time when the user will be available after this block. This might be different from the start of the working hours in Calendar, because the given OutsideWorkingHours interval might be followed by OOO.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaTimeRange": { -"description": "A time range, which includes the start and excludes the end.", -"id": "GoogleInternalAppsWaldoV1alphaTimeRange", -"properties": { -"endTime": { -"description": "End point of the range, exclusive.", -"format": "google-datetime", -"type": "string" -}, -"startTime": { -"description": "Starting point of the range, inclusive.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUpcomingCommitmentContext": { -"id": "GoogleInternalAppsWaldoV1alphaUpcomingCommitmentContext", -"properties": { -"nextCommitmentStatus": { -"$ref": "GoogleInternalAppsWaldoV1alphaUserStatus", -"description": "The status of the commitment above." -}, -"nextCommitmentTime": { -"description": "The most relevant upcoming commitment (InMeeting, DoNotDisturb, CalendarBusy or OutOfOffice). This context is set only if there is an upcoming commitment to show, and only on non commitments. 
Priority is given to the next closest commitment if its start is close enough to this event, otherwise the next large OOO if there is one.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUpcomingOooContext": { -"description": "The context indicating the user's upcoming Out of Office event.", -"id": "GoogleInternalAppsWaldoV1alphaUpcomingOooContext", -"properties": { -"timeRange": { -"$ref": "GoogleInternalAppsWaldoV1alphaTimeRange", -"description": "The future period of absence. The start of this timerange is the start of the future Out of Office event. The end of this timerange represents the come back time of the user from that future OOO event. Note that the come back time might be different (greater) than the end of the corresponding future OOO event due to other non-working user status intervals that it may be followed by." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUserAvailabilities": { -"description": "The current and future availabilities of a user. The response contains a timeline, which starts before or at the request time, and the timeline is split into a set of disjoint intervals (without gaps), where the first range always contains the request time. Each range represents what should be displayed in the UI during this time range. The time range might be different from the actual time range of the underlying status. For example, if the user is OOO from 09:00 to 10:00, and a request is made at 8:00, the response might contain two intervals: [08:00, 09:00) - \"User is INACTIVE, but leaving the office soon\" [09:00, 10:00) - \"User is OOO till 10:00\" For intervals that don't have a clear availability signal coming from Calendar (e.g. OOO), we return INACTIVE. 
For more details, please see https://docs.google.com/presentation/d/1ADCTxGawjF9UqMnfuVrVNxGvdyjeiV8h4D7p0a9zYgw/edit#slide=id.g3e2824ac6c_12_94 The service returns availabilities for some short period of time - likely one day, but the client should stick to the \"next_poll_time\" to decide when to query the server again at the latest. Below there is an example response from the server. Let's assume the client calls the service at 17:59:45. The client receives the message and, assuming its current time is between [17:59:45, 18:00:00), it displays \"inactive\". When the current time becomes 18:00:00 it displays \"outside working hours\". At 18:00:40 the client issues another rpc which will return the availabilities for the next minute. The original response looks like availabilities { time_range { start_time: 17:59:45 end_time: 18:00:00 } status { inactive {} } } availabilities { time_range { start_time: 18:00:00 end_time: 18:00:45 } status { outside_working_hours { } } } next_poll_time: 18:00:40", -"id": "GoogleInternalAppsWaldoV1alphaUserAvailabilities", -"properties": { -"availabilities": { -"description": "A list of user availabilities having contiguous time ranges which are ordered chronologically. The first one starts at the time of the request or before, and is guaranteed to contain the request time. That means the first element always indicates the current status of a user. A client that wants to display a user's availability in real time should display the availability whose time range contains the current time.", -"items": { -"$ref": "GoogleInternalAppsWaldoV1alphaUserAvailability" -}, -"type": "array" -}, -"nextPollTime": { -"description": "The time at which the client should issue the next availability query for this user. This field should only be used to control the polling frequency. 
This time is always before the end of the time range of the last availability so that the client always knows the current availability.", -"format": "google-datetime", -"type": "string" -}, -"workingHours": { -"$ref": "GoogleInternalAppsWaldoV1alphaWorkingHours", -"description": "Information about the user's working hours. This will only be set in case working hours are enabled in their calendar settings." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUserAvailability": { -"description": "A single availability range. The displayed status should be the same during the entire time range.", -"id": "GoogleInternalAppsWaldoV1alphaUserAvailability", -"properties": { -"contexts": { -"$ref": "GoogleInternalAppsWaldoV1alphaUserContext", -"description": "The contexts contain additional information about the current user's availability or its upcoming changes. The client doesn't need to extract certain bits to visualize the status or apply custom logic based on the content of this field: the status field should contain everything needed for the correct visualization." -}, -"status": { -"$ref": "GoogleInternalAppsWaldoV1alphaUserStatus", -"description": "The user status during the time range." -}, -"timeRange": { -"$ref": "GoogleInternalAppsWaldoV1alphaTimeRange", -"description": "The time range when this availability should be displayed." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUserContext": { -"description": "Additional context about the user's current and/or future availability to give a better understanding of the status (\"Working from Zurich\").", -"id": "GoogleInternalAppsWaldoV1alphaUserContext", -"properties": { -"localTime": { -"$ref": "GoogleInternalAppsWaldoV1alphaLocalTimeContext", -"description": "Helps to determine the user's local time by providing their current time zone." 
-}, -"upcomingCommitmentContext": { -"$ref": "GoogleInternalAppsWaldoV1alphaUpcomingCommitmentContext", -"description": "Information about upcoming events." -}, -"upcomingOoo": { -"$ref": "GoogleInternalAppsWaldoV1alphaUpcomingOooContext", -"description": "Set if user has upcoming OOO." -}, -"workingElsewhere": { -"$ref": "GoogleInternalAppsWaldoV1alphaWorkingElsewhereContext", -"description": "Set if the user has a working location. Not just elsewhere (legacy name)." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUserLocation": { -"description": "Location of the user, which might be \"home\", for example, or an office building, as well as a custom location specified by the user.", -"id": "GoogleInternalAppsWaldoV1alphaUserLocation", -"properties": { -"customLocation": { -"$ref": "GoogleInternalAppsWaldoV1alphaCustomLocation", -"description": "Indicates the user is working from some other location." -}, -"homeLocation": { -"$ref": "GoogleInternalAppsWaldoV1alphaHomeLocation", -"description": "Indicates the user is working from home." -}, -"officeLocation": { -"$ref": "GoogleInternalAppsWaldoV1alphaOfficeLocation", -"description": "Indicates the user is working from the office." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaUserStatus": { -"description": "The actual status of the user. The message contains everything needed for visualisation of this status.", -"id": "GoogleInternalAppsWaldoV1alphaUserStatus", -"properties": { -"calendarBusy": { -"$ref": "GoogleInternalAppsWaldoV1alphaCalendarBusy", -"description": "Set if the user is temporarily busy and there is not a more specific status derived from calendar that applies (e.g., InMeeting or DoNotDisturb)." -}, -"doNotDisturb": { -"$ref": "GoogleInternalAppsWaldoV1alphaDoNotDisturb", -"description": "Set if the user is in a Focus Time block. Note that this is different than Chat's Do not disturb status, but they may coincide, see: go/focus-time-dnd." 
-}, -"inMeeting": { -"$ref": "GoogleInternalAppsWaldoV1alphaInMeeting", -"description": "Set if the user is in a meeting." -}, -"inactive": { -"$ref": "GoogleInternalAppsWaldoV1alphaInactive", -"description": "Set if no other statuses apply." -}, -"outOfOffice": { -"$ref": "GoogleInternalAppsWaldoV1alphaOutOfOffice", -"description": "Set if the user is out of office." -}, -"outsideWorkingHours": { -"$ref": "GoogleInternalAppsWaldoV1alphaOutsideWorkingHours", -"description": "Set if the user doesn't work at this time." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaWorkingElsewhereContext": { -"description": "The context providing the User Location (not just Elsewhere). This is a legacy name from when it was only set for users working remotely, now it is also set when the user is working from the office.", -"id": "GoogleInternalAppsWaldoV1alphaWorkingElsewhereContext", -"properties": { -"location": { -"$ref": "GoogleInternalAppsWaldoV1alphaUserLocation", -"description": "The new location of the user. Might represent home, office, or a custom address on the map." -} -}, -"type": "object" -}, -"GoogleInternalAppsWaldoV1alphaWorkingHours": { -"description": "Information about the user's working hours.", -"id": "GoogleInternalAppsWaldoV1alphaWorkingHours", -"properties": { -"availableTime": { -"description": "A list of availability periods representing the user's working hours as configured in calendar.", -"items": { -"$ref": "GoogleInternalAppsWaldoV1alphaAvailabilityPeriod" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleInternalCommunicationsInstantmessagingV1Id": { -"description": "Id for message recipients, e.g. 
users, groups etc.", -"id": "GoogleInternalCommunicationsInstantmessagingV1Id", -"properties": { -"app": { -"description": "app is the tachyon client application that generated or is to receive a message.", -"type": "string" -}, -"countryCode": { -"deprecated": true, -"description": "country_code is the E164_COUNTRY_CODE format country code for this id, used as a hint for its region. E.g, \"+1\" will be used for North America, \"+86\" will be used for China, etc. Should be filled only for RCS group id.", -"type": "string" -}, -"id": { -"description": "id is a unique (for this type and app) identifier of a message source or recipient.", -"type": "string" -}, -"locationHint": { -"$ref": "GoogleInternalCommunicationsInstantmessagingV1LocationHint", -"description": "location_hint is used as a hint for the user's region." -}, -"routingInfoToken": { -"description": "Raw byte array containing encoded routing information. Clients of Tachyon are expected to include the most recent routing_info_cookie that they have received from the server in the requests that they make. Its format is purposely opaque so that clients do not need to concern themselves with the content of this field. This field is expected to be used primarily by Tachygram clients for go/tachygram-groups to simplify the API contract for group operations while reducing the need for unnecessary lookups.", -"format": "byte", -"type": "string" -}, -"type": { -"description": "type defines what the id field contains, e.g. 
phone number, Fi-number, Gaia ID etc.", -"enum": [ -"UNSET", -"PHONE_NUMBER", -"GROUP_ID", -"FIREBALL_BOT", -"CALL_CONTROLLER", -"SUGGESTER", -"FI_ID", -"SYSTEM", -"DUO_BOT", -"MATCHBOX_ID", -"RCS_BOT", -"WIREBALL", -"SERVICE_ACCOUNT", -"DEVICE_ID", -"FOREIGN_RCS_GROUP", -"DITTO", -"EMAIL", -"GAIA_ID", -"LIGHTER_ID", -"OPAQUE_ID", -"SERVER", -"SHORT_CODE", -"CLOUDCAST_PLAYER_ID", -"CHROMOTING_ID", -"UNNORMALIZABLE_PHONE_NUMBER", -"NOT_KNOWN", -"ANDROID_ID", -"NEARBY_ID", -"WAZE_ID", -"GUEST", -"MESSAGES_DATA_DONATION", -"DUO_CLIP_ID", -"ACCOUNT_ID", -"CARRIER_ID", -"EXTERNAL_PARTNER_ID", -"UNAUTHENTICATED_USER_ID", -"SUPPORT_CASES_ID", -"FITBIT_P11_ID", -"SHORT_PHONE_NUMBER", -"USER_HANDLE", -"PENPAL_CONVERSATION_ID" -], -"enumDescriptions": [ -"", -"E.164 format.", -"", -"Handled by Extractor", -"Handled by Call Controller Message Receiver.", -"", -"IDs of this type will be verified using a Fi service.", -"Used for any system-generated, internal and external", -"messages. ex: Duo's invite reward. Bots for Duo", -"Used by Matchbox for 1P/3P App Preview Messaging.", -"RCS BOT for sending message to Platinum", -"Web Client for Fireball", -"Service account, currently only used for Duo API.", -"Device ID used by Matchstick anonymous registrations.", -"Rcs group hosted by another RCS cloud.", -"Android messages desktop: go/android-messages-desktop.", -"Email address.", -"Only used by servers.", -"Used by Lighter platform: go/lighter-backend-api.", -"Used when client want's to obfuscate the id of the", -"entity. Not usable by server for routing. Used for server delivery. go/tachyon_server_delivery", -"Phone short code.", -"Used by cloudcast (yeti). 64 bit integer.", -"go/yeti-player-ids Chrome Remote Desktop (go/chromoting).", -"Unnormalizable phone number", -"client does not know the id type.", -"android id", -"Used by Nearby devices. go/tachyon-express-nearby.", -"Used by Waze. 
go/waze-tachyon.", -"Used by Duo currently.", -"Used by Android Messages for data donation.", -"Used as the Id Type of Clips From Duo clips. See http://go/clips-from-duo", -"used by Takeout/LIS: see go/tachyon-legal-takeout.", -"used by RCS NNI Notification", -"used by RCS NNI Notification", -"used by GMM for anonymous registration", -"used by Support Cases for Realtime Media WebRtc", -"signaling. go/cases-media-channel-dd. used by Fitbit Project 11 for messaging.", -"Short phone number: go/shortnumbers", -"Used to reference internal and users in other domains,", -"including for DMA interop Used for 1:1 conversations in Gemini Mode" -], -"type": "string" -} -}, -"type": "object" -}, -"GoogleInternalCommunicationsInstantmessagingV1LocationHint": { -"description": "LocationHint is used to specify a location as well as format.", -"id": "GoogleInternalCommunicationsInstantmessagingV1LocationHint", -"properties": { -"format": { -"description": "the format of location.", -"enum": [ -"UNKNOWN", -"E164_CALLING", -"ISO_3166_1_ALPHA_2" -], -"enumDescriptions": [ -"", -"E164 country codes: https://en.wikipedia.org/wiki/List_of_country_calling_codes e.g. +1 for USA", -"ISO 3166-1 alpha-2 country codes: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2" -], -"type": "string" -}, -"location": { -"description": "Location is the location, provided in the format specified by format.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleLongrunningOperation": { -"description": "This resource represents a long-running operation that is the result of a network API call.", -"id": "GoogleLongrunningOperation", -"properties": { -"done": { -"description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.", -"type": "boolean" -}, -"error": { -"$ref": "GoogleRpcStatus", -"description": "The error result of the operation in case of failure or cancellation." 
-}, -"metadata": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", -"type": "object" -}, -"name": { -"description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.", -"type": "string" -}, -"response": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", -"type": "object" -} -}, -"type": "object" -}, -"GoogleProtobufEmpty": { -"description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", -"id": "GoogleProtobufEmpty", -"properties": {}, -"type": "object" -}, -"GoogleRpcStatus": { -"description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. 
It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", -"id": "GoogleRpcStatus", -"properties": { -"code": { -"description": "The status code, which should be an enum value of google.rpc.Code.", -"format": "int32", -"type": "integer" -}, -"details": { -"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", -"items": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" -}, -"type": "object" -}, -"type": "array" -}, -"message": { -"description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleTypeColor": { -"description": "Represents a color in the RGBA color space. This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value\u2014for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. 
When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result autorelease]; return result; } // ... Example (JavaScript): // ... 
var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); } resultBuilder.push(hexString); return resultBuilder.join(''); }; // ...", -"id": "GoogleTypeColor", -"properties": { -"alpha": { -"description": "The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. 
If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0).", -"format": "float", -"type": "number" -}, -"blue": { -"description": "The amount of blue in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -}, -"green": { -"description": "The amount of green in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -}, -"red": { -"description": "The amount of red in the color as a value in the interval [0, 1].", -"format": "float", -"type": "number" -} -}, -"type": "object" -}, -"GoogleTypeDate": { -"description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", -"id": "GoogleTypeDate", -"properties": { -"day": { -"description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.", -"format": "int32", -"type": "integer" -}, -"month": { -"description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.", -"format": "int32", -"type": "integer" -}, -"year": { -"description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleTypeDateTime": { -"description": "Represents civil time (or occasionally physical time). 
This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations.", -"id": "GoogleTypeDateTime", -"properties": { -"day": { -"description": "Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day.", -"format": "int32", -"type": "integer" -}, -"hours": { -"description": "Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", -"format": "int32", -"type": "integer" -}, -"minutes": { -"description": "Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0.", -"format": "int32", -"type": "integer" -}, -"month": { -"description": "Optional. Month of year. Must be from 1 to 12, or 0 if specifying a datetime without a month.", -"format": "int32", -"type": "integer" -}, -"nanos": { -"description": "Optional. Fractions of seconds in nanoseconds. 
Must be from 0 to 999,999,999, defaults to 0.", -"format": "int32", -"type": "integer" -}, -"seconds": { -"description": "Optional. Seconds of minutes of the time. Must normally be from 0 to 59, defaults to 0. An API may allow the value 60 if it allows leap-seconds.", -"format": "int32", -"type": "integer" -}, -"timeZone": { -"$ref": "GoogleTypeTimeZone", -"description": "Time zone." -}, -"utcOffset": { -"description": "UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }.", -"format": "google-duration", -"type": "string" -}, -"year": { -"description": "Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleTypeExpr": { -"description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. 
See the service documentation for additional information.", -"id": "GoogleTypeExpr", -"properties": { -"description": { -"description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", -"type": "string" -}, -"expression": { -"description": "Textual representation of an expression in Common Expression Language syntax.", -"type": "string" -}, -"location": { -"description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", -"type": "string" -}, -"title": { -"description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleTypeInterval": { -"description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive). The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", -"id": "GoogleTypeInterval", -"properties": { -"endTime": { -"description": "Optional. Exclusive end of the interval. If specified, a Timestamp matching this interval will have to be before the end.", -"format": "google-datetime", -"type": "string" -}, -"startTime": { -"description": "Optional. Inclusive start of the interval. If specified, a Timestamp matching this interval will have to be the same or after the start.", -"format": "google-datetime", -"type": "string" -} -}, -"type": "object" -}, -"GoogleTypeLatLng": { -"description": "An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. 
Values must be within normalized ranges.", -"id": "GoogleTypeLatLng", -"properties": { -"latitude": { -"description": "The latitude in degrees. It must be in the range [-90.0, +90.0].", -"format": "double", -"type": "number" -}, -"longitude": { -"description": "The longitude in degrees. It must be in the range [-180.0, +180.0].", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"GoogleTypeMoney": { -"description": "Represents an amount of money with its currency type.", -"id": "GoogleTypeMoney", -"properties": { -"currencyCode": { -"description": "The three-letter currency code defined in ISO 4217.", -"type": "string" -}, -"nanos": { -"description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", -"format": "int32", -"type": "integer" -}, -"units": { -"description": "The whole units of the amount. For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", -"format": "int64", -"type": "string" -} -}, -"type": "object" -}, -"GoogleTypePostalAddress": { -"description": "Represents a postal address, e.g. for postal delivery or payments addresses. Given a postal address, a postal service can deliver items to a premise, P.O. Box or similar. It is not intended to model geographical locations (roads, towns, mountains). In typical usage an address would be created via user input or from importing existing data, depending on the type of process. Advice on address input / editing: - Use an internationalization-ready address widget such as https://github.com/google/libaddressinput) - Users should not be presented with UI elements for input or editing of fields outside countries where that field is used. 
For more guidance on how to use this schema, please see: https://support.google.com/business/answer/6397478", -"id": "GoogleTypePostalAddress", -"properties": { -"addressLines": { -"description": "Unstructured address lines describing the lower levels of an address. Because values in address_lines do not have type information and may sometimes contain multiple values in a single field (e.g. \"Austin, TX\"), it is important that the line order is clear. The order of address lines should be \"envelope order\" for the country/region of the address. In places where this can vary (e.g. Japan), address_language is used to make it explicit (e.g. \"ja\" for large-to-small ordering and \"ja-Latn\" or \"en\" for small-to-large). This way, the most specific line of an address can be selected based on the language. The minimum permitted structural representation of an address consists of a region_code with all remaining information placed in the address_lines. It would be possible to format such an address very approximately without geocoding, but no semantic reasoning could be made about any of the address components until it was at least partially resolved. Creating an address only containing a region_code and address_lines, and then geocoding is the recommended way to handle completely unstructured addresses (as opposed to guessing which parts of the address should be localities or administrative areas).", -"items": { -"type": "string" -}, -"type": "array" -}, -"administrativeArea": { -"description": "Optional. Highest administrative subdivision which is used for postal addresses of a country or region. For example, this can be a state, a province, an oblast, or a prefecture. Specifically, for Spain this is the province and not the autonomous community (e.g. \"Barcelona\" and not \"Catalonia\"). Many countries don't use an administrative area in postal addresses. E.g. 
in Switzerland this should be left unpopulated.", -"type": "string" -}, -"languageCode": { -"description": "Optional. BCP-47 language code of the contents of this address (if known). This is often the UI language of the input form or is expected to match one of the languages used in the address' country/region, or their transliterated equivalents. This can affect formatting in certain countries, but is not critical to the correctness of the data and will never affect any validation or other non-formatting related operations. If this value is not known, it should be omitted (rather than specifying a possibly incorrect default). Examples: \"zh-Hant\", \"ja\", \"ja-Latn\", \"en\".", -"type": "string" -}, -"locality": { -"description": "Optional. Generally refers to the city/town portion of the address. Examples: US city, IT comune, UK post town. In regions of the world where localities are not well defined or do not fit into this structure well, leave locality empty and use address_lines.", -"type": "string" -}, -"organization": { -"description": "Optional. The name of the organization at the address.", -"type": "string" -}, -"postalCode": { -"description": "Optional. Postal code of the address. Not all countries use or require postal codes to be present, but where they are used, they may trigger additional validation with other parts of the address (e.g. state/zip validation in the U.S.A.).", -"type": "string" -}, -"recipients": { -"description": "Optional. The recipient at the address. This field may, under certain circumstances, contain multiline information. For example, it might contain \"care of\" information.", -"items": { -"type": "string" -}, -"type": "array" -}, -"regionCode": { -"description": "Required. CLDR region code of the country/region of the address. This is never inferred and it is up to the user to ensure the value is correct. 
See https://cldr.unicode.org/ and https://www.unicode.org/cldr/charts/30/supplemental/territory_information.html for details. Example: \"CH\" for Switzerland.", -"type": "string" -}, -"revision": { -"description": "The schema revision of the `PostalAddress`. This must be set to 0, which is the latest revision. All new revisions **must** be backward compatible with old revisions.", -"format": "int32", -"type": "integer" -}, -"sortingCode": { -"description": "Optional. Additional, country-specific, sorting code. This is not used in most regions. Where it is used, the value is either a string like \"CEDEX\", optionally followed by a number (e.g. \"CEDEX 7\"), or just a number alone, representing the \"sector code\" (Jamaica), \"delivery area indicator\" (Malawi) or \"post office indicator\" (e.g. C\u00f4te d'Ivoire).", -"type": "string" -}, -"sublocality": { -"description": "Optional. Sublocality of the address. For example, this can be neighborhoods, boroughs, districts.", -"type": "string" -} -}, -"type": "object" -}, -"GoogleTypeTimeOfDay": { -"description": "Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`.", -"id": "GoogleTypeTimeOfDay", -"properties": { -"hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", -"format": "int32", -"type": "integer" -}, -"minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", -"format": "int32", -"type": "integer" -}, -"nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", -"format": "int32", -"type": "integer" -}, -"seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"GoogleTypeTimeZone": { -"description": "Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones).", -"id": "GoogleTypeTimeZone", -"properties": { -"id": { -"description": "IANA Time Zone Database time zone, e.g. \"America/New_York\".", -"type": "string" -}, -"version": { -"description": "Optional. IANA Time Zone Database version number, e.g. \"2019a\".", -"type": "string" -} -}, -"type": "object" -}, -"GroupsPerDocData": { -"id": "GroupsPerDocData", -"properties": { -"AuthorId": { -"format": "uint64", -"type": "string" -}, -"GroupGaiaId": { -"format": "uint64", -"type": "string" -}, -"GroupId": { -"description": "Legacy group mysql id.", -"format": "uint64", -"type": "string" -}, -"ThreadId": { -"format": "uint64", -"type": "string" -} -}, -"type": "object" -}, -"HomeGraphCommonRoute": { -"id": "HomeGraphCommonRoute", -"properties": { -"agentDeviceId": { -"description": "The device ID defined by the agent.", -"type": "string" -}, -"agentId": { -"description": "The agent's ID. Generally it is the agent's Google cloud project id.", -"type": "string" -}, -"chipEndpoint": { -"description": "chip endpoint index (if the target is CHIP). Set packed = true to handle error caused by b/32953375 when exporting this data. 
Note that we should never change this to non-repeated: a packed field will not work properly if you change the field to non-repeated later.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"targetType": { -"description": "Execution routing target.", -"enum": [ -"UNSPECIFIED", -"PARTNER_CLOUD", -"LOCAL", -"CHIP" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -} -}, -"type": "object" -}, -"HomeGraphCommonRoutingTable": { -"description": "Defines execution routing information for Traits, which will be used to be mapped for each Trait as following: map traits_to_routing_table_map = X; We'll use this for CHIP first. And expect to migrate the existing routing logic to this gradually.", -"id": "HomeGraphCommonRoutingTable", -"properties": { -"supportedRoutes": { -"description": "List of supported execution route.", -"items": { -"$ref": "HomeGraphCommonRoute" -}, -"type": "array" -} -}, -"type": "object" -}, -"HomeGraphCommonTraitRoutingHints": { -"description": "LINT.IfChange go/shed-per-trait-routing. Making it object to allow for extendible design, where we can add attributes in future.", -"id": "HomeGraphCommonTraitRoutingHints", -"properties": { -"cloudFulfillmentOnly": { -"description": "Set to true for a non-local trait.", -"type": "boolean" -}, -"trait": { -"description": "Trait name, e.g., \"action.devices.traits.MediaInitiation\". See [device traits](https://developers.home.google.com/cloud-to-cloud/traits). See java/com/google/home/graph/common/devices/config/protoconf.pi for the exhaustive list of trait-strings.", -"type": "string" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoAnonTimingStatPair": { -"description": "TimingStatPair message stores a timing period name and a time value. This is intentionally vague for doing fine level timing of rendering as what we measure is likely to change as we iterate. 
The intention is also that these values will just be dumped to varzs for evaluation purposed and not used programmatically.", -"id": "HtmlrenderWebkitHeadlessProtoAnonTimingStatPair", -"properties": { -"name": { -"type": "string" -}, -"timeS": { -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoBox": { -"description": "A simple 2D box represented by an (x, y) coordinate, a width, and a height. Coordinates are in pixels.", -"id": "HtmlrenderWebkitHeadlessProtoBox", -"properties": { -"height": { -"format": "int32", -"type": "integer" -}, -"width": { -"format": "int32", -"type": "integer" -}, -"x": { -"description": "on horizontal axis", -"format": "int32", -"type": "integer" -}, -"y": { -"description": "on vertical axis", -"format": "int32", -"type": "integer" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoChromiumTrace": { -"description": "A message to describe the trace events returned by Chromium.", -"id": "HtmlrenderWebkitHeadlessProtoChromiumTrace", -"properties": { -"chromiumTrace": { -"description": "Populated if Chromium traces are requested in JSON format.", -"type": "string" -}, -"chromiumTraceProto": { -"description": "Populated if Chromium traces are requested in PROTO format.", -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoConsoleLogEntry": { -"description": "ConsoleLogEntry message stores messages logged by the renderer to the console. Typically error messages related to JS execution, parsing, any CSS errors, etc are logged by the renderer to the console. Next id: 7.", -"id": "HtmlrenderWebkitHeadlessProtoConsoleLogEntry", -"properties": { -"lineNumber": { -"description": "Line number of the document which caused an error.", -"format": "int32", -"type": "integer" -}, -"message": { -"description": "Message which indicates the nature of the error. e.g. 
parse error, reference error (happens when javascript functions or variables are not resolvable) etc.", -"type": "string" -}, -"messageLevel": { -"description": "message level", -"enum": [ -"TIP_MESSAGE_LEVEL", -"DEBUG_MESSAGE_LEVEL", -"LOG_MESSAGE_LEVEL", -"INFO_MESSAGE_LEVEL", -"WARNING_MESSAGE_LEVEL", -"ERROR_MESSAGE_LEVEL" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"sourceUrl": { -"description": "The url of the document which has the error.", -"type": "string" -}, -"stackTrace": { -"description": "Stack trace which functions were called when generating the console log. The first frame is the innermost one.", -"items": { -"$ref": "HtmlrenderWebkitHeadlessProtoScriptStackFrame" -}, -"type": "array" -}, -"timestamp": { -"description": "Wall time (s) when the log entry was recorded", -"format": "double", -"type": "number" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoCookie": { -"id": "HtmlrenderWebkitHeadlessProtoCookie", -"properties": { -"domain": { -"type": "string" -}, -"expiration": { -"format": "double", -"type": "number" -}, -"httpOnly": { -"type": "boolean" -}, -"name": { -"format": "byte", -"type": "string" -}, -"path": { -"type": "string" -}, -"sameSite": { -"enum": [ -"SAME_SITE_UNSPECIFIED", -"SAME_SITE_LAX", -"SAME_SITE_STRICT", -"SAME_SITE_NONE" -], -"enumDescriptions": [ -"", -"", -"", -"" -], -"type": "string" -}, -"secure": { -"type": "boolean" -}, -"value": { -"format": "byte", -"type": "string" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoDOMStorageItem": { -"id": "HtmlrenderWebkitHeadlessProtoDOMStorageItem", -"properties": { -"key": { -"type": "string" -}, -"securityOrigin": { -"type": "string" -}, -"value": { -"type": "string" -} -}, -"type": "object" -}, -"HtmlrenderWebkitHeadlessProtoDOMTreeNode": { -"description": "DOMTreeNode Defines a DOM Node. An instance can contain references to one or more children (of type DOMTreeNode) and one or more attributes. 
The DOMTreeNode also encapsulates rendering information (if applicable) in the form of references to one or more RenderTreeNodes. Next tag available: 16", -"id": "HtmlrenderWebkitHeadlessProtoDOMTreeNode", -"properties": { -"attribute": { -"items": { -"$ref": "HtmlrenderWebkitHeadlessProtoDOMTreeNodeAttribute" -}, -"type": "array" -}, -"childDomTreeNodeIndex": { -"description": "An index per child. Indexes can be used to fetch the DOMTreeNodes from the list maintained by the Document.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"currentSourceUrl": { -"description": "For elements, the actual url that was used to fetch the image. Note that this field is set only if it is different from the 'src' attribute value.", -"type": "string" -}, -"document": { -"$ref": "HtmlrenderWebkitHeadlessProtoDocument", -"description": "If the node represents an iframe or a frame then document will be set." -}, -"htmlTagType": { -"description": "Identifies the HTML tag type (IMG, P, DIV, etc). Applicable only for DOM nodes that are representative of html elements. For a list of possible types refer HtmlTagEnum defined in webutil/html/htmltagenum.h.", -"format": "int32", -"type": "integer" -}, -"isClickable": { -"description": "Whether this DOM node responds to mouse clicks. This includes e.g. nodes that have had click event listeners attached via JavaScript as well as e.g. 
anchor tags that naturally navigate when clicked.", -"type": "boolean" -}, -"name": { -"description": "Name of the node (document, text, comment, div, etc).", -"type": "string" -}, -"originUrl": { -"description": "URL of the script, if any, which created or populated this node.", -"type": "string" -}, -"referencedResourceIndex": { -"description": "List of referenced resource indexes for any resources that this DOM tree node references.", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"renderTreeNodeIndex": { -"description": "RenderTreeNode can be looked up from the list of RenderTreeNodes stored in the Document using render_tree_node_index. RenderTreeNode gives rendering information (bounding box, style that was applied, etc). Note: 1. If a DOMTreeNode does not have a RenderTreeNode then it is safe to assume that the DOMTreeNode has no effect on the rendering. DOMTreeNodes for a , ", -"items": { -"format": "int32", -"type": "integer" -}, -"type": "array" -}, -"type": { -"enum": [ -"ELEMENT_NODE", -"ATTRIBUTE_NODE", -"TEXT_NODE", -"CDATA_SECTION_NODE", -"ENTITY_REFERENCE_NODE", -"ENTITY_NODE", -"PROCESSING_INSTRUCTION_NODE", -"COMMENT_NODE", -"DOCUMENT_NODE", -"DOCUMENT_TYPE_NODE", -"DOCUMENT_FRAGMENT_NODE", -"NOTATION_NODE", -"XPATH_NAMESPACE_NODE", -"SHADOW_ROOT_NODE" -], -"enumDescriptions": [ -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"", -"" -], -"type": "string" -}, -"value": { -"description": "The node value is applicable for TEXT_NODEs, DOCUMENT_TYPE_NODEs, COMMENT_NODEs, and user input elements such as , and