From 7a476c174da33d8ea32d848fbe3a8c00638c3c0c Mon Sep 17 00:00:00 2001 From: imotai Date: Sun, 29 Jan 2023 15:02:25 +0800 Subject: [PATCH 1/9] feat: add firestore protobuf --- src/proto/proto/README.md | 2 + src/proto/proto/compile.sh | 27 + src/proto/proto/firestore/bundle.proto | 121 + src/proto/proto/firestore_bundle_proto.ts | 93 + src/proto/proto/firestore_proto_api.ts | 1289 ++++++++ src/proto/proto/google/api/annotations.proto | 31 + src/proto/proto/google/api/client.proto | 99 + .../proto/google/api/field_behavior.proto | 90 + src/proto/proto/google/api/http.proto | 375 +++ .../firestore/v1/aggregation_result.proto | 42 + .../proto/google/firestore/v1/common.proto | 83 + .../proto/google/firestore/v1/document.proto | 150 + .../proto/google/firestore/v1/firestore.proto | 980 ++++++ .../proto/google/firestore/v1/query.proto | 355 +++ .../proto/google/firestore/v1/write.proto | 264 ++ src/proto/proto/google/protobuf/any.proto | 155 + .../proto/google/protobuf/descriptor.proto | 882 +++++ src/proto/proto/google/protobuf/empty.proto | 52 + src/proto/proto/google/protobuf/struct.proto | 96 + .../proto/google/protobuf/timestamp.proto | 137 + .../proto/google/protobuf/wrappers.proto | 123 + src/proto/proto/google/rpc/status.proto | 47 + src/proto/proto/google/type/latlng.proto | 37 + src/proto/proto/protos.json | 2825 +++++++++++++++++ src/proto/proto/update.sh | 76 + 25 files changed, 8431 insertions(+) create mode 100644 src/proto/proto/README.md create mode 100755 src/proto/proto/compile.sh create mode 100644 src/proto/proto/firestore/bundle.proto create mode 100644 src/proto/proto/firestore_bundle_proto.ts create mode 100644 src/proto/proto/firestore_proto_api.ts create mode 100644 src/proto/proto/google/api/annotations.proto create mode 100644 src/proto/proto/google/api/client.proto create mode 100644 src/proto/proto/google/api/field_behavior.proto create mode 100644 src/proto/proto/google/api/http.proto create mode 100644 src/proto/proto/google/firestore/v1/aggregation_result.proto create mode 100644 src/proto/proto/google/firestore/v1/common.proto create mode 100644 src/proto/proto/google/firestore/v1/document.proto create mode 100644 src/proto/proto/google/firestore/v1/firestore.proto create mode 100644 src/proto/proto/google/firestore/v1/query.proto create mode 100644 src/proto/proto/google/firestore/v1/write.proto create mode 100644 src/proto/proto/google/protobuf/any.proto create mode 100644 src/proto/proto/google/protobuf/descriptor.proto create mode 100644 src/proto/proto/google/protobuf/empty.proto create mode 100644 src/proto/proto/google/protobuf/struct.proto create mode 100644 src/proto/proto/google/protobuf/timestamp.proto create mode 100644 src/proto/proto/google/protobuf/wrappers.proto create mode 100644 src/proto/proto/google/rpc/status.proto create mode 100644 src/proto/proto/google/type/latlng.proto create mode 100644 src/proto/proto/protos.json create mode 100755 src/proto/proto/update.sh diff --git a/src/proto/proto/README.md b/src/proto/proto/README.md new file mode 100644 index 00000000..24919e59 --- /dev/null +++ b/src/proto/proto/README.md @@ -0,0 +1,2 @@ +These protos are copied from https://github.com/googleapis/googleapis and +https://github.com/google/protobuf. Run update.sh to update them. diff --git a/src/proto/proto/compile.sh b/src/proto/proto/compile.sh new file mode 100755 index 00000000..26c46d1a --- /dev/null +++ b/src/proto/proto/compile.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2017 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +# Variables +PROTOS_DIR="." +PBJS="$(npm bin)/pbjs" + +"${PBJS}" --proto_path=. --target=json -o protos.json \ + -r firestore_v1 \ + "${PROTOS_DIR}/google/firestore/v1/*.proto" \ + "${PROTOS_DIR}/google/protobuf/*.proto" "${PROTOS_DIR}/google/type/*.proto" \ + "${PROTOS_DIR}/google/rpc/*.proto" "${PROTOS_DIR}/google/api/*.proto" diff --git a/src/proto/proto/firestore/bundle.proto b/src/proto/proto/firestore/bundle.proto new file mode 100644 index 00000000..ee7954e6 --- /dev/null +++ b/src/proto/proto/firestore/bundle.proto @@ -0,0 +1,121 @@ +// Copyright 2020 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This file defines the format of Firestore bundle file/stream. It is not a part of the +// Firestore API, only a specification used by Server and Client SDK to write and read +// bundles. + +syntax = "proto3"; + +package firestore; + +import "google/firestore/v1/document.proto"; +import "google/firestore/v1/query.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Firestore.Proto"; +option go_package = "google.golang.org/genproto/firestore/proto;firestore"; +option java_multiple_files = true; +option java_outer_classname = "BundleProto"; +option java_package = "com.google.firestore.proto"; +option objc_class_prefix = "FSTPB"; +option php_namespace = "Firestore\\Proto"; + +// Describes a query saved in the bundle. +message BundledQuery { + // The parent resource name. + string parent = 1; + + // The query to run. + oneof query_type { + // A structured query. + google.firestore.v1.StructuredQuery structured_query = 2; + } + + // If the query is a limit query, should the limit be applied to the beginning or + // the end of results. + enum LimitType { + FIRST = 0; + LAST = 1; + } + LimitType limit_type = 3; +} + +// A Query associated with a name, created as part of the bundle file, and can be read +// by client SDKs once the bundle containing them is loaded. +message NamedQuery { + // Name of the query, such that client can use the name to load this query + // from bundle, and resume from when the query results are materialized + // into this bundle. + string name = 1; + + // The query saved in the bundle. + BundledQuery bundled_query = 2; + + // The read time of the query, when it is used to build the bundle. This is useful to + // resume the query from the bundle, once it is loaded by client SDKs. 
+ google.protobuf.Timestamp read_time = 3; +} + +// Metadata describing a Firestore document saved in the bundle. +message BundledDocumentMetadata { + // The document key of a bundled document. + string name = 1; + + // The snapshot version of the document data bundled. + google.protobuf.Timestamp read_time = 2; + + // Whether the document exists. + bool exists = 3; + + // The names of the queries in this bundle that this document matches to. + repeated string queries = 4; +} + +// Metadata describing the bundle file/stream. +message BundleMetadata { + // The ID of the bundle. + string id = 1; + + // Time at which the documents snapshot is taken for this bundle. + google.protobuf.Timestamp create_time = 2; + + // The schema version of the bundle. + uint32 version = 3; + + // The number of documents in the bundle. + uint32 total_documents = 4; + + // The size of the bundle in bytes, excluding this `BundleMetadata`. + uint64 total_bytes = 5; +} + +// A Firestore bundle is a length-prefixed stream of JSON representations of +// `BundleElement`. +// Only one `BundleMetadata` is expected, and it should be the first element. +// The named queries follow after `metadata`. If a document exists when the +// bundle is built, `document_metadata` is immediately followed by the +// `document`, otherwise `document_metadata` will present by itself. +message BundleElement { + oneof element_type { + BundleMetadata metadata = 1; + + NamedQuery named_query = 2; + + BundledDocumentMetadata document_metadata = 3; + + google.firestore.v1.Document document = 4; + } +} diff --git a/src/proto/proto/firestore_bundle_proto.ts b/src/proto/proto/firestore_bundle_proto.ts new file mode 100644 index 00000000..d51da229 --- /dev/null +++ b/src/proto/proto/firestore_bundle_proto.ts @@ -0,0 +1,93 @@ +/** + * @license + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { StructuredQuery, Timestamp, Document } from './firestore_proto_api'; + +/** Properties of a BundledQuery. */ +export interface BundledQuery { + /** BundledQuery parent */ + parent?: string | null; + + /** BundledQuery structuredQuery */ + structuredQuery?: StructuredQuery | null; + + /** BundledQuery limitType */ + limitType?: LimitType | null; +} + +/** LimitType enum. */ +export type LimitType = 'FIRST' | 'LAST'; + +/** Properties of a NamedQuery. */ +export interface NamedQuery { + /** NamedQuery name */ + name?: string | null; + + /** NamedQuery bundledQuery */ + bundledQuery?: BundledQuery | null; + + /** NamedQuery readTime */ + readTime?: Timestamp | null; +} + +/** Properties of a BundledDocumentMetadata. */ +export interface BundledDocumentMetadata { + /** BundledDocumentMetadata name */ + name?: string | null; + + /** BundledDocumentMetadata readTime */ + readTime?: Timestamp | null; + + /** BundledDocumentMetadata exists */ + exists?: boolean | null; + + /** The names of the queries in this bundle that this document matches to. */ + queries?: string[]; +} + +/** Properties of a BundleMetadata. 
*/ +export interface BundleMetadata { + /** BundleMetadata id */ + id?: string | null; + + /** BundleMetadata createTime */ + createTime?: Timestamp | null; + + /** BundleMetadata version */ + version?: number | null; + + /** BundleMetadata totalDocuments */ + totalDocuments?: number | null; + + /** BundleMetadata totalBytes */ + totalBytes?: number | null; +} + +/** Properties of a BundleElement. */ +export interface BundleElement { + /** BundleElement metadata */ + metadata?: BundleMetadata | null; + + /** BundleElement namedQuery */ + namedQuery?: NamedQuery | null; + + /** BundleElement documentMetadata */ + documentMetadata?: BundledDocumentMetadata | null; + + /** BundleElement document */ + document?: Document | null; +} diff --git a/src/proto/proto/firestore_proto_api.ts b/src/proto/proto/firestore_proto_api.ts new file mode 100644 index 00000000..46b00e0e --- /dev/null +++ b/src/proto/proto/firestore_proto_api.ts @@ -0,0 +1,1289 @@ +/** + * @license + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Rather than pull these in from other protos, we just alias them to any. +/* + eslint-disable + camelcase, @typescript-eslint/no-explicit-any, + @typescript-eslint/naming-convention +*/ +export declare type ApiClientHookFactory = any; +export declare type PromiseRequestService = any; +export interface ApiClientObjectMap { + [k: string]: T; +} +export declare type Timestamp = + | string + | { seconds?: string | number; nanos?: number }; + +export declare type CompositeFilterOp = 'OPERATOR_UNSPECIFIED' | 'AND' | 'OR'; +export interface ICompositeFilterOpEnum { + OPERATOR_UNSPECIFIED: CompositeFilterOp; + AND: CompositeFilterOp; + values(): CompositeFilterOp[]; +} +export declare const CompositeFilterOpEnum: ICompositeFilterOpEnum; +export declare type FieldFilterOp = + | 'OPERATOR_UNSPECIFIED' + | 'LESS_THAN' + | 'LESS_THAN_OR_EQUAL' + | 'GREATER_THAN' + | 'GREATER_THAN_OR_EQUAL' + | 'EQUAL' + | 'NOT_EQUAL' + | 'ARRAY_CONTAINS' + | 'IN' + | 'ARRAY_CONTAINS_ANY' + | 'NOT_IN'; +export interface IFieldFilterOpEnum { + OPERATOR_UNSPECIFIED: FieldFilterOp; + LESS_THAN: FieldFilterOp; + LESS_THAN_OR_EQUAL: FieldFilterOp; + GREATER_THAN: FieldFilterOp; + GREATER_THAN_OR_EQUAL: FieldFilterOp; + EQUAL: FieldFilterOp; + NOT_EQUAL: FieldFilterOp; + ARRAY_CONTAINS: FieldFilterOp; + IN: FieldFilterOp; + ARRAY_CONTAINS_ANY: FieldFilterOp; + NOT_IN: FieldFilterOp; + values(): FieldFilterOp[]; +} +export declare const FieldFilterOpEnum: IFieldFilterOpEnum; +export declare type FieldTransformSetToServerValue = + | 'SERVER_VALUE_UNSPECIFIED' + | 'REQUEST_TIME'; +export interface IFieldTransformSetToServerValueEnum { + SERVER_VALUE_UNSPECIFIED: FieldTransformSetToServerValue; + REQUEST_TIME: FieldTransformSetToServerValue; + values(): FieldTransformSetToServerValue[]; +} +export declare const FieldTransformSetToServerValueEnum: IFieldTransformSetToServerValueEnum; +export declare type IndexFieldMode = + | 'MODE_UNSPECIFIED' + | 'ASCENDING' + | 'DESCENDING'; 
+export interface IIndexFieldModeEnum { + MODE_UNSPECIFIED: IndexFieldMode; + ASCENDING: IndexFieldMode; + DESCENDING: IndexFieldMode; + values(): IndexFieldMode[]; +} +export declare const IndexFieldModeEnum: IIndexFieldModeEnum; +export declare type IndexState = + | 'STATE_UNSPECIFIED' + | 'CREATING' + | 'READY' + | 'ERROR'; +export interface IIndexStateEnum { + STATE_UNSPECIFIED: IndexState; + CREATING: IndexState; + READY: IndexState; + ERROR: IndexState; + values(): IndexState[]; +} +export declare const IndexStateEnum: IIndexStateEnum; +export declare type OrderDirection = + | 'DIRECTION_UNSPECIFIED' + | 'ASCENDING' + | 'DESCENDING'; +export interface IOrderDirectionEnum { + DIRECTION_UNSPECIFIED: OrderDirection; + ASCENDING: OrderDirection; + DESCENDING: OrderDirection; + values(): OrderDirection[]; +} +export declare const OrderDirectionEnum: IOrderDirectionEnum; +export declare type TargetChangeTargetChangeType = + | 'NO_CHANGE' + | 'ADD' + | 'REMOVE' + | 'CURRENT' + | 'RESET'; +export interface ITargetChangeTargetChangeTypeEnum { + NO_CHANGE: TargetChangeTargetChangeType; + ADD: TargetChangeTargetChangeType; + REMOVE: TargetChangeTargetChangeType; + CURRENT: TargetChangeTargetChangeType; + RESET: TargetChangeTargetChangeType; + values(): TargetChangeTargetChangeType[]; +} +export declare const TargetChangeTargetChangeTypeEnum: ITargetChangeTargetChangeTypeEnum; +export declare type UnaryFilterOp = + | 'OPERATOR_UNSPECIFIED' + | 'IS_NAN' + | 'IS_NULL' + | 'IS_NOT_NAN' + | 'IS_NOT_NULL'; +export interface IUnaryFilterOpEnum { + OPERATOR_UNSPECIFIED: UnaryFilterOp; + IS_NAN: UnaryFilterOp; + IS_NULL: UnaryFilterOp; + IS_NOT_NAN: UnaryFilterOp; + IS_NOT_NULL: UnaryFilterOp; + values(): UnaryFilterOp[]; +} +export declare const UnaryFilterOpEnum: IUnaryFilterOpEnum; +export declare type ValueNullValue = 'NULL_VALUE'; +export interface IValueNullValueEnum { + NULL_VALUE: ValueNullValue; + values(): ValueNullValue[]; +} +export declare const ValueNullValueEnum: IValueNullValueEnum; +export declare namespace firestoreV1ApiClientInterfaces { + interface ArrayValue { + values?: Value[]; + } + interface BatchGetDocumentsRequest { + database?: string; + documents?: string[]; + mask?: DocumentMask; + transaction?: string; + newTransaction?: TransactionOptions; + readTime?: string; + } + interface BatchGetDocumentsResponse { + found?: Document; + missing?: string; + transaction?: string; + readTime?: string; + } + interface BeginTransactionRequest { + options?: TransactionOptions; + } + interface BeginTransactionResponse { + transaction?: string; + } + interface CollectionSelector { + collectionId?: string; + allDescendants?: boolean; + } + interface CommitRequest { + database?: string; + writes?: Write[]; + transaction?: string; + } + interface CommitResponse { + writeResults?: WriteResult[]; + commitTime?: string; + } + interface CompositeFilter { + op?: CompositeFilterOp; + filters?: Filter[]; + } + interface Cursor { + values?: Value[]; + before?: boolean; + } + interface Document { + name?: string; + fields?: ApiClientObjectMap; + createTime?: Timestamp; + updateTime?: Timestamp; + } + interface DocumentChange { + document?: Document; + targetIds?: number[]; + removedTargetIds?: number[]; + } + interface DocumentDelete { + document?: string; + removedTargetIds?: number[]; + readTime?: Timestamp; + } + interface DocumentMask { + fieldPaths?: string[]; + } + interface DocumentRemove { + document?: string; + removedTargetIds?: number[]; + readTime?: string; + } + interface DocumentTransform 
{ + document?: string; + fieldTransforms?: FieldTransform[]; + } + interface DocumentsTarget { + documents?: string[]; + } + interface Empty {} + interface ExistenceFilter { + targetId?: number; + count?: number; + } + interface FieldFilter { + field?: FieldReference; + op?: FieldFilterOp; + value?: Value; + } + interface FieldReference { + fieldPath?: string; + } + interface FieldTransform { + fieldPath?: string; + setToServerValue?: FieldTransformSetToServerValue; + appendMissingElements?: ArrayValue; + removeAllFromArray?: ArrayValue; + increment?: Value; + } + interface Filter { + compositeFilter?: CompositeFilter; + fieldFilter?: FieldFilter; + unaryFilter?: UnaryFilter; + } + interface Index { + name?: string; + collectionId?: string; + fields?: IndexField[]; + state?: IndexState; + } + interface IndexField { + fieldPath?: string; + mode?: IndexFieldMode; + } + interface LatLng { + latitude?: number; + longitude?: number; + } + interface ListCollectionIdsRequest { + pageSize?: number; + pageToken?: string; + } + interface ListCollectionIdsResponse { + collectionIds?: string[]; + nextPageToken?: string; + } + interface ListDocumentsResponse { + documents?: Document[]; + nextPageToken?: string; + } + interface ListIndexesResponse { + indexes?: Index[]; + nextPageToken?: string; + } + interface ListenRequest { + addTarget?: Target; + removeTarget?: number; + labels?: ApiClientObjectMap; + } + interface ListenResponse { + targetChange?: TargetChange; + documentChange?: DocumentChange; + documentDelete?: DocumentDelete; + documentRemove?: DocumentRemove; + filter?: ExistenceFilter; + } + interface MapValue { + fields?: ApiClientObjectMap; + } + interface Operation { + name?: string; + metadata?: ApiClientObjectMap; + done?: boolean; + error?: Status; + response?: ApiClientObjectMap; + } + interface Order { + field?: FieldReference; + direction?: OrderDirection; + } + interface Precondition { + exists?: boolean; + updateTime?: Timestamp; + } + interface Projection { + fields?: FieldReference[]; + } + interface QueryTarget { + parent?: string; + structuredQuery?: StructuredQuery; + } + interface ReadOnly { + readTime?: string; + } + interface ReadWrite { + retryTransaction?: string; + } + interface RollbackRequest { + transaction?: string; + } + interface RunQueryRequest { + parent?: string; + structuredQuery?: StructuredQuery; + transaction?: string; + newTransaction?: TransactionOptions; + readTime?: string; + } + interface RunQueryResponse { + transaction?: string; + document?: Document; + readTime?: string; + skippedResults?: number; + } + interface RunAggregationQueryRequest { + parent?: string; + structuredAggregationQuery?: StructuredAggregationQuery; + transaction?: string; + newTransaction?: TransactionOptions; + readTime?: string; + } + interface RunAggregationQueryResponse { + result?: AggregationResult; + transaction?: string; + readTime?: string; + } + interface AggregationResult { + aggregateFields?: ApiClientObjectMap; + } + interface StructuredAggregationQuery { + structuredQuery?: StructuredQuery; + aggregations?: Aggregation[]; + } + interface Aggregation { + count?: Count; + alias?: string; + } + interface Count { + upTo?: number; + } + interface Status { + code?: number; + message?: string; + details?: Array>; + } + interface StructuredQuery { + select?: Projection; + from?: CollectionSelector[]; + where?: Filter; + orderBy?: Order[]; + startAt?: Cursor; + endAt?: Cursor; + offset?: number; + limit?: number | { value: number }; + } + interface Target { + query?: 
QueryTarget; + documents?: DocumentsTarget; + resumeToken?: string | Uint8Array; + readTime?: Timestamp; + targetId?: number; + once?: boolean; + } + interface TargetChange { + targetChangeType?: TargetChangeTargetChangeType; + targetIds?: number[]; + cause?: Status; + resumeToken?: string | Uint8Array; + readTime?: Timestamp; + } + interface TransactionOptions { + readOnly?: ReadOnly; + readWrite?: ReadWrite; + } + interface UnaryFilter { + op?: UnaryFilterOp; + field?: FieldReference; + } + interface Value { + nullValue?: ValueNullValue; + booleanValue?: boolean; + integerValue?: string | number; + doubleValue?: string | number; + timestampValue?: Timestamp; + stringValue?: string; + bytesValue?: string | Uint8Array; + referenceValue?: string; + geoPointValue?: LatLng; + arrayValue?: ArrayValue; + mapValue?: MapValue; + } + interface Write { + update?: Document; + delete?: string; + verify?: string; + transform?: DocumentTransform; + updateMask?: DocumentMask; + updateTransforms?: FieldTransform[]; + currentDocument?: Precondition; + } + interface WriteRequest { + streamId?: string; + writes?: Write[]; + streamToken?: string | Uint8Array; + labels?: ApiClientObjectMap; + } + interface WriteResponse { + streamId?: string; + streamToken?: string | Uint8Array; + writeResults?: WriteResult[]; + commitTime?: Timestamp; + } + interface WriteResult { + updateTime?: Timestamp; + transformResults?: Value[]; + } +} +export declare type ArrayValue = firestoreV1ApiClientInterfaces.ArrayValue; +export declare type BatchGetDocumentsRequest = + firestoreV1ApiClientInterfaces.BatchGetDocumentsRequest; +export declare type BatchGetDocumentsResponse = + firestoreV1ApiClientInterfaces.BatchGetDocumentsResponse; +export declare type BeginTransactionRequest = + firestoreV1ApiClientInterfaces.BeginTransactionRequest; +export declare type BeginTransactionResponse = + firestoreV1ApiClientInterfaces.BeginTransactionResponse; +export declare type CollectionSelector = + firestoreV1ApiClientInterfaces.CollectionSelector; +export declare type CommitRequest = + firestoreV1ApiClientInterfaces.CommitRequest; +export declare type CommitResponse = + firestoreV1ApiClientInterfaces.CommitResponse; +export declare type CompositeFilter = + firestoreV1ApiClientInterfaces.CompositeFilter; +export declare type Cursor = firestoreV1ApiClientInterfaces.Cursor; +export declare type Document = firestoreV1ApiClientInterfaces.Document; +export declare type DocumentChange = + firestoreV1ApiClientInterfaces.DocumentChange; +export declare type DocumentDelete = + firestoreV1ApiClientInterfaces.DocumentDelete; +export declare type DocumentMask = firestoreV1ApiClientInterfaces.DocumentMask; +export declare type DocumentRemove = + firestoreV1ApiClientInterfaces.DocumentRemove; +export declare type DocumentTransform = + firestoreV1ApiClientInterfaces.DocumentTransform; +export declare type DocumentsTarget = + firestoreV1ApiClientInterfaces.DocumentsTarget; +export declare type Empty = firestoreV1ApiClientInterfaces.Empty; +export declare type ExistenceFilter = + firestoreV1ApiClientInterfaces.ExistenceFilter; +export declare type FieldFilter = firestoreV1ApiClientInterfaces.FieldFilter; +export declare type FieldReference = + firestoreV1ApiClientInterfaces.FieldReference; +export declare type FieldTransform = + firestoreV1ApiClientInterfaces.FieldTransform; +export declare type Filter = firestoreV1ApiClientInterfaces.Filter; +export declare type Index = firestoreV1ApiClientInterfaces.Index; +export declare type IndexField = 
firestoreV1ApiClientInterfaces.IndexField; +export declare type LatLng = firestoreV1ApiClientInterfaces.LatLng; +export declare type ListCollectionIdsRequest = + firestoreV1ApiClientInterfaces.ListCollectionIdsRequest; +export declare type ListCollectionIdsResponse = + firestoreV1ApiClientInterfaces.ListCollectionIdsResponse; +export declare type ListDocumentsResponse = + firestoreV1ApiClientInterfaces.ListDocumentsResponse; +export declare type ListIndexesResponse = + firestoreV1ApiClientInterfaces.ListIndexesResponse; +export declare type ListenRequest = + firestoreV1ApiClientInterfaces.ListenRequest; +export declare type ListenResponse = + firestoreV1ApiClientInterfaces.ListenResponse; +export declare type MapValue = firestoreV1ApiClientInterfaces.MapValue; +export declare type Operation = firestoreV1ApiClientInterfaces.Operation; +export declare type Order = firestoreV1ApiClientInterfaces.Order; +export declare type Precondition = firestoreV1ApiClientInterfaces.Precondition; +export declare type Projection = firestoreV1ApiClientInterfaces.Projection; +export declare type QueryTarget = firestoreV1ApiClientInterfaces.QueryTarget; +export declare type ReadOnly = firestoreV1ApiClientInterfaces.ReadOnly; +export declare type ReadWrite = firestoreV1ApiClientInterfaces.ReadWrite; +export declare type RollbackRequest = + firestoreV1ApiClientInterfaces.RollbackRequest; +export declare type RunQueryRequest = + firestoreV1ApiClientInterfaces.RunQueryRequest; +export declare type RunQueryResponse = + firestoreV1ApiClientInterfaces.RunQueryResponse; +export declare type RunAggregationQueryRequest = + firestoreV1ApiClientInterfaces.RunAggregationQueryRequest; +export declare type RunAggregationQueryResponse = + firestoreV1ApiClientInterfaces.RunAggregationQueryResponse; +export declare type Status = firestoreV1ApiClientInterfaces.Status; +export declare type StructuredQuery = + firestoreV1ApiClientInterfaces.StructuredQuery; +export declare type Target = firestoreV1ApiClientInterfaces.Target; +export declare type TargetChange = firestoreV1ApiClientInterfaces.TargetChange; +export declare type TransactionOptions = + firestoreV1ApiClientInterfaces.TransactionOptions; +export declare type UnaryFilter = firestoreV1ApiClientInterfaces.UnaryFilter; +export declare type Value = firestoreV1ApiClientInterfaces.Value; +export declare type Write = firestoreV1ApiClientInterfaces.Write; +export declare type WriteRequest = firestoreV1ApiClientInterfaces.WriteRequest; +export declare type WriteResponse = + firestoreV1ApiClientInterfaces.WriteResponse; +export declare type WriteResult = firestoreV1ApiClientInterfaces.WriteResult; +export declare type ProjectsDatabasesDocumentsApiClient$Xgafv = '1' | '2'; +export interface IProjectsDatabasesDocumentsApiClient$XgafvEnum { + 1: ProjectsDatabasesDocumentsApiClient$Xgafv; + 2: ProjectsDatabasesDocumentsApiClient$Xgafv; + values(): ProjectsDatabasesDocumentsApiClient$Xgafv[]; +} +export declare const ProjectsDatabasesDocumentsApiClient$XgafvEnum: IProjectsDatabasesDocumentsApiClient$XgafvEnum; +export declare type ProjectsDatabasesDocumentsApiClientAlt = + | 'json' + | 'media' + | 'proto'; +export interface IProjectsDatabasesDocumentsApiClientAltEnum { + JSON: ProjectsDatabasesDocumentsApiClientAlt; + MEDIA: ProjectsDatabasesDocumentsApiClientAlt; + PROTO: ProjectsDatabasesDocumentsApiClientAlt; + values(): ProjectsDatabasesDocumentsApiClientAlt[]; +} +export declare const ProjectsDatabasesDocumentsApiClientAltEnum: IProjectsDatabasesDocumentsApiClientAltEnum; +export 
interface ProjectsDatabasesDocumentsBatchGetNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsBeginTransactionNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsCommitNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsCreateDocumentNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; + documentId?: string; + maskFieldPaths?: string[]; +} +export interface ProjectsDatabasesDocumentsDeleteNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; + currentDocumentExists?: boolean; + currentDocumentUpdateTime?: string; +} +export interface ProjectsDatabasesDocumentsGetNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; + maskFieldPaths?: string[]; + transaction?: string; + readTime?: string; +} +export interface ProjectsDatabasesDocumentsListCollectionIdsNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsListNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; + 
pageSize?: number; + pageToken?: string; + orderBy?: string; + maskFieldPaths?: string[]; + transaction?: string; + readTime?: string; + showMissing?: boolean; +} +export interface ProjectsDatabasesDocumentsListenNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsPatchNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; + updateMaskFieldPaths?: string[]; + maskFieldPaths?: string[]; + currentDocumentExists?: boolean; + currentDocumentUpdateTime?: string; +} +export interface ProjectsDatabasesDocumentsRollbackNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsRunQueryNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export interface ProjectsDatabasesDocumentsWriteNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesDocumentsApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; +} +export abstract class ProjectsDatabasesDocumentsApiClient { + private constructor() {} + abstract batchGet( + database: string, + $requestBody: BatchGetDocumentsRequest, + __namedParams__?: ProjectsDatabasesDocumentsBatchGetNamedParameters & object + ): Promise; + abstract beginTransaction( + database: string, + $requestBody: BeginTransactionRequest, + __namedParams__?: ProjectsDatabasesDocumentsBeginTransactionNamedParameters & + object + ): Promise; + abstract commit( + database: string, + $requestBody: CommitRequest, + __namedParams__?: ProjectsDatabasesDocumentsCommitNamedParameters & object + ): Promise; + abstract createDocument( + parent: string, + collectionId: string, + $requestBody: Document, + __namedParams__?: ProjectsDatabasesDocumentsCreateDocumentNamedParameters & + object + ): Promise; + abstract delete( + name: string, + __namedParams__?: ProjectsDatabasesDocumentsDeleteNamedParameters & object + ): Promise; + abstract get( + name: string, + __namedParams__?: ProjectsDatabasesDocumentsGetNamedParameters & object + ): Promise; + abstract list( + parent: string, + collectionId: string, + __namedParams__?: 
ProjectsDatabasesDocumentsListNamedParameters & object + ): Promise; + abstract listCollectionIds( + parent: string, + $requestBody: ListCollectionIdsRequest, + __namedParams__?: ProjectsDatabasesDocumentsListCollectionIdsNamedParameters & + object + ): Promise; + abstract listen( + database: string, + $requestBody: ListenRequest, + __namedParams__?: ProjectsDatabasesDocumentsListenNamedParameters & object + ): Promise; + abstract patch( + name: string, + $requestBody: Document, + __namedParams__?: ProjectsDatabasesDocumentsPatchNamedParameters & object + ): Promise; + abstract rollback( + database: string, + $requestBody: RollbackRequest, + __namedParams__?: ProjectsDatabasesDocumentsRollbackNamedParameters & object + ): Promise; + abstract runQuery( + parent: string, + $requestBody: RunQueryRequest, + __namedParams__?: ProjectsDatabasesDocumentsRunQueryNamedParameters & object + ): Promise; + abstract write( + database: string, + $requestBody: WriteRequest, + __namedParams__?: ProjectsDatabasesDocumentsWriteNamedParameters & object + ): Promise; +} +export declare class ProjectsDatabasesDocumentsApiClientImpl + implements ProjectsDatabasesDocumentsApiClient +{ + private gapiVersion; + private $apiClient; + constructor( + gapiVersion: string, + gapiRequestService: PromiseRequestService, + apiClientHookFactory?: ApiClientHookFactory | null + ); + batchGet( + database: string, + $requestBody: BatchGetDocumentsRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsBatchGetNamedParameters & object + ): Promise; + beginTransaction( + database: string, + $requestBody: BeginTransactionRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsBeginTransactionNamedParameters & object + ): Promise; + commit( + database: string, + $requestBody: CommitRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsCommitNamedParameters & object + ): Promise; + createDocument( + parent: string, + collectionId: string, + $requestBody: Document, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + documentId, + fields, + key, + maskFieldPaths, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsCreateDocumentNamedParameters & object + ): Promise; + delete( + name: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + currentDocumentExists, + currentDocumentUpdateTime, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsDeleteNamedParameters & object + ): Promise; + get( + name: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + maskFieldPaths, + oauth_token, + pp, + prettyPrint, + quotaUser, + readTime, + transaction, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsGetNamedParameters & object + ): Promise; + list( + parent: string, + collectionId: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + maskFieldPaths, + oauth_token, + orderBy, + pageSize, + pageToken, + pp, + prettyPrint, + quotaUser, + readTime, + 
showMissing, + transaction, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsListNamedParameters & object + ): Promise; + listCollectionIds( + parent: string, + $requestBody: ListCollectionIdsRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsListCollectionIdsNamedParameters & object + ): Promise; + listen( + database: string, + $requestBody: ListenRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsListenNamedParameters & object + ): Promise; + patch( + name: string, + $requestBody: Document, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + currentDocumentExists, + currentDocumentUpdateTime, + fields, + key, + maskFieldPaths, + oauth_token, + pp, + prettyPrint, + quotaUser, + updateMaskFieldPaths, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsPatchNamedParameters & object + ): Promise; + rollback( + database: string, + $requestBody: RollbackRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsRollbackNamedParameters & object + ): Promise; + runQuery( + parent: string, + $requestBody: RunQueryRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsRunQueryNamedParameters & object + ): Promise; + write( + database: string, + $requestBody: WriteRequest, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesDocumentsWriteNamedParameters & object + ): Promise; +} +export declare type ProjectsDatabasesIndexesApiClient$Xgafv = '1' | '2'; +export interface IProjectsDatabasesIndexesApiClient$XgafvEnum { + 1: ProjectsDatabasesIndexesApiClient$Xgafv; + 2: ProjectsDatabasesIndexesApiClient$Xgafv; + values(): ProjectsDatabasesIndexesApiClient$Xgafv[]; +} +export declare const ProjectsDatabasesIndexesApiClient$XgafvEnum: IProjectsDatabasesIndexesApiClient$XgafvEnum; +export declare type ProjectsDatabasesIndexesApiClientAlt = + | 'json' + | 'media' + | 'proto'; +export interface IProjectsDatabasesIndexesApiClientAltEnum { + JSON: ProjectsDatabasesIndexesApiClientAlt; + MEDIA: ProjectsDatabasesIndexesApiClientAlt; + PROTO: ProjectsDatabasesIndexesApiClientAlt; + values(): ProjectsDatabasesIndexesApiClientAlt[]; +} +export declare const ProjectsDatabasesIndexesApiClientAltEnum: IProjectsDatabasesIndexesApiClientAltEnum; +export interface ProjectsDatabasesIndexesCreateNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesIndexesApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; +} +export interface ProjectsDatabasesIndexesDeleteNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesIndexesApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: 
string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; +} +export interface ProjectsDatabasesIndexesGetNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesIndexesApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; +} +export interface ProjectsDatabasesIndexesListNamedParameters { + access_token?: string; + alt?: ProjectsDatabasesIndexesApiClientAlt; + bearer_token?: string; + callback?: string; + fields?: string; + key?: string; + oauth_token?: string; + pp?: boolean; + prettyPrint?: boolean; + quotaUser?: string; + upload_protocol?: string; + uploadType?: string; + $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; + filter?: string; + pageSize?: number; + pageToken?: string; +} +export abstract class ProjectsDatabasesIndexesApiClient { + private constructor() {} + abstract create( + parent: string, + $requestBody: Index, + __namedParams__?: ProjectsDatabasesIndexesCreateNamedParameters & object + ): Promise; + abstract delete( + name: string, + __namedParams__?: ProjectsDatabasesIndexesDeleteNamedParameters & object + ): Promise; + abstract get( + name: string, + __namedParams__?: ProjectsDatabasesIndexesGetNamedParameters & object + ): Promise; + abstract list( + parent: string, + __namedParams__?: ProjectsDatabasesIndexesListNamedParameters & object + ): Promise; +} +export declare class ProjectsDatabasesIndexesApiClientImpl + implements ProjectsDatabasesIndexesApiClient +{ + private gapiVersion; + private $apiClient; + constructor( + gapiVersion: string, + gapiRequestService: PromiseRequestService, + apiClientHookFactory?: ApiClientHookFactory | null + ); + create( + parent: string, + $requestBody: Index, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesIndexesCreateNamedParameters & object + ): Promise; + delete( + name: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesIndexesDeleteNamedParameters & object + ): Promise; + get( + name: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + key, + oauth_token, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesIndexesGetNamedParameters & object + ): Promise; + list( + parent: string, + { + $Xgafv, + access_token, + alt, + bearer_token, + callback, + fields, + filter, + key, + oauth_token, + pageSize, + pageToken, + pp, + prettyPrint, + quotaUser, + uploadType, + upload_protocol + }?: ProjectsDatabasesIndexesListNamedParameters & object + ): Promise; +} diff --git a/src/proto/proto/google/api/annotations.proto b/src/proto/proto/google/api/annotations.proto new file mode 100644 index 00000000..efdab3db --- /dev/null +++ b/src/proto/proto/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/src/proto/proto/google/api/client.proto b/src/proto/proto/google/api/client.proto new file mode 100644 index 00000000..3b3fd0c4 --- /dev/null +++ b/src/proto/proto/google/api/client.proto @@ -0,0 +1,99 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. 
+ // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; +} diff --git a/src/proto/proto/google/api/field_behavior.proto b/src/proto/proto/google/api/field_behavior.proto new file mode 100644 index 00000000..c4abe3b6 --- /dev/null +++ b/src/proto/proto/google/api/field_behavior.proto @@ -0,0 +1,90 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. 
+ // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; +} diff --git a/src/proto/proto/google/api/http.proto b/src/proto/proto/google/api/http.proto new file mode 100644 index 00000000..113fa936 --- /dev/null +++ b/src/proto/proto/google/api/http.proto @@ -0,0 +1,375 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. 
Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. 
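+//
+// As a rough illustration of rules 1-3 above (a hypothetical TypeScript
+// sketch; the `Binding` shape and `splitRequest` helper are illustrative
+// only, not part of any published library), a client could split a request
+// message into path, query, and body pieces like this:
+//
+//     type Binding = { pathTemplate: string; body?: string };
+//
+//     function splitRequest(binding: Binding, request: Record<string, unknown>) {
+//       // Field names referenced by the path template (rule 1).
+//       const pathFields = new Set(
+//         [...binding.pathTemplate.matchAll(/\{(\w+)[=}]/g)].map(m => m[1]));
+//       const path: Record<string, unknown> = {};
+//       const query: Record<string, unknown> = {};
+//       let body: unknown;
+//       if (binding.body === "*") body = {};  // rule 2: everything else goes in the body
+//       for (const [name, value] of Object.entries(request)) {
+//         if (pathFields.has(name)) path[name] = value;
+//         else if (binding.body === "*") (body as Record<string, unknown>)[name] = value;
+//         else if (binding.body === name) body = value;  // field named by `body`
+//         else query[name] = value;           // rule 3: remaining fields as query params
+//       }
+//       return { path, query, body };
+//     }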
+// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. 
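+//
+// For illustration, the client-side expansion described above could be
+// approximated in TypeScript as follows (hypothetical helpers, not part of
+// any published library; note that encodeURIComponent additionally leaves
+// `!*'()` unescaped, so it only approximates the `[-_.~0-9a-zA-Z]` rule):
+//
+//     // Single segment variable: percent-encode everything, including "/".
+//     function expandSingleSegment(value: string): string {
+//       return encodeURIComponent(value);
+//     }
+//
+//     // Multi segment variable: keep "/" as a literal segment separator.
+//     function expandMultiSegment(value: string): string {
+//       return value.split("/").map(encodeURIComponent).join("/");
+//     }
+//
+//     expandSingleSegment("rooms/a b");  // "rooms%2Fa%20b"
+//     expandMultiSegment("rooms/a b");   // "rooms/a%20b"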
+// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/src/proto/proto/google/firestore/v1/aggregation_result.proto b/src/proto/proto/google/firestore/v1/aggregation_result.proto new file mode 100644 index 00000000..538e3fef --- /dev/null +++ b/src/proto/proto/google/firestore/v1/aggregation_result.proto @@ -0,0 +1,42 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/firestore/v1/document.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "AggregationResultProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// The result of a single bucket from a Firestore aggregation query. +// +// The keys of `aggregate_fields` are the same for all results in an aggregation +// query, unlike document queries which can have different fields present for +// each result. +message AggregationResult { + // The result of the aggregation functions, ex: `COUNT(*) AS total_docs`. + // + // The key is the [alias][google.firestore.v1.StructuredAggregationQuery.Aggregation.alias] + // assigned to the aggregation function on input and the size of this map + // equals the number of aggregation functions in the query. + map aggregate_fields = 2; +} diff --git a/src/proto/proto/google/firestore/v1/common.proto b/src/proto/proto/google/firestore/v1/common.proto new file mode 100644 index 00000000..3bc978ca --- /dev/null +++ b/src/proto/proto/google/firestore/v1/common.proto @@ -0,0 +1,83 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// A set of field paths on a document. +// Used to restrict a get or update operation on a document to a subset of its +// fields. +// This is different from standard field masks, as this is always scoped to a +// [Document][google.firestore.v1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1.Value]. +message DocumentMask { + // The list of field paths in the mask. See [Document.fields][google.firestore.v1.Document.fields] for a field + // path syntax reference. 
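+  //
+  // For illustration, a mask selecting two fields, written as the proto3
+  // JSON object a TypeScript client might send (the field names here are
+  // hypothetical):
+  //
+  //     const mask = { fieldPaths: ["title", "owner.displayName"] };
+  //
+  // Per the HTTP transcoding rules, on a GET request such a mask would
+  // typically surface as repeated `mask.fieldPaths` query parameters.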
+ repeated string field_paths = 1; +} + +// A precondition on a document, used for conditional operations. +message Precondition { + // The type of precondition. + oneof condition_type { + // When set to `true`, the target document must exist. + // When set to `false`, the target document must not exist. + bool exists = 1; + + // When set, the target document must exist and have been last updated at + // that time. + google.protobuf.Timestamp update_time = 2; + } +} + +// Options for creating a new transaction. +message TransactionOptions { + // Options for a transaction that can be used to read and write documents. + message ReadWrite { + // An optional transaction to retry. + bytes retry_transaction = 1; + } + + // Options for a transaction that can only be used to read documents. + message ReadOnly { + // The consistency mode for this transaction. If not set, defaults to strong + // consistency. + oneof consistency_selector { + // Reads documents at the given time. + // This may not be older than 60 seconds. + google.protobuf.Timestamp read_time = 2; + } + } + + // The mode of the transaction. + oneof mode { + // The transaction can only be used for read operations. + ReadOnly read_only = 2; + + // The transaction can be used for both read and write operations. + ReadWrite read_write = 3; + } +} diff --git a/src/proto/proto/google/firestore/v1/document.proto b/src/proto/proto/google/firestore/v1/document.proto new file mode 100644 index 00000000..5238a943 --- /dev/null +++ b/src/proto/proto/google/firestore/v1/document.proto @@ -0,0 +1,150 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/type/latlng.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "DocumentProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// A Firestore document. +// +// Must not exceed 1 MiB - 4 bytes. +message Document { + // The resource name of the document, for example + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string name = 1; + + // The document's fields. + // + // The map keys represent field names. + // + // A simple field name contains only characters `a` to `z`, `A` to `Z`, + // `0` to `9`, or `_`, and must not start with `0` to `9`. For example, + // `foo_bar_17`. + // + // Field names matching the regular expression `__.*__` are reserved. Reserved + // field names are forbidden except in certain documented contexts. The map + // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be + // empty. 
+ // + // Field paths may be used in other contexts to refer to structured fields + // defined here. For `map_value`, the field path is represented by the simple + // or quoted field names of the containing fields, delimited by `.`. For + // example, the structured field + // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be + // represented by the field path `foo.x&y`. + // + // Within a field path, a quoted field name starts and ends with `` ` `` and + // may contain any character. Some characters, including `` ` ``, must be + // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and + // `` `bak\`tik` `` represents `` bak`tik ``. + map fields = 2; + + // Output only. The time at which the document was created. + // + // This value increases monotonically when a document is deleted then + // recreated. It can also be compared to values from other documents and + // the `read_time` of a query. + google.protobuf.Timestamp create_time = 3; + + // Output only. The time at which the document was last changed. + // + // This value is initially set to the `create_time` then increases + // monotonically with each change to the document. It can also be + // compared to values from other documents and the `read_time` of a query. + google.protobuf.Timestamp update_time = 4; +} + +// A message that can hold any of the supported value types. +message Value { + // Must have a value set. + oneof value_type { + // A null value. + google.protobuf.NullValue null_value = 11; + + // A boolean value. + bool boolean_value = 1; + + // An integer value. + int64 integer_value = 2; + + // A double value. + double double_value = 3; + + // A timestamp value. + // + // Precise only to microseconds. When stored, any additional precision is + // rounded down. + google.protobuf.Timestamp timestamp_value = 10; + + // A string value. + // + // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. + // Only the first 1,500 bytes of the UTF-8 representation are considered by + // queries. + string string_value = 17; + + // A bytes value. + // + // Must not exceed 1 MiB - 89 bytes. + // Only the first 1,500 bytes are considered by queries. + bytes bytes_value = 18; + + // A reference to a document. For example: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string reference_value = 5; + + // A geo point value representing a point on the surface of Earth. + google.type.LatLng geo_point_value = 8; + + // An array value. + // + // Cannot directly contain another array value, though can contain an + // map which contains another array. + ArrayValue array_value = 9; + + // A map value. + MapValue map_value = 6; + } +} + +// An array value. +message ArrayValue { + // Values in the array. + repeated Value values = 1; +} + +// A map value. +message MapValue { + // The map's fields. + // + // The map keys represent field names. Field names matching the regular + // expression `__.*__` are reserved. Reserved field names are forbidden except + // in certain documented contexts. The map keys, represented as UTF-8, must + // not exceed 1,500 bytes and cannot be empty. 
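+  //
+  // For illustration, a hypothetical map value holding a string and an
+  // integer, in the proto3 JSON form a TypeScript client would send
+  // (64-bit integers are encoded as JSON strings):
+  //
+  //     const value = {
+  //       mapValue: {
+  //         fields: {
+  //           title: { stringValue: "hello" },
+  //           count: { integerValue: "42" },
+  //         },
+  //       },
+  //     };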
+ map fields = 1; +} diff --git a/src/proto/proto/google/firestore/v1/firestore.proto b/src/proto/proto/google/firestore/v1/firestore.proto new file mode 100644 index 00000000..aefbe716 --- /dev/null +++ b/src/proto/proto/google/firestore/v1/firestore.proto @@ -0,0 +1,980 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/firestore/v1/aggregation_result.proto"; +import "google/firestore/v1/common.proto"; +import "google/firestore/v1/document.proto"; +import "google/firestore/v1/query.proto"; +import "google/firestore/v1/write.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "FirestoreProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// Specification of the Firestore API. + +// The Cloud Firestore service. +// +// Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL +// document database that simplifies storing, syncing, and querying data for +// your mobile, web, and IoT apps at global scale. Its client libraries provide +// live synchronization and offline support, while its security features and +// integrations with Firebase and Google Cloud Platform (GCP) accelerate +// building truly serverless apps. +service Firestore { + option (google.api.default_host) = "firestore.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/datastore"; + + // Gets a single document. + rpc GetDocument(GetDocumentRequest) returns (Document) { + option (google.api.http) = { + get: "/v1/{name=projects/*/databases/*/documents/*/**}" + }; + } + + // Lists documents. + rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" + }; + } + + // Updates or inserts a document. + rpc UpdateDocument(UpdateDocumentRequest) returns (Document) { + option (google.api.http) = { + patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}" + body: "document" + }; + option (google.api.method_signature) = "document,update_mask"; + } + + // Deletes a document. + rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/databases/*/documents/*/**}" + }; + option (google.api.method_signature) = "name"; + } + + // Gets multiple documents. 
+ // + // Documents returned by this method are not guaranteed to be returned in the + // same order that they were requested. + rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:batchGet" + body: "*" + }; + } + + // Starts a new transaction. + rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction" + body: "*" + }; + option (google.api.method_signature) = "database"; + } + + // Commits a transaction, while optionally updating documents. + rpc Commit(CommitRequest) returns (CommitResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:commit" + body: "*" + }; + option (google.api.method_signature) = "database,writes"; + } + + // Rolls back a transaction. + rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:rollback" + body: "*" + }; + option (google.api.method_signature) = "database,transaction"; + } + + // Runs a query. + rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/documents}:runQuery" + body: "*" + additional_bindings { + post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery" + body: "*" + } + }; + } + + // Runs an aggregation query. + // + // Rather than producing [Document][google.firestore.v1.Document] results like [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery], + // this API allows running an aggregation to produce a series of + // [AggregationResult][google.firestore.v1.AggregationResult] server-side. + // + // High-Level Example: + // + // ``` + // -- Return the number of documents in table given a filter. + // SELECT COUNT(*) FROM ( SELECT * FROM k where a = true ); + // ``` + rpc RunAggregationQuery(RunAggregationQueryRequest) returns (stream RunAggregationQueryResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery" + body: "*" + additional_bindings { + post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery" + body: "*" + } + }; + } + + // Partitions a query by returning partition cursors that can be used to run + // the query in parallel. The returned partition cursors are split points that + // can be used by RunQuery as starting/end points for the query results. + rpc PartitionQuery(PartitionQueryRequest) returns (PartitionQueryResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery" + body: "*" + additional_bindings { + post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery" + body: "*" + } + }; + } + + // Streams batches of document updates and deletes, in order. + rpc Write(stream WriteRequest) returns (stream WriteResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:write" + body: "*" + }; + } + + // Listens to changes. + rpc Listen(stream ListenRequest) returns (stream ListenResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:listen" + body: "*" + }; + } + + // Lists all the collection IDs underneath a document. 
+ rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds" + body: "*" + additional_bindings { + post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds" + body: "*" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Applies a batch of write operations. + // + // The BatchWrite method does not apply the write operations atomically + // and can apply them out of order. Method does not allow more than one write + // per document. Each write succeeds or fails independently. See the + // [BatchWriteResponse][google.firestore.v1.BatchWriteResponse] for the success status of each write. + // + // If you require an atomically applied set of writes, use + // [Commit][google.firestore.v1.Firestore.Commit] instead. + rpc BatchWrite(BatchWriteRequest) returns (BatchWriteResponse) { + option (google.api.http) = { + post: "/v1/{database=projects/*/databases/*}/documents:batchWrite" + body: "*" + }; + } + + // Creates a new document. + rpc CreateDocument(CreateDocumentRequest) returns (Document) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}" + body: "document" + }; + } +} + +// The request for [Firestore.GetDocument][google.firestore.v1.Firestore.GetDocument]. +message GetDocumentRequest { + // Required. The resource name of the Document to get. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + DocumentMask mask = 2; + + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. + oneof consistency_selector { + // Reads the document in a transaction. + bytes transaction = 3; + + // Reads the version of the document at the given time. + // This may not be older than 270 seconds. + google.protobuf.Timestamp read_time = 5; + } +} + +// The request for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments]. +message ListDocumentsRequest { + // Required. The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms` + // or `messages`. + string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of documents to return. + int32 page_size = 3; + + // The `next_page_token` value returned from a previous List request, if any. + string page_token = 4; + + // The order to sort results by. For example: `priority desc, name`. + string order_by = 6; + + // The fields to return. If not set, returns all fields. + // + // If a document has a field that is not present in this mask, that field + // will not be returned in the response. + DocumentMask mask = 7; + + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. 
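+  //
+  // For illustration, a hypothetical list request pinned to a snapshot by
+  // setting `read_time`, in proto3 JSON form (identifiers are placeholders):
+  //
+  //     const listRequest = {
+  //       parent: "projects/p/databases/(default)/documents",
+  //       collectionId: "rooms",
+  //       pageSize: 20,
+  //       readTime: "2023-01-29T07:02:25Z",
+  //     };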
+ oneof consistency_selector { + // Reads documents in a transaction. + bytes transaction = 8; + + // Reads documents as they were at the given time. + // This may not be older than 270 seconds. + google.protobuf.Timestamp read_time = 10; + } + + // If the list should show missing documents. A missing document is a + // document that does not exist but has sub-documents. These documents will + // be returned with a key but will not have fields, [Document.create_time][google.firestore.v1.Document.create_time], + // or [Document.update_time][google.firestore.v1.Document.update_time] set. + // + // Requests with `show_missing` may not specify `where` or + // `order_by`. + bool show_missing = 12; +} + +// The response for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments]. +message ListDocumentsResponse { + // The Documents found. + repeated Document documents = 1; + + // The next page token. + string next_page_token = 2; +} + +// The request for [Firestore.CreateDocument][google.firestore.v1.Firestore.CreateDocument]. +message CreateDocumentRequest { + // Required. The parent resource. For example: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`. + string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The client-assigned document ID to use for this document. + // + // Optional. If not specified, an ID will be assigned by the service. + string document_id = 3; + + // Required. The document to create. `name` must not be set. + Document document = 4 [(google.api.field_behavior) = REQUIRED]; + + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + DocumentMask mask = 5; +} + +// The request for [Firestore.UpdateDocument][google.firestore.v1.Firestore.UpdateDocument]. +message UpdateDocumentRequest { + // Required. The updated document. + // Creates the document if it does not already exist. + Document document = 1 [(google.api.field_behavior) = REQUIRED]; + + // The fields to update. + // None of the field paths in the mask may contain a reserved name. + // + // If the document exists on the server and has fields not referenced in the + // mask, they are left unchanged. + // Fields referenced in the mask, but not present in the input document, are + // deleted from the document on the server. + DocumentMask update_mask = 2; + + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + DocumentMask mask = 3; + + // An optional precondition on the document. + // The request will fail if this is set and not met by the target document. + Precondition current_document = 4; +} + +// The request for [Firestore.DeleteDocument][google.firestore.v1.Firestore.DeleteDocument]. +message DeleteDocumentRequest { + // Required. The resource name of the Document to delete. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // An optional precondition on the document. + // The request will fail if this is set and not met by the target document. 
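+  //
+  // For illustration, the two precondition forms defined in common.proto,
+  // written as the proto3 JSON objects a TypeScript client might send
+  // (timestamps use RFC 3339 strings in JSON):
+  //
+  //     const mustExist = { exists: true };
+  //     const unchangedSince = { updateTime: "2023-01-29T07:02:25Z" };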
+ Precondition current_document = 2; +} + +// The request for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments]. +message BatchGetDocumentsRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The names of the documents to retrieve. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // The request will fail if any of the document is not a child resource of the + // given `database`. Duplicate names will be elided. + repeated string documents = 2; + + // The fields to return. If not set, returns all fields. + // + // If a document has a field that is not present in this mask, that field will + // not be returned in the response. + DocumentMask mask = 3; + + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. + oneof consistency_selector { + // Reads documents in a transaction. + bytes transaction = 4; + + // Starts a new transaction and reads the documents. + // Defaults to a read-only transaction. + // The new transaction ID will be returned as the first response in the + // stream. + TransactionOptions new_transaction = 5; + + // Reads documents as they were at the given time. + // This may not be older than 270 seconds. + google.protobuf.Timestamp read_time = 7; + } +} + +// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments]. +message BatchGetDocumentsResponse { + // A single result. + // This can be empty if the server is just returning a transaction. + oneof result { + // A document that was requested. + Document found = 1; + + // A document name that was requested but does not exist. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string missing = 2; + } + + // The transaction that was started as part of this request. + // Will only be set in the first response, and only if + // [BatchGetDocumentsRequest.new_transaction][google.firestore.v1.BatchGetDocumentsRequest.new_transaction] was set in the request. + bytes transaction = 3; + + // The time at which the document was read. + // This may be monotically increasing, in this case the previous documents in + // the result stream are guaranteed not to have changed between their + // read_time and this one. + google.protobuf.Timestamp read_time = 4; +} + +// The request for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction]. +message BeginTransactionRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The options for the transaction. + // Defaults to a read-write transaction. + TransactionOptions options = 2; +} + +// The response for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction]. +message BeginTransactionResponse { + // The transaction that was started. + bytes transaction = 1; +} + +// The request for [Firestore.Commit][google.firestore.v1.Firestore.Commit]. +message CommitRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The writes to apply. + // + // Always executed atomically and in order. 
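+  //
+  // For illustration, a hypothetical commit payload with one update and one
+  // delete, in proto3 JSON form (the `Write` shape is defined in
+  // write.proto; resource names here are placeholders):
+  //
+  //     const commitBody = {
+  //       writes: [
+  //         {
+  //           update: {
+  //             name: "projects/p/databases/(default)/documents/rooms/r1",
+  //             fields: { title: { stringValue: "hello" } },
+  //           },
+  //         },
+  //         { delete: "projects/p/databases/(default)/documents/rooms/r2" },
+  //       ],
+  //     };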
+ repeated Write writes = 2; + + // If set, applies all writes in this transaction, and commits it. + bytes transaction = 3; +} + +// The response for [Firestore.Commit][google.firestore.v1.Firestore.Commit]. +message CommitResponse { + // The result of applying the writes. + // + // This i-th write result corresponds to the i-th write in the + // request. + repeated WriteResult write_results = 1; + + // The time at which the commit occurred. Any read with an equal or greater + // `read_time` is guaranteed to see the effects of the commit. + google.protobuf.Timestamp commit_time = 2; +} + +// The request for [Firestore.Rollback][google.firestore.v1.Firestore.Rollback]. +message RollbackRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The transaction to roll back. + bytes transaction = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery]. +message RunQueryRequest { + // Required. The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // The query to run. + oneof query_type { + // A structured query. + StructuredQuery structured_query = 2; + } + + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. + oneof consistency_selector { + // Reads documents in a transaction. + bytes transaction = 5; + + // Starts a new transaction and reads the documents. + // Defaults to a read-only transaction. + // The new transaction ID will be returned as the first response in the + // stream. + TransactionOptions new_transaction = 6; + + // Reads documents as they were at the given time. + // This may not be older than 270 seconds. + google.protobuf.Timestamp read_time = 7; + } +} + +// The response for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery]. +message RunQueryResponse { + // The transaction that was started as part of this request. + // Can only be set in the first response, and only if + // [RunQueryRequest.new_transaction][google.firestore.v1.RunQueryRequest.new_transaction] was set in the request. + // If set, no other fields will be set in this response. + bytes transaction = 2; + + // A query result. + // Not set when reporting partial progress. + Document document = 1; + + // The time at which the document was read. This may be monotonically + // increasing; in this case, the previous documents in the result stream are + // guaranteed not to have changed between their `read_time` and this one. + // + // If the query returns no results, a response with `read_time` and no + // `document` will be sent, and this represents the time at which the query + // was run. + google.protobuf.Timestamp read_time = 3; + + // The number of results that have been skipped due to an offset between + // the last response and the current response. + int32 skipped_results = 4; +} + +// The request for [Firestore.RunAggregationQuery][google.firestore.v1.Firestore.RunAggregationQuery]. +message RunAggregationQueryRequest { + // Required. The parent resource name. 
In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // The query to run. + oneof query_type { + // An aggregation query. + StructuredAggregationQuery structured_aggregation_query = 2; + } + + // The consistency mode for the query, defaults to strong consistency. + oneof consistency_selector { + // Run the aggregation within an already active transaction. + // + // The value here is the opaque transaction ID to execute the query in. + bytes transaction = 4; + + // Starts a new transaction as part of the query, defaulting to read-only. + // + // The new transaction ID will be returned as the first response in the + // stream. + TransactionOptions new_transaction = 5; + + // Executes the query at the given timestamp. + // + // Requires: + // + // * Cannot be more than 270 seconds in the past. + google.protobuf.Timestamp read_time = 6; + } +} + +// The response for [Firestore.RunAggregationQuery][google.firestore.v1.Firestore.RunAggregationQuery]. +message RunAggregationQueryResponse { + // A single aggregation result. + // + // Not present when reporting partial progress or when the query produced + // zero results. + AggregationResult result = 1; + + // The transaction that was started as part of this request. + // + // Only present on the first response when the request requested to start + // a new transaction. + bytes transaction = 2; + + // The time at which the aggregate value is valid for. + google.protobuf.Timestamp read_time = 3; +} + +// The request for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery]. +message PartitionQueryRequest { + // Required. The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents`. + // Document resource names are not supported; only database resource names + // can be specified. + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // The query to partition. + oneof query_type { + // A structured query. + // Query must specify collection with all descendants and be ordered by name + // ascending. Other filters, order bys, limits, offsets, and start/end + // cursors are not supported. + StructuredQuery structured_query = 2; + } + + // The desired maximum number of partition points. + // The partitions may be returned across multiple pages of results. + // The number must be positive. The actual number of partitions + // returned may be fewer. + // + // For example, this may be set to one fewer than the number of parallel + // queries to be run, or in running a data pipeline job, one fewer than the + // number of workers or compute instances available. + int64 partition_count = 3; + + // The `next_page_token` value returned from a previous call to + // PartitionQuery that may be used to get an additional set of results. + // There are no ordering guarantees between sets of results. Thus, using + // multiple sets of results will require merging the different result sets. 
+ // + // For example, two subsequent calls using a page_token may return: + // + // * cursor B, cursor M, cursor Q + // * cursor A, cursor U, cursor W + // + // To obtain a complete result set ordered with respect to the results of the + // query supplied to PartitionQuery, the results sets should be merged: + // cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W + string page_token = 4; + + // The maximum number of partitions to return in this call, subject to + // `partition_count`. + // + // For example, if `partition_count` = 10 and `page_size` = 8, the first call + // to PartitionQuery will return up to 8 partitions and a `next_page_token` + // if more results exist. A second call to PartitionQuery will return up to + // 2 partitions, to complete the total of 10 specified in `partition_count`. + int32 page_size = 5; +} + +// The response for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery]. +message PartitionQueryResponse { + // Partition results. + // Each partition is a split point that can be used by RunQuery as a starting + // or end point for the query results. The RunQuery requests must be made with + // the same query supplied to this PartitionQuery request. The partition + // cursors will be ordered according to same ordering as the results of the + // query supplied to PartitionQuery. + // + // For example, if a PartitionQuery request returns partition cursors A and B, + // running the following three queries will return the entire result set of + // the original query: + // + // * query, end_at A + // * query, start_at A, end_at B + // * query, start_at B + // + // An empty result may indicate that the query has too few results to be + // partitioned. + repeated Cursor partitions = 1; + + // A page token that may be used to request an additional set of results, up + // to the number specified by `partition_count` in the PartitionQuery request. + // If blank, there are no more results. + string next_page_token = 2; +} + +// The request for [Firestore.Write][google.firestore.v1.Firestore.Write]. +// +// The first request creates a stream, or resumes an existing one from a token. +// +// When creating a new stream, the server replies with a response containing +// only an ID and a token, to use in the next request. +// +// When resuming a stream, the server first streams any responses later than the +// given token, then a response containing only an up-to-date token, to use in +// the next request. +message WriteRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + // This is only required in the first message. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The ID of the write stream to resume. + // This may only be set in the first message. When left empty, a new write + // stream will be created. + string stream_id = 2; + + // The writes to apply. + // + // Always executed atomically and in order. + // This must be empty on the first request. + // This may be empty on the last request. + // This must not be empty on all other requests. + repeated Write writes = 3; + + // A stream token that was previously sent by the server. + // + // The client should set this field to the token from the most recent + // [WriteResponse][google.firestore.v1.WriteResponse] it has received. This acknowledges that the client has + // received responses up to this token. After sending this token, earlier + // tokens may not be used anymore. 
+ // + // The server may close the stream if there are too many unacknowledged + // responses. + // + // Leave this field unset when creating a new stream. To resume a stream at + // a specific point, set this field and the `stream_id` field. + // + // Leave this field unset when creating a new stream. + bytes stream_token = 4; + + // Labels associated with this write request. + map labels = 5; +} + +// The response for [Firestore.Write][google.firestore.v1.Firestore.Write]. +message WriteResponse { + // The ID of the stream. + // Only set on the first message, when a new stream was created. + string stream_id = 1; + + // A token that represents the position of this response in the stream. + // This can be used by a client to resume the stream at this point. + // + // This field is always set. + bytes stream_token = 2; + + // The result of applying the writes. + // + // This i-th write result corresponds to the i-th write in the + // request. + repeated WriteResult write_results = 3; + + // The time at which the commit occurred. Any read with an equal or greater + // `read_time` is guaranteed to see the effects of the write. + google.protobuf.Timestamp commit_time = 4; +} + +// A request for [Firestore.Listen][google.firestore.v1.Firestore.Listen] +message ListenRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The supported target changes. + oneof target_change { + // A target to add to this stream. + Target add_target = 2; + + // The ID of a target to remove from this stream. + int32 remove_target = 3; + } + + // Labels associated with this target change. + map labels = 4; +} + +// The response for [Firestore.Listen][google.firestore.v1.Firestore.Listen]. +message ListenResponse { + // The supported responses. + oneof response_type { + // Targets have changed. + TargetChange target_change = 2; + + // A [Document][google.firestore.v1.Document] has changed. + DocumentChange document_change = 3; + + // A [Document][google.firestore.v1.Document] has been deleted. + DocumentDelete document_delete = 4; + + // A [Document][google.firestore.v1.Document] has been removed from a target (because it is no longer + // relevant to that target). + DocumentRemove document_remove = 6; + + // A filter to apply to the set of documents previously returned for the + // given target. + // + // Returned when documents may have been removed from the given target, but + // the exact documents are unknown. + ExistenceFilter filter = 5; + } +} + +// A specification of a set of documents to listen to. +message Target { + // A target specified by a set of documents names. + message DocumentsTarget { + // The names of the documents to retrieve. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // The request will fail if any of the document is not a child resource of + // the given `database`. Duplicate names will be elided. + repeated string documents = 2; + } + + // A target specified by a query. + message QueryTarget { + // The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + string parent = 1; + + // The query to run. 
+ oneof query_type { + // A structured query. + StructuredQuery structured_query = 2; + } + } + + // The type of target to listen to. + oneof target_type { + // A target specified by a query. + QueryTarget query = 2; + + // A target specified by a set of document names. + DocumentsTarget documents = 3; + } + + // When to start listening. + // + // If not specified, all matching Documents are returned before any + // subsequent changes. + oneof resume_type { + // A resume token from a prior [TargetChange][google.firestore.v1.TargetChange] for an identical target. + // + // Using a resume token with a different target is unsupported and may fail. + bytes resume_token = 4; + + // Start listening after a specific `read_time`. + // + // The client must know the state of matching documents at this time. + google.protobuf.Timestamp read_time = 11; + } + + // The target ID that identifies the target on the stream. Must be a positive + // number and non-zero. + int32 target_id = 5; + + // If the target should be removed once it is current and consistent. + bool once = 6; +} + +// Targets being watched have changed. +message TargetChange { + // The type of change. + enum TargetChangeType { + // No change has occurred. Used only to send an updated `resume_token`. + NO_CHANGE = 0; + + // The targets have been added. + ADD = 1; + + // The targets have been removed. + REMOVE = 2; + + // The targets reflect all changes committed before the targets were added + // to the stream. + // + // This will be sent after or with a `read_time` that is greater than or + // equal to the time at which the targets were added. + // + // Listeners can wait for this change if read-after-write semantics + // are desired. + CURRENT = 3; + + // The targets have been reset, and a new initial state for the targets + // will be returned in subsequent changes. + // + // After the initial state is complete, `CURRENT` will be returned even + // if the target was previously indicated to be `CURRENT`. + RESET = 4; + } + + // The type of change that occurred. + TargetChangeType target_change_type = 1; + + // The target IDs of targets that have changed. + // + // If empty, the change applies to all targets. + // + // The order of the target IDs is not defined. + repeated int32 target_ids = 2; + + // The error that resulted in this change, if applicable. + google.rpc.Status cause = 3; + + // A token that can be used to resume the stream for the given `target_ids`, + // or all targets if `target_ids` is empty. + // + // Not set on every target change. + bytes resume_token = 4; + + // The consistent `read_time` for the given `target_ids` (omitted when the + // target_ids are not at a consistent snapshot). + // + // The stream is guaranteed to send a `read_time` with `target_ids` empty + // whenever the entire stream reaches a new consistent snapshot. ADD, + // CURRENT, and RESET messages are guaranteed to (eventually) result in a + // new consistent snapshot (while NO_CHANGE and REMOVE messages are not). + // + // For a given stream, `read_time` is guaranteed to be monotonically + // increasing. + google.protobuf.Timestamp read_time = 6; +} + +// The request for [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. +message ListCollectionIdsRequest { + // Required. The parent document. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. 
+ // For example: + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return. + int32 page_size = 2; + + // A page token. Must be a value from + // [ListCollectionIdsResponse][google.firestore.v1.ListCollectionIdsResponse]. + string page_token = 3; +} + +// The response from [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. +message ListCollectionIdsResponse { + // The collection ids. + repeated string collection_ids = 1; + + // A page token that may be used to continue the list. + string next_page_token = 2; +} + +// The request for [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite]. +message BatchWriteRequest { + // Required. The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + string database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The writes to apply. + // + // Method does not apply writes atomically and does not guarantee ordering. + // Each write succeeds or fails independently. You cannot write to the same + // document more than once per request. + repeated Write writes = 2; + + // Labels associated with this batch write. + map labels = 3; +} + +// The response from [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite]. +message BatchWriteResponse { + // The result of applying the writes. + // + // This i-th write result corresponds to the i-th write in the + // request. + repeated WriteResult write_results = 1; + + // The status of applying the writes. + // + // This i-th write status corresponds to the i-th write in the + // request. + repeated google.rpc.Status status = 2; +} diff --git a/src/proto/proto/google/firestore/v1/query.proto b/src/proto/proto/google/firestore/v1/query.proto new file mode 100644 index 00000000..e3d95534 --- /dev/null +++ b/src/proto/proto/google/firestore/v1/query.proto @@ -0,0 +1,355 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/firestore/v1/document.proto"; +import "google/protobuf/wrappers.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "QueryProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// A Firestore query. +message StructuredQuery { + // A selection of a collection, such as `messages as m1`. + message CollectionSelector { + // The collection ID. + // When set, selects only collections with this ID. 
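+    //
+    // For illustration, a hypothetical selector in proto3 JSON form that
+    // targets every `messages` collection anywhere under the query parent:
+    //
+    //     const from = [{ collectionId: "messages", allDescendants: true }];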
+ string collection_id = 2; + + // When false, selects only collections that are immediate children of + // the `parent` specified in the containing `RunQueryRequest`. + // When true, selects all descendant collections. + bool all_descendants = 3; + } + + // A filter. + message Filter { + // The type of filter. + oneof filter_type { + // A composite filter. + CompositeFilter composite_filter = 1; + + // A filter on a document field. + FieldFilter field_filter = 2; + + // A filter that takes exactly one argument. + UnaryFilter unary_filter = 3; + } + } + + // A filter that merges multiple other filters using the given operator. + message CompositeFilter { + // A composite filter operator. + enum Operator { + // Unspecified. This value must not be used. + OPERATOR_UNSPECIFIED = 0; + + // Documents are required to satisfy all of the combined filters. + AND = 1; + + // Documents are required to satisfy at least one of the combined filters. + OR = 2; + } + + // The operator for combining multiple filters. + Operator op = 1; + + // The list of filters to combine. + // Must contain at least one filter. + repeated Filter filters = 2; + } + + // A filter on a specific field. + message FieldFilter { + // A field filter operator. + enum Operator { + // Unspecified. This value must not be used. + OPERATOR_UNSPECIFIED = 0; + + // The given `field` is less than the given `value`. + // + // Requires: + // + // * That `field` come first in `order_by`. + LESS_THAN = 1; + + // The given `field` is less than or equal to the given `value`. + // + // Requires: + // + // * That `field` come first in `order_by`. + LESS_THAN_OR_EQUAL = 2; + + // The given `field` is greater than the given `value`. + // + // Requires: + // + // * That `field` come first in `order_by`. + GREATER_THAN = 3; + + // The given `field` is greater than or equal to the given `value`. + // + // Requires: + // + // * That `field` come first in `order_by`. + GREATER_THAN_OR_EQUAL = 4; + + // The given `field` is equal to the given `value`. + EQUAL = 5; + + // The given `field` is not equal to the given `value`. + // + // Requires: + // + // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. + // * That `field` comes first in the `order_by`. + NOT_EQUAL = 6; + + // The given `field` is an array that contains the given `value`. + ARRAY_CONTAINS = 7; + + // The given `field` is equal to at least one value in the given array. + // + // Requires: + // + // * That `value` is a non-empty `ArrayValue` with at most 10 values. + // * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`. + IN = 8; + + // The given `field` is an array that contains any of the values in the + // given array. + // + // Requires: + // + // * That `value` is a non-empty `ArrayValue` with at most 10 values. + // * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`. + ARRAY_CONTAINS_ANY = 9; + + // The value of the `field` is not in the given array. + // + // Requires: + // + // * That `value` is a non-empty `ArrayValue` with at most 10 values. + // * No other `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`, `NOT_EQUAL`, + // `IS_NOT_NULL`, or `IS_NOT_NAN`. + // * That `field` comes first in the `order_by`. + NOT_IN = 10; + } + + // The field to filter by. + FieldReference field = 1; + + // The operator to filter by. + Operator op = 2; + + // The value to compare to. + Value value = 3; + } + + // A filter with a single operand. + message UnaryFilter { + // A unary operator. + enum Operator { + // Unspecified. This value must not be used. 
+ OPERATOR_UNSPECIFIED = 0; + + // The given `field` is equal to `NaN`. + IS_NAN = 2; + + // The given `field` is equal to `NULL`. + IS_NULL = 3; + + // The given `field` is not equal to `NaN`. + // + // Requires: + // + // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. + // * That `field` comes first in the `order_by`. + IS_NOT_NAN = 4; + + // The given `field` is not equal to `NULL`. + // + // Requires: + // + // * A single `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. + // * That `field` comes first in the `order_by`. + IS_NOT_NULL = 5; + } + + // The unary operator to apply. + Operator op = 1; + + // The argument to the filter. + oneof operand_type { + // The field to which to apply the operator. + FieldReference field = 2; + } + } + + // An order on a field. + message Order { + // The field to order by. + FieldReference field = 1; + + // The direction to order by. Defaults to `ASCENDING`. + Direction direction = 2; + } + + // A reference to a field, such as `max(messages.time) as max_time`. + message FieldReference { + string field_path = 2; + } + + // The projection of document's fields to return. + message Projection { + // The fields to return. + // + // If empty, all fields are returned. To only return the name + // of the document, use `['__name__']`. + repeated FieldReference fields = 2; + } + + // A sort direction. + enum Direction { + // Unspecified. + DIRECTION_UNSPECIFIED = 0; + + // Ascending. + ASCENDING = 1; + + // Descending. + DESCENDING = 2; + } + + // The projection to return. + Projection select = 1; + + // The collections to query. + repeated CollectionSelector from = 2; + + // The filter to apply. + Filter where = 3; + + // The order to apply to the query results. + // + // Firestore guarantees a stable ordering through the following rules: + // + // * Any field required to appear in `order_by`, that is not already + // specified in `order_by`, is appended to the order in field name order + // by default. + // * If an order on `__name__` is not specified, it is appended by default. + // + // Fields are appended with the same sort direction as the last order + // specified, or 'ASCENDING' if no order was specified. For example: + // + // * `SELECT * FROM Foo ORDER BY A` becomes + // `SELECT * FROM Foo ORDER BY A, __name__` + // * `SELECT * FROM Foo ORDER BY A DESC` becomes + // `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC` + // * `SELECT * FROM Foo WHERE A > 1` becomes + // `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__` + repeated Order order_by = 4; + + // A starting point for the query results. + Cursor start_at = 7; + + // A end point for the query results. + Cursor end_at = 8; + + // The number of results to skip. + // + // Applies before limit, but after all other constraints. Must be >= 0 if + // specified. + int32 offset = 6; + + // The maximum number of results to return. + // + // Applies after all other constraints. + // Must be >= 0 if specified. + google.protobuf.Int32Value limit = 5; +} + +message StructuredAggregationQuery { + // Defines a aggregation that produces a single result. + message Aggregation { + // Count of documents that match the query. + // + // The `COUNT(*)` aggregation function operates on the entire document + // so it does not require a field reference. + message Count { + // Optional. Optional constraint on the maximum number of documents to count. + // + // This provides a way to set an upper bound on the number of documents + // to scan, limiting latency and cost. 
+ // + // High-Level Example: + // + // ``` + // SELECT COUNT_UP_TO(1000) FROM ( SELECT * FROM k ); + // ``` + // + // Requires: + // + // * Must be greater than zero when present. + google.protobuf.Int64Value up_to = 1; + } + + // The type of aggregation to perform, required. + oneof operator { + // Count aggregator. + Count count = 1; + } + + // Required. The name of the field to store the result of the aggregation into. + // + // Requires: + // + // * Must be present. + // * Must be unique across all aggregation aliases. + // * Conform to existing [document field name][google.firestore.v1.Document.fields] limitations. + string alias = 7; + } + + // The base query to aggregate over. + oneof query_type { + // Nested structured query. + StructuredQuery structured_query = 1; + } + + // Optional. Series of aggregations to apply on top of the `structured_query`. + repeated Aggregation aggregations = 3; +} + +// A position in a query result set. +message Cursor { + // The values that represent a position, in the order they appear in + // the order by clause of a query. + // + // Can contain fewer values than specified in the order by clause. + repeated Value values = 1; + + // If the position is just before or just after the given values, relative + // to the sort order defined by the query. + bool before = 2; +} diff --git a/src/proto/proto/google/firestore/v1/write.proto b/src/proto/proto/google/firestore/v1/write.proto new file mode 100644 index 00000000..a9ac9832 --- /dev/null +++ b/src/proto/proto/google/firestore/v1/write.proto @@ -0,0 +1,264 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.v1; + +import "google/firestore/v1/common.proto"; +import "google/firestore/v1/document.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; +option java_multiple_files = true; +option java_outer_classname = "WriteProto"; +option java_package = "com.google.firestore.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\V1"; +option ruby_package = "Google::Cloud::Firestore::V1"; + +// A write on a document. +message Write { + // The operation to execute. + oneof operation { + // A document to write. + Document update = 1; + + // A document name to delete. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + string delete = 2; + + // The name of a document on which to verify the `current_document` + // precondition. + // This only requires read access to the document. + string verify = 5; + + // Applies a transformation to a document. + DocumentTransform transform = 6; + } + + // The fields to update in this write. + // + // This field can be set only when the operation is `update`. 
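+ // For example, a mask listing only `user.displayName` limits the update to
+ // that single field path.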
+ // If the mask is not set for an `update` and the document exists, any + // existing data will be overwritten. + // If the mask is set and the document on the server has fields not covered by + // the mask, they are left unchanged. + // Fields referenced in the mask, but not present in the input document, are + // deleted from the document on the server. + // The field paths in this mask must not contain a reserved field name. + DocumentMask update_mask = 3; + + // The transforms to perform after update. + // + // This field can be set only when the operation is `update`. If present, this + // write is equivalent to performing `update` and `transform` to the same + // document atomically and in order. + repeated DocumentTransform.FieldTransform update_transforms = 7; + + // An optional precondition on the document. + // + // The write will fail if this is set and not met by the target document. + Precondition current_document = 4; +} + +// A transformation of a document. +message DocumentTransform { + // A transformation of a field of the document. + message FieldTransform { + // A value that is calculated by the server. + enum ServerValue { + // Unspecified. This value must not be used. + SERVER_VALUE_UNSPECIFIED = 0; + + // The time at which the server processed the request, with millisecond + // precision. If used on multiple fields (same or different documents) in + // a transaction, all the fields will get the same server timestamp. + REQUEST_TIME = 1; + } + + // The path of the field. See [Document.fields][google.firestore.v1.Document.fields] for the field path syntax + // reference. + string field_path = 1; + + // The transformation to apply on the field. + oneof transform_type { + // Sets the field to the given server value. + ServerValue set_to_server_value = 2; + + // Adds the given value to the field's current value. + // + // This must be an integer or a double value. + // If the field is not an integer or double, or if the field does not yet + // exist, the transformation will set the field to the given value. + // If either of the given value or the current field value are doubles, + // both values will be interpreted as doubles. Double arithmetic and + // representation of double values follow IEEE 754 semantics. + // If there is positive/negative integer overflow, the field is resolved + // to the largest magnitude positive/negative integer. + Value increment = 3; + + // Sets the field to the maximum of its current value and the given value. + // + // This must be an integer or a double value. + // If the field is not an integer or double, or if the field does not yet + // exist, the transformation will set the field to the given value. + // If a maximum operation is applied where the field and the input value + // are of mixed types (that is - one is an integer and one is a double) + // the field takes on the type of the larger operand. If the operands are + // equivalent (e.g. 3 and 3.0), the field does not change. + // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and + // zero input value is always the stored value. + // The maximum of any numeric value x and NaN is NaN. + Value maximum = 4; + + // Sets the field to the minimum of its current value and the given value. + // + // This must be an integer or a double value. + // If the field is not an integer or double, or if the field does not yet + // exist, the transformation will set the field to the input value. 
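+ // For example, a minimum of 3 against a stored value of 5 sets the field to 3;
+ // against a stored value of 2, the field stays 2.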
+ // If a minimum operation is applied where the field and the input value + // are of mixed types (that is - one is an integer and one is a double) + // the field takes on the type of the smaller operand. If the operands are + // equivalent (e.g. 3 and 3.0), the field does not change. + // 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and + // zero input value is always the stored value. + // The minimum of any numeric value x and NaN is NaN. + Value minimum = 5; + + // Append the given elements in order if they are not already present in + // the current field value. + // If the field is not an array, or if the field does not yet exist, it is + // first set to the empty array. + // + // Equivalent numbers of different types (e.g. 3L and 3.0) are + // considered equal when checking if a value is missing. + // NaN is equal to NaN, and Null is equal to Null. + // If the input contains multiple equivalent values, only the first will + // be considered. + // + // The corresponding transform_result will be the null value. + ArrayValue append_missing_elements = 6; + + // Remove all of the given elements from the array in the field. + // If the field is not an array, or if the field does not yet exist, it is + // set to the empty array. + // + // Equivalent numbers of the different types (e.g. 3L and 3.0) are + // considered equal when deciding whether an element should be removed. + // NaN is equal to NaN, and Null is equal to Null. + // This will remove all equivalent values if there are duplicates. + // + // The corresponding transform_result will be the null value. + ArrayValue remove_all_from_array = 7; + } + } + + // The name of the document to transform. + string document = 1; + + // The list of transformations to apply to the fields of the document, in + // order. + // This must not be empty. + repeated FieldTransform field_transforms = 2; +} + +// The result of applying a write. +message WriteResult { + // The last update time of the document after applying the write. Not set + // after a `delete`. + // + // If the write did not actually change the document, this will be the + // previous update_time. + google.protobuf.Timestamp update_time = 1; + + // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1.DocumentTransform.FieldTransform], in the + // same order. + repeated Value transform_results = 2; +} + +// A [Document][google.firestore.v1.Document] has changed. +// +// May be the result of multiple [writes][google.firestore.v1.Write], including deletes, that +// ultimately resulted in a new value for the [Document][google.firestore.v1.Document]. +// +// Multiple [DocumentChange][google.firestore.v1.DocumentChange] messages may be returned for the same logical +// change, if multiple targets are affected. +message DocumentChange { + // The new state of the [Document][google.firestore.v1.Document]. + // + // If `mask` is set, contains only fields that were updated or added. + Document document = 1; + + // A set of target IDs of targets that match this document. + repeated int32 target_ids = 5; + + // A set of target IDs for targets that no longer match this document. + repeated int32 removed_target_ids = 6; +} + +// A [Document][google.firestore.v1.Document] has been deleted. +// +// May be the result of multiple [writes][google.firestore.v1.Write], including updates, the +// last of which deleted the [Document][google.firestore.v1.Document]. 
+// +// Multiple [DocumentDelete][google.firestore.v1.DocumentDelete] messages may be returned for the same logical +// delete, if multiple targets are affected. +message DocumentDelete { + // The resource name of the [Document][google.firestore.v1.Document] that was deleted. + string document = 1; + + // A set of target IDs for targets that previously matched this entity. + repeated int32 removed_target_ids = 6; + + // The read timestamp at which the delete was observed. + // + // Greater or equal to the `commit_time` of the delete. + google.protobuf.Timestamp read_time = 4; +} + +// A [Document][google.firestore.v1.Document] has been removed from the view of the targets. +// +// Sent if the document is no longer relevant to a target and is out of view. +// Can be sent instead of a DocumentDelete or a DocumentChange if the server +// can not send the new value of the document. +// +// Multiple [DocumentRemove][google.firestore.v1.DocumentRemove] messages may be returned for the same logical +// write or delete, if multiple targets are affected. +message DocumentRemove { + // The resource name of the [Document][google.firestore.v1.Document] that has gone out of view. + string document = 1; + + // A set of target IDs for targets that previously matched this document. + repeated int32 removed_target_ids = 2; + + // The read timestamp at which the remove was observed. + // + // Greater or equal to the `commit_time` of the change/delete/remove. + google.protobuf.Timestamp read_time = 4; +} + +// A digest of all the documents that match a given target. +message ExistenceFilter { + // The target ID to which this filter applies. + int32 target_id = 1; + + // The total count of documents that match [target_id][google.firestore.v1.ExistenceFilter.target_id]. + // + // If different from the count of documents in the client that match, the + // client must manually determine which documents no longer match the target. + int32 count = 2; +} diff --git a/src/proto/proto/google/protobuf/any.proto b/src/proto/proto/google/protobuf/any.proto new file mode 100644 index 00000000..c9be8541 --- /dev/null +++ b/src/proto/proto/google/protobuf/any.proto @@ -0,0 +1,155 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. This string must contain at least
+ // one "/" character. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
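+ // For example, the default type URL for [google.protobuf.Duration][] is
+ // `type.googleapis.com/google.protobuf.Duration`.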
+ // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/src/proto/proto/google/protobuf/descriptor.proto b/src/proto/proto/google/protobuf/descriptor.proto new file mode 100644 index 00000000..d5d794f5 --- /dev/null +++ b/src/proto/proto/google/protobuf/descriptor.proto @@ -0,0 +1,882 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
+ + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. 
A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). 
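+ // For example, this file sets `option java_outer_classname = "DescriptorProtos";`,
+ // so the generated Java classes are nested inside DescriptorProtos.java.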
+ optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. 
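+ // For example, this file sets `option csharp_namespace = "Google.Protobuf.Reflection";`.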
+ optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. 
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ optional bool map_entry = 7;
+
+ reserved 8; // javalite_serializable
+ reserved 9; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message FieldOptions {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ optional CType ctype = 1 [default = STRING];
+ enum CType {
+ // Default mode.
+ STRING = 0;
+
+ CORD = 1;
+
+ STRING_PIECE = 2;
+ }
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ optional bool packed = 2;
+
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // number. Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ optional JSType jstype = 6 [default = JS_NORMAL];
+ enum JSType {
+ // Use the default type.
+ JS_NORMAL = 0;
+
+ // Use JavaScript strings.
+ JS_STRING = 1;
+
+ // Use JavaScript numbers.
+ JS_NUMBER = 2;
+ }
+
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
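+ // For example, a large sub-message that is read only rarely is a natural
+ // candidate for lazy parsing.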
+ // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. 
We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
+message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. 
Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/src/proto/proto/google/protobuf/empty.proto b/src/proto/proto/google/protobuf/empty.proto new file mode 100644 index 00000000..03cacd23 --- /dev/null +++ b/src/proto/proto/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/src/proto/proto/google/protobuf/struct.proto b/src/proto/proto/google/protobuf/struct.proto new file mode 100644 index 00000000..7d7808e7 --- /dev/null +++ b/src/proto/proto/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/src/proto/proto/google/protobuf/timestamp.proto b/src/proto/proto/google/protobuf/timestamp.proto new file mode 100644 index 00000000..2b9e26a9 --- /dev/null +++ b/src/proto/proto/google/protobuf/timestamp.proto @@ -0,0 +1,137 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/src/proto/proto/google/protobuf/wrappers.proto b/src/proto/proto/google/protobuf/wrappers.proto new file mode 100644 index 00000000..9ee41e38 --- /dev/null +++ b/src/proto/proto/google/protobuf/wrappers.proto @@ -0,0 +1,123 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. 
+ bytes value = 1; +} diff --git a/src/proto/proto/google/rpc/status.proto b/src/proto/proto/google/rpc/status.proto new file mode 100644 index 00000000..3b1f7a93 --- /dev/null +++ b/src/proto/proto/google/rpc/status.proto @@ -0,0 +1,47 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/src/proto/proto/google/type/latlng.proto b/src/proto/proto/google/type/latlng.proto new file mode 100644 index 00000000..9231456e --- /dev/null +++ b/src/proto/proto/google/type/latlng.proto @@ -0,0 +1,37 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.type; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/type/latlng;latlng"; +option java_multiple_files = true; +option java_outer_classname = "LatLngProto"; +option java_package = "com.google.type"; +option objc_class_prefix = "GTP"; + +// An object that represents a latitude/longitude pair. This is expressed as a +// pair of doubles to represent degrees latitude and degrees longitude. 
Unless +// specified otherwise, this must conform to the +// WGS84 +// standard. Values must be within normalized ranges. +message LatLng { + // The latitude in degrees. It must be in the range [-90.0, +90.0]. + double latitude = 1; + + // The longitude in degrees. It must be in the range [-180.0, +180.0]. + double longitude = 2; +} diff --git a/src/proto/proto/protos.json b/src/proto/proto/protos.json new file mode 100644 index 00000000..2b6cac8d --- /dev/null +++ b/src/proto/proto/protos.json @@ -0,0 +1,2825 @@ +{ + "nested": { + "google": { + "nested": { + "protobuf": { + "options": { + "csharp_namespace": "Google.Protobuf.WellKnownTypes", + "go_package": "github.com/golang/protobuf/ptypes/wrappers", + "java_package": "com.google.protobuf", + "java_outer_classname": "WrappersProto", + "java_multiple_files": true, + "objc_class_prefix": "GPB", + "cc_enable_arenas": true, + "optimize_for": "SPEED" + }, + "nested": { + "Timestamp": { + "fields": { + "seconds": { + "type": "int64", + "id": 1 + }, + "nanos": { + "type": "int32", + "id": 2 + } + } + }, + "FileDescriptorSet": { + "fields": { + "file": { + "rule": "repeated", + "type": "FileDescriptorProto", + "id": 1 + } + } + }, + "FileDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "package": { + "type": "string", + "id": 2 + }, + "dependency": { + "rule": "repeated", + "type": "string", + "id": 3 + }, + "publicDependency": { + "rule": "repeated", + "type": "int32", + "id": 10, + "options": { + "packed": false + } + }, + "weakDependency": { + "rule": "repeated", + "type": "int32", + "id": 11, + "options": { + "packed": false + } + }, + "messageType": { + "rule": "repeated", + "type": "DescriptorProto", + "id": 4 + }, + "enumType": { + "rule": "repeated", + "type": "EnumDescriptorProto", + "id": 5 + }, + "service": { + "rule": "repeated", + "type": "ServiceDescriptorProto", + "id": 6 + }, + "extension": { + "rule": "repeated", + "type": "FieldDescriptorProto", + "id": 7 + }, + "options": { + "type": "FileOptions", + "id": 8 + }, + "sourceCodeInfo": { + "type": "SourceCodeInfo", + "id": 9 + }, + "syntax": { + "type": "string", + "id": 12 + } + } + }, + "DescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "field": { + "rule": "repeated", + "type": "FieldDescriptorProto", + "id": 2 + }, + "extension": { + "rule": "repeated", + "type": "FieldDescriptorProto", + "id": 6 + }, + "nestedType": { + "rule": "repeated", + "type": "DescriptorProto", + "id": 3 + }, + "enumType": { + "rule": "repeated", + "type": "EnumDescriptorProto", + "id": 4 + }, + "extensionRange": { + "rule": "repeated", + "type": "ExtensionRange", + "id": 5 + }, + "oneofDecl": { + "rule": "repeated", + "type": "OneofDescriptorProto", + "id": 8 + }, + "options": { + "type": "MessageOptions", + "id": 7 + }, + "reservedRange": { + "rule": "repeated", + "type": "ReservedRange", + "id": 9 + }, + "reservedName": { + "rule": "repeated", + "type": "string", + "id": 10 + } + }, + "nested": { + "ExtensionRange": { + "fields": { + "start": { + "type": "int32", + "id": 1 + }, + "end": { + "type": "int32", + "id": 2 + } + } + }, + "ReservedRange": { + "fields": { + "start": { + "type": "int32", + "id": 1 + }, + "end": { + "type": "int32", + "id": 2 + } + } + } + } + }, + "FieldDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "number": { + "type": "int32", + "id": 3 + }, + "label": { + "type": "Label", + "id": 4 + }, + "type": { + "type": "Type", + "id": 5 + }, + "typeName": { + "type": "string", + 
"id": 6 + }, + "extendee": { + "type": "string", + "id": 2 + }, + "defaultValue": { + "type": "string", + "id": 7 + }, + "oneofIndex": { + "type": "int32", + "id": 9 + }, + "jsonName": { + "type": "string", + "id": 10 + }, + "options": { + "type": "FieldOptions", + "id": 8 + } + }, + "nested": { + "Type": { + "values": { + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18 + } + }, + "Label": { + "values": { + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3 + } + } + } + }, + "OneofDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "options": { + "type": "OneofOptions", + "id": 2 + } + } + }, + "EnumDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "value": { + "rule": "repeated", + "type": "EnumValueDescriptorProto", + "id": 2 + }, + "options": { + "type": "EnumOptions", + "id": 3 + } + } + }, + "EnumValueDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "number": { + "type": "int32", + "id": 2 + }, + "options": { + "type": "EnumValueOptions", + "id": 3 + } + } + }, + "ServiceDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "method": { + "rule": "repeated", + "type": "MethodDescriptorProto", + "id": 2 + }, + "options": { + "type": "ServiceOptions", + "id": 3 + } + } + }, + "MethodDescriptorProto": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "inputType": { + "type": "string", + "id": 2 + }, + "outputType": { + "type": "string", + "id": 3 + }, + "options": { + "type": "MethodOptions", + "id": 4 + }, + "clientStreaming": { + "type": "bool", + "id": 5 + }, + "serverStreaming": { + "type": "bool", + "id": 6 + } + } + }, + "FileOptions": { + "fields": { + "javaPackage": { + "type": "string", + "id": 1 + }, + "javaOuterClassname": { + "type": "string", + "id": 8 + }, + "javaMultipleFiles": { + "type": "bool", + "id": 10 + }, + "javaGenerateEqualsAndHash": { + "type": "bool", + "id": 20, + "options": { + "deprecated": true + } + }, + "javaStringCheckUtf8": { + "type": "bool", + "id": 27 + }, + "optimizeFor": { + "type": "OptimizeMode", + "id": 9, + "options": { + "default": "SPEED" + } + }, + "goPackage": { + "type": "string", + "id": 11 + }, + "ccGenericServices": { + "type": "bool", + "id": 16 + }, + "javaGenericServices": { + "type": "bool", + "id": 17 + }, + "pyGenericServices": { + "type": "bool", + "id": 18 + }, + "deprecated": { + "type": "bool", + "id": 23 + }, + "ccEnableArenas": { + "type": "bool", + "id": 31 + }, + "objcClassPrefix": { + "type": "string", + "id": 36 + }, + "csharpNamespace": { + "type": "string", + "id": 37 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ], + "reserved": [ + [ + 38, + 38 + ] + ], + "nested": { + "OptimizeMode": { + "values": { + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3 + } + } + } + }, + "MessageOptions": { + "fields": { + "messageSetWireFormat": { + "type": "bool", + "id": 1 + }, + "noStandardDescriptorAccessor": { + "type": "bool", + "id": 2 + }, + "deprecated": { + "type": "bool", + "id": 3 + }, + "mapEntry": { + "type": "bool", + "id": 7 + }, + 
"uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ], + "reserved": [ + [ + 8, + 8 + ] + ] + }, + "FieldOptions": { + "fields": { + "ctype": { + "type": "CType", + "id": 1, + "options": { + "default": "STRING" + } + }, + "packed": { + "type": "bool", + "id": 2 + }, + "jstype": { + "type": "JSType", + "id": 6, + "options": { + "default": "JS_NORMAL" + } + }, + "lazy": { + "type": "bool", + "id": 5 + }, + "deprecated": { + "type": "bool", + "id": 3 + }, + "weak": { + "type": "bool", + "id": 10 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ], + "reserved": [ + [ + 4, + 4 + ] + ], + "nested": { + "CType": { + "values": { + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2 + } + }, + "JSType": { + "values": { + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2 + } + } + } + }, + "OneofOptions": { + "fields": { + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ] + }, + "EnumOptions": { + "fields": { + "allowAlias": { + "type": "bool", + "id": 2 + }, + "deprecated": { + "type": "bool", + "id": 3 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ] + }, + "EnumValueOptions": { + "fields": { + "deprecated": { + "type": "bool", + "id": 1 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ] + }, + "ServiceOptions": { + "fields": { + "deprecated": { + "type": "bool", + "id": 33 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ] + }, + "MethodOptions": { + "fields": { + "deprecated": { + "type": "bool", + "id": 33 + }, + "uninterpretedOption": { + "rule": "repeated", + "type": "UninterpretedOption", + "id": 999 + } + }, + "extensions": [ + [ + 1000, + 536870911 + ] + ] + }, + "UninterpretedOption": { + "fields": { + "name": { + "rule": "repeated", + "type": "NamePart", + "id": 2 + }, + "identifierValue": { + "type": "string", + "id": 3 + }, + "positiveIntValue": { + "type": "uint64", + "id": 4 + }, + "negativeIntValue": { + "type": "int64", + "id": 5 + }, + "doubleValue": { + "type": "double", + "id": 6 + }, + "stringValue": { + "type": "bytes", + "id": 7 + }, + "aggregateValue": { + "type": "string", + "id": 8 + } + }, + "nested": { + "NamePart": { + "fields": { + "namePart": { + "rule": "required", + "type": "string", + "id": 1 + }, + "isExtension": { + "rule": "required", + "type": "bool", + "id": 2 + } + } + } + } + }, + "SourceCodeInfo": { + "fields": { + "location": { + "rule": "repeated", + "type": "Location", + "id": 1 + } + }, + "nested": { + "Location": { + "fields": { + "path": { + "rule": "repeated", + "type": "int32", + "id": 1 + }, + "span": { + "rule": "repeated", + "type": "int32", + "id": 2 + }, + "leadingComments": { + "type": "string", + "id": 3 + }, + "trailingComments": { + "type": "string", + "id": 4 + }, + "leadingDetachedComments": { + "rule": "repeated", + "type": "string", + "id": 6 + } + } + } + } + }, + "GeneratedCodeInfo": { + "fields": { + "annotation": { + "rule": "repeated", + "type": "Annotation", + "id": 1 + } + }, + "nested": { + "Annotation": { + "fields": 
{ + "path": { + "rule": "repeated", + "type": "int32", + "id": 1 + }, + "sourceFile": { + "type": "string", + "id": 2 + }, + "begin": { + "type": "int32", + "id": 3 + }, + "end": { + "type": "int32", + "id": 4 + } + } + } + } + }, + "Struct": { + "fields": { + "fields": { + "keyType": "string", + "type": "Value", + "id": 1 + } + } + }, + "Value": { + "oneofs": { + "kind": { + "oneof": [ + "nullValue", + "numberValue", + "stringValue", + "boolValue", + "structValue", + "listValue" + ] + } + }, + "fields": { + "nullValue": { + "type": "NullValue", + "id": 1 + }, + "numberValue": { + "type": "double", + "id": 2 + }, + "stringValue": { + "type": "string", + "id": 3 + }, + "boolValue": { + "type": "bool", + "id": 4 + }, + "structValue": { + "type": "Struct", + "id": 5 + }, + "listValue": { + "type": "ListValue", + "id": 6 + } + } + }, + "NullValue": { + "values": { + "NULL_VALUE": 0 + } + }, + "ListValue": { + "fields": { + "values": { + "rule": "repeated", + "type": "Value", + "id": 1 + } + } + }, + "Empty": { + "fields": {} + }, + "DoubleValue": { + "fields": { + "value": { + "type": "double", + "id": 1 + } + } + }, + "FloatValue": { + "fields": { + "value": { + "type": "float", + "id": 1 + } + } + }, + "Int64Value": { + "fields": { + "value": { + "type": "int64", + "id": 1 + } + } + }, + "UInt64Value": { + "fields": { + "value": { + "type": "uint64", + "id": 1 + } + } + }, + "Int32Value": { + "fields": { + "value": { + "type": "int32", + "id": 1 + } + } + }, + "UInt32Value": { + "fields": { + "value": { + "type": "uint32", + "id": 1 + } + } + }, + "BoolValue": { + "fields": { + "value": { + "type": "bool", + "id": 1 + } + } + }, + "StringValue": { + "fields": { + "value": { + "type": "string", + "id": 1 + } + } + }, + "BytesValue": { + "fields": { + "value": { + "type": "bytes", + "id": 1 + } + } + }, + "Any": { + "fields": { + "typeUrl": { + "type": "string", + "id": 1 + }, + "value": { + "type": "bytes", + "id": 2 + } + } + } + } + }, + "firestore": { + "nested": { + "v1": { + "options": { + "csharp_namespace": "Google.Cloud.Firestore.V1", + "go_package": "google.golang.org/genproto/googleapis/firestore/v1;firestore", + "java_multiple_files": true, + "java_outer_classname": "WriteProto", + "java_package": "com.google.firestore.v1", + "objc_class_prefix": "GCFS", + "php_namespace": "Google\\Cloud\\Firestore\\V1", + "ruby_package": "Google::Cloud::Firestore::V1" + }, + "nested": { + "AggregationResult": { + "fields": { + "aggregateFields": { + "keyType": "string", + "type": "Value", + "id": 2 + } + } + }, + "DocumentMask": { + "fields": { + "fieldPaths": { + "rule": "repeated", + "type": "string", + "id": 1 + } + } + }, + "Precondition": { + "oneofs": { + "conditionType": { + "oneof": [ + "exists", + "updateTime" + ] + } + }, + "fields": { + "exists": { + "type": "bool", + "id": 1 + }, + "updateTime": { + "type": "google.protobuf.Timestamp", + "id": 2 + } + } + }, + "TransactionOptions": { + "oneofs": { + "mode": { + "oneof": [ + "readOnly", + "readWrite" + ] + } + }, + "fields": { + "readOnly": { + "type": "ReadOnly", + "id": 2 + }, + "readWrite": { + "type": "ReadWrite", + "id": 3 + } + }, + "nested": { + "ReadWrite": { + "fields": { + "retryTransaction": { + "type": "bytes", + "id": 1 + } + } + }, + "ReadOnly": { + "oneofs": { + "consistencySelector": { + "oneof": [ + "readTime" + ] + } + }, + "fields": { + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 2 + } + } + } + } + }, + "Document": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "fields": { + 
"keyType": "string", + "type": "Value", + "id": 2 + }, + "createTime": { + "type": "google.protobuf.Timestamp", + "id": 3 + }, + "updateTime": { + "type": "google.protobuf.Timestamp", + "id": 4 + } + } + }, + "Value": { + "oneofs": { + "valueType": { + "oneof": [ + "nullValue", + "booleanValue", + "integerValue", + "doubleValue", + "timestampValue", + "stringValue", + "bytesValue", + "referenceValue", + "geoPointValue", + "arrayValue", + "mapValue" + ] + } + }, + "fields": { + "nullValue": { + "type": "google.protobuf.NullValue", + "id": 11 + }, + "booleanValue": { + "type": "bool", + "id": 1 + }, + "integerValue": { + "type": "int64", + "id": 2 + }, + "doubleValue": { + "type": "double", + "id": 3 + }, + "timestampValue": { + "type": "google.protobuf.Timestamp", + "id": 10 + }, + "stringValue": { + "type": "string", + "id": 17 + }, + "bytesValue": { + "type": "bytes", + "id": 18 + }, + "referenceValue": { + "type": "string", + "id": 5 + }, + "geoPointValue": { + "type": "google.type.LatLng", + "id": 8 + }, + "arrayValue": { + "type": "ArrayValue", + "id": 9 + }, + "mapValue": { + "type": "MapValue", + "id": 6 + } + } + }, + "ArrayValue": { + "fields": { + "values": { + "rule": "repeated", + "type": "Value", + "id": 1 + } + } + }, + "MapValue": { + "fields": { + "fields": { + "keyType": "string", + "type": "Value", + "id": 1 + } + } + }, + "Firestore": { + "options": { + "(google.api.default_host)": "firestore.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore" + }, + "methods": { + "GetDocument": { + "requestType": "GetDocumentRequest", + "responseType": "Document", + "options": { + "(google.api.http).get": "/v1/{name=projects/*/databases/*/documents/*/**}" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "get": "/v1/{name=projects/*/databases/*/documents/*/**}" + } + } + ] + }, + "ListDocuments": { + "requestType": "ListDocumentsRequest", + "responseType": "ListDocumentsResponse", + "options": { + "(google.api.http).get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" + } + } + ] + }, + "UpdateDocument": { + "requestType": "UpdateDocumentRequest", + "responseType": "Document", + "options": { + "(google.api.http).patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}", + "(google.api.http).body": "document", + "(google.api.method_signature)": "document,update_mask" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}", + "body": "document" + } + }, + { + "(google.api.method_signature)": "document,update_mask" + } + ] + }, + "DeleteDocument": { + "requestType": "DeleteDocumentRequest", + "responseType": "google.protobuf.Empty", + "options": { + "(google.api.http).delete": "/v1/{name=projects/*/databases/*/documents/*/**}", + "(google.api.method_signature)": "name" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "delete": "/v1/{name=projects/*/databases/*/documents/*/**}" + } + }, + { + "(google.api.method_signature)": "name" + } + ] + }, + "BatchGetDocuments": { + "requestType": "BatchGetDocumentsRequest", + "responseType": "BatchGetDocumentsResponse", + "responseStream": true, + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchGet", + "(google.api.http).body": "*" + }, + "parsedOptions": [ + { + 
"(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:batchGet", + "body": "*" + } + } + ] + }, + "BeginTransaction": { + "requestType": "BeginTransactionRequest", + "responseType": "BeginTransactionResponse", + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction", + "(google.api.http).body": "*", + "(google.api.method_signature)": "database" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction", + "body": "*" + } + }, + { + "(google.api.method_signature)": "database" + } + ] + }, + "Commit": { + "requestType": "CommitRequest", + "responseType": "CommitResponse", + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:commit", + "(google.api.http).body": "*", + "(google.api.method_signature)": "database,writes" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:commit", + "body": "*" + } + }, + { + "(google.api.method_signature)": "database,writes" + } + ] + }, + "Rollback": { + "requestType": "RollbackRequest", + "responseType": "google.protobuf.Empty", + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:rollback", + "(google.api.http).body": "*", + "(google.api.method_signature)": "database,transaction" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:rollback", + "body": "*" + } + }, + { + "(google.api.method_signature)": "database,transaction" + } + ] + }, + "RunQuery": { + "requestType": "RunQueryRequest", + "responseType": "RunQueryResponse", + "responseStream": true, + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runQuery", + "(google.api.http).body": "*", + "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery", + "(google.api.http).additional_bindings.body": "*" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/databases/*/documents}:runQuery", + "body": "*", + "additional_bindings": { + "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery", + "body": "*" + } + } + } + ] + }, + "RunAggregationQuery": { + "requestType": "RunAggregationQueryRequest", + "responseType": "RunAggregationQueryResponse", + "responseStream": true, + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery", + "(google.api.http).body": "*", + "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery", + "(google.api.http).additional_bindings.body": "*" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery", + "body": "*", + "additional_bindings": { + "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery", + "body": "*" + } + } + } + ] + }, + "PartitionQuery": { + "requestType": "PartitionQueryRequest", + "responseType": "PartitionQueryResponse", + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery", + "(google.api.http).body": "*", + "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery", + "(google.api.http).additional_bindings.body": "*" + }, + "parsedOptions": [ + { + 
"(google.api.http)": { + "post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery", + "body": "*", + "additional_bindings": { + "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery", + "body": "*" + } + } + } + ] + }, + "Write": { + "requestType": "WriteRequest", + "requestStream": true, + "responseType": "WriteResponse", + "responseStream": true, + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:write", + "(google.api.http).body": "*" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:write", + "body": "*" + } + } + ] + }, + "Listen": { + "requestType": "ListenRequest", + "requestStream": true, + "responseType": "ListenResponse", + "responseStream": true, + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:listen", + "(google.api.http).body": "*" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:listen", + "body": "*" + } + } + ] + }, + "ListCollectionIds": { + "requestType": "ListCollectionIdsRequest", + "responseType": "ListCollectionIdsResponse", + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds", + "(google.api.http).body": "*", + "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds", + "(google.api.http).additional_bindings.body": "*", + "(google.api.method_signature)": "parent" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds", + "body": "*", + "additional_bindings": { + "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds", + "body": "*" + } + } + }, + { + "(google.api.method_signature)": "parent" + } + ] + }, + "BatchWrite": { + "requestType": "BatchWriteRequest", + "responseType": "BatchWriteResponse", + "options": { + "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchWrite", + "(google.api.http).body": "*" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{database=projects/*/databases/*}/documents:batchWrite", + "body": "*" + } + } + ] + }, + "CreateDocument": { + "requestType": "CreateDocumentRequest", + "responseType": "Document", + "options": { + "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}", + "(google.api.http).body": "document" + }, + "parsedOptions": [ + { + "(google.api.http)": { + "post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}", + "body": "document" + } + } + ] + } + } + }, + "GetDocumentRequest": { + "oneofs": { + "consistencySelector": { + "oneof": [ + "transaction", + "readTime" + ] + } + }, + "fields": { + "name": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "mask": { + "type": "DocumentMask", + "id": 2 + }, + "transaction": { + "type": "bytes", + "id": 3 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 5 + } + } + }, + "ListDocumentsRequest": { + "oneofs": { + "consistencySelector": { + "oneof": [ + "transaction", + "readTime" + ] + } + }, + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "collectionId": { + "type": "string", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "pageSize": { + 
"type": "int32", + "id": 3 + }, + "pageToken": { + "type": "string", + "id": 4 + }, + "orderBy": { + "type": "string", + "id": 6 + }, + "mask": { + "type": "DocumentMask", + "id": 7 + }, + "transaction": { + "type": "bytes", + "id": 8 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 10 + }, + "showMissing": { + "type": "bool", + "id": 12 + } + } + }, + "ListDocumentsResponse": { + "fields": { + "documents": { + "rule": "repeated", + "type": "Document", + "id": 1 + }, + "nextPageToken": { + "type": "string", + "id": 2 + } + } + }, + "CreateDocumentRequest": { + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "collectionId": { + "type": "string", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "documentId": { + "type": "string", + "id": 3 + }, + "document": { + "type": "Document", + "id": 4, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "mask": { + "type": "DocumentMask", + "id": 5 + } + } + }, + "UpdateDocumentRequest": { + "fields": { + "document": { + "type": "Document", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "updateMask": { + "type": "DocumentMask", + "id": 2 + }, + "mask": { + "type": "DocumentMask", + "id": 3 + }, + "currentDocument": { + "type": "Precondition", + "id": 4 + } + } + }, + "DeleteDocumentRequest": { + "fields": { + "name": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "currentDocument": { + "type": "Precondition", + "id": 2 + } + } + }, + "BatchGetDocumentsRequest": { + "oneofs": { + "consistencySelector": { + "oneof": [ + "transaction", + "newTransaction", + "readTime" + ] + } + }, + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "documents": { + "rule": "repeated", + "type": "string", + "id": 2 + }, + "mask": { + "type": "DocumentMask", + "id": 3 + }, + "transaction": { + "type": "bytes", + "id": 4 + }, + "newTransaction": { + "type": "TransactionOptions", + "id": 5 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 7 + } + } + }, + "BatchGetDocumentsResponse": { + "oneofs": { + "result": { + "oneof": [ + "found", + "missing" + ] + } + }, + "fields": { + "found": { + "type": "Document", + "id": 1 + }, + "missing": { + "type": "string", + "id": 2 + }, + "transaction": { + "type": "bytes", + "id": 3 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 4 + } + } + }, + "BeginTransactionRequest": { + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "options": { + "type": "TransactionOptions", + "id": 2 + } + } + }, + "BeginTransactionResponse": { + "fields": { + "transaction": { + "type": "bytes", + "id": 1 + } + } + }, + "CommitRequest": { + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "writes": { + "rule": "repeated", + "type": "Write", + "id": 2 + }, + "transaction": { + "type": "bytes", + "id": 3 + } + } + }, + "CommitResponse": { + "fields": { + "writeResults": { + "rule": "repeated", + "type": "WriteResult", + "id": 1 + }, + "commitTime": { + "type": "google.protobuf.Timestamp", + "id": 2 + } + } + }, + "RollbackRequest": { + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } 
+ }, + "transaction": { + "type": "bytes", + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + } + } + }, + "RunQueryRequest": { + "oneofs": { + "queryType": { + "oneof": [ + "structuredQuery" + ] + }, + "consistencySelector": { + "oneof": [ + "transaction", + "newTransaction", + "readTime" + ] + } + }, + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "structuredQuery": { + "type": "StructuredQuery", + "id": 2 + }, + "transaction": { + "type": "bytes", + "id": 5 + }, + "newTransaction": { + "type": "TransactionOptions", + "id": 6 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 7 + } + } + }, + "RunQueryResponse": { + "fields": { + "transaction": { + "type": "bytes", + "id": 2 + }, + "document": { + "type": "Document", + "id": 1 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 3 + }, + "skippedResults": { + "type": "int32", + "id": 4 + } + } + }, + "RunAggregationQueryRequest": { + "oneofs": { + "queryType": { + "oneof": [ + "structuredAggregationQuery" + ] + }, + "consistencySelector": { + "oneof": [ + "transaction", + "newTransaction", + "readTime" + ] + } + }, + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "structuredAggregationQuery": { + "type": "StructuredAggregationQuery", + "id": 2 + }, + "transaction": { + "type": "bytes", + "id": 4 + }, + "newTransaction": { + "type": "TransactionOptions", + "id": 5 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 6 + } + } + }, + "RunAggregationQueryResponse": { + "fields": { + "result": { + "type": "AggregationResult", + "id": 1 + }, + "transaction": { + "type": "bytes", + "id": 2 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 3 + } + } + }, + "PartitionQueryRequest": { + "oneofs": { + "queryType": { + "oneof": [ + "structuredQuery" + ] + } + }, + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "structuredQuery": { + "type": "StructuredQuery", + "id": 2 + }, + "partitionCount": { + "type": "int64", + "id": 3 + }, + "pageToken": { + "type": "string", + "id": 4 + }, + "pageSize": { + "type": "int32", + "id": 5 + } + } + }, + "PartitionQueryResponse": { + "fields": { + "partitions": { + "rule": "repeated", + "type": "Cursor", + "id": 1 + }, + "nextPageToken": { + "type": "string", + "id": 2 + } + } + }, + "WriteRequest": { + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "streamId": { + "type": "string", + "id": 2 + }, + "writes": { + "rule": "repeated", + "type": "Write", + "id": 3 + }, + "streamToken": { + "type": "bytes", + "id": 4 + }, + "labels": { + "keyType": "string", + "type": "string", + "id": 5 + } + } + }, + "WriteResponse": { + "fields": { + "streamId": { + "type": "string", + "id": 1 + }, + "streamToken": { + "type": "bytes", + "id": 2 + }, + "writeResults": { + "rule": "repeated", + "type": "WriteResult", + "id": 3 + }, + "commitTime": { + "type": "google.protobuf.Timestamp", + "id": 4 + } + } + }, + "ListenRequest": { + "oneofs": { + "targetChange": { + "oneof": [ + "addTarget", + "removeTarget" + ] + } + }, + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "addTarget": { + "type": "Target", + "id": 2 + }, + "removeTarget": { + "type": 
"int32", + "id": 3 + }, + "labels": { + "keyType": "string", + "type": "string", + "id": 4 + } + } + }, + "ListenResponse": { + "oneofs": { + "responseType": { + "oneof": [ + "targetChange", + "documentChange", + "documentDelete", + "documentRemove", + "filter" + ] + } + }, + "fields": { + "targetChange": { + "type": "TargetChange", + "id": 2 + }, + "documentChange": { + "type": "DocumentChange", + "id": 3 + }, + "documentDelete": { + "type": "DocumentDelete", + "id": 4 + }, + "documentRemove": { + "type": "DocumentRemove", + "id": 6 + }, + "filter": { + "type": "ExistenceFilter", + "id": 5 + } + } + }, + "Target": { + "oneofs": { + "targetType": { + "oneof": [ + "query", + "documents" + ] + }, + "resumeType": { + "oneof": [ + "resumeToken", + "readTime" + ] + } + }, + "fields": { + "query": { + "type": "QueryTarget", + "id": 2 + }, + "documents": { + "type": "DocumentsTarget", + "id": 3 + }, + "resumeToken": { + "type": "bytes", + "id": 4 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 11 + }, + "targetId": { + "type": "int32", + "id": 5 + }, + "once": { + "type": "bool", + "id": 6 + } + }, + "nested": { + "DocumentsTarget": { + "fields": { + "documents": { + "rule": "repeated", + "type": "string", + "id": 2 + } + } + }, + "QueryTarget": { + "oneofs": { + "queryType": { + "oneof": [ + "structuredQuery" + ] + } + }, + "fields": { + "parent": { + "type": "string", + "id": 1 + }, + "structuredQuery": { + "type": "StructuredQuery", + "id": 2 + } + } + } + } + }, + "TargetChange": { + "fields": { + "targetChangeType": { + "type": "TargetChangeType", + "id": 1 + }, + "targetIds": { + "rule": "repeated", + "type": "int32", + "id": 2 + }, + "cause": { + "type": "google.rpc.Status", + "id": 3 + }, + "resumeToken": { + "type": "bytes", + "id": 4 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 6 + } + }, + "nested": { + "TargetChangeType": { + "values": { + "NO_CHANGE": 0, + "ADD": 1, + "REMOVE": 2, + "CURRENT": 3, + "RESET": 4 + } + } + } + }, + "ListCollectionIdsRequest": { + "fields": { + "parent": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "pageSize": { + "type": "int32", + "id": 2 + }, + "pageToken": { + "type": "string", + "id": 3 + } + } + }, + "ListCollectionIdsResponse": { + "fields": { + "collectionIds": { + "rule": "repeated", + "type": "string", + "id": 1 + }, + "nextPageToken": { + "type": "string", + "id": 2 + } + } + }, + "BatchWriteRequest": { + "fields": { + "database": { + "type": "string", + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } + }, + "writes": { + "rule": "repeated", + "type": "Write", + "id": 2 + }, + "labels": { + "keyType": "string", + "type": "string", + "id": 3 + } + } + }, + "BatchWriteResponse": { + "fields": { + "writeResults": { + "rule": "repeated", + "type": "WriteResult", + "id": 1 + }, + "status": { + "rule": "repeated", + "type": "google.rpc.Status", + "id": 2 + } + } + }, + "StructuredQuery": { + "fields": { + "select": { + "type": "Projection", + "id": 1 + }, + "from": { + "rule": "repeated", + "type": "CollectionSelector", + "id": 2 + }, + "where": { + "type": "Filter", + "id": 3 + }, + "orderBy": { + "rule": "repeated", + "type": "Order", + "id": 4 + }, + "startAt": { + "type": "Cursor", + "id": 7 + }, + "endAt": { + "type": "Cursor", + "id": 8 + }, + "offset": { + "type": "int32", + "id": 6 + }, + "limit": { + "type": "google.protobuf.Int32Value", + "id": 5 + } + }, + "nested": { + "CollectionSelector": { + "fields": { + 
"collectionId": { + "type": "string", + "id": 2 + }, + "allDescendants": { + "type": "bool", + "id": 3 + } + } + }, + "Filter": { + "oneofs": { + "filterType": { + "oneof": [ + "compositeFilter", + "fieldFilter", + "unaryFilter" + ] + } + }, + "fields": { + "compositeFilter": { + "type": "CompositeFilter", + "id": 1 + }, + "fieldFilter": { + "type": "FieldFilter", + "id": 2 + }, + "unaryFilter": { + "type": "UnaryFilter", + "id": 3 + } + } + }, + "CompositeFilter": { + "fields": { + "op": { + "type": "Operator", + "id": 1 + }, + "filters": { + "rule": "repeated", + "type": "Filter", + "id": 2 + } + }, + "nested": { + "Operator": { + "values": { + "OPERATOR_UNSPECIFIED": 0, + "AND": 1, + "OR": 2 + } + } + } + }, + "FieldFilter": { + "fields": { + "field": { + "type": "FieldReference", + "id": 1 + }, + "op": { + "type": "Operator", + "id": 2 + }, + "value": { + "type": "Value", + "id": 3 + } + }, + "nested": { + "Operator": { + "values": { + "OPERATOR_UNSPECIFIED": 0, + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "NOT_EQUAL": 6, + "ARRAY_CONTAINS": 7, + "IN": 8, + "ARRAY_CONTAINS_ANY": 9, + "NOT_IN": 10 + } + } + } + }, + "UnaryFilter": { + "oneofs": { + "operandType": { + "oneof": [ + "field" + ] + } + }, + "fields": { + "op": { + "type": "Operator", + "id": 1 + }, + "field": { + "type": "FieldReference", + "id": 2 + } + }, + "nested": { + "Operator": { + "values": { + "OPERATOR_UNSPECIFIED": 0, + "IS_NAN": 2, + "IS_NULL": 3, + "IS_NOT_NAN": 4, + "IS_NOT_NULL": 5 + } + } + } + }, + "Order": { + "fields": { + "field": { + "type": "FieldReference", + "id": 1 + }, + "direction": { + "type": "Direction", + "id": 2 + } + } + }, + "FieldReference": { + "fields": { + "fieldPath": { + "type": "string", + "id": 2 + } + } + }, + "Projection": { + "fields": { + "fields": { + "rule": "repeated", + "type": "FieldReference", + "id": 2 + } + } + }, + "Direction": { + "values": { + "DIRECTION_UNSPECIFIED": 0, + "ASCENDING": 1, + "DESCENDING": 2 + } + } + } + }, + "StructuredAggregationQuery": { + "oneofs": { + "queryType": { + "oneof": [ + "structuredQuery" + ] + } + }, + "fields": { + "structuredQuery": { + "type": "StructuredQuery", + "id": 1 + }, + "aggregations": { + "rule": "repeated", + "type": "Aggregation", + "id": 3 + } + }, + "nested": { + "Aggregation": { + "oneofs": { + "operator": { + "oneof": [ + "count" + ] + } + }, + "fields": { + "count": { + "type": "Count", + "id": 1 + }, + "alias": { + "type": "string", + "id": 7 + } + }, + "nested": { + "Count": { + "fields": { + "upTo": { + "type": "google.protobuf.Int64Value", + "id": 1 + } + } + } + } + } + } + }, + "Cursor": { + "fields": { + "values": { + "rule": "repeated", + "type": "Value", + "id": 1 + }, + "before": { + "type": "bool", + "id": 2 + } + } + }, + "Write": { + "oneofs": { + "operation": { + "oneof": [ + "update", + "delete", + "verify", + "transform" + ] + } + }, + "fields": { + "update": { + "type": "Document", + "id": 1 + }, + "delete": { + "type": "string", + "id": 2 + }, + "verify": { + "type": "string", + "id": 5 + }, + "transform": { + "type": "DocumentTransform", + "id": 6 + }, + "updateMask": { + "type": "DocumentMask", + "id": 3 + }, + "updateTransforms": { + "rule": "repeated", + "type": "DocumentTransform.FieldTransform", + "id": 7 + }, + "currentDocument": { + "type": "Precondition", + "id": 4 + } + } + }, + "DocumentTransform": { + "fields": { + "document": { + "type": "string", + "id": 1 + }, + "fieldTransforms": { + "rule": "repeated", + "type": 
"FieldTransform", + "id": 2 + } + }, + "nested": { + "FieldTransform": { + "oneofs": { + "transformType": { + "oneof": [ + "setToServerValue", + "increment", + "maximum", + "minimum", + "appendMissingElements", + "removeAllFromArray" + ] + } + }, + "fields": { + "fieldPath": { + "type": "string", + "id": 1 + }, + "setToServerValue": { + "type": "ServerValue", + "id": 2 + }, + "increment": { + "type": "Value", + "id": 3 + }, + "maximum": { + "type": "Value", + "id": 4 + }, + "minimum": { + "type": "Value", + "id": 5 + }, + "appendMissingElements": { + "type": "ArrayValue", + "id": 6 + }, + "removeAllFromArray": { + "type": "ArrayValue", + "id": 7 + } + }, + "nested": { + "ServerValue": { + "values": { + "SERVER_VALUE_UNSPECIFIED": 0, + "REQUEST_TIME": 1 + } + } + } + } + } + }, + "WriteResult": { + "fields": { + "updateTime": { + "type": "google.protobuf.Timestamp", + "id": 1 + }, + "transformResults": { + "rule": "repeated", + "type": "Value", + "id": 2 + } + } + }, + "DocumentChange": { + "fields": { + "document": { + "type": "Document", + "id": 1 + }, + "targetIds": { + "rule": "repeated", + "type": "int32", + "id": 5 + }, + "removedTargetIds": { + "rule": "repeated", + "type": "int32", + "id": 6 + } + } + }, + "DocumentDelete": { + "fields": { + "document": { + "type": "string", + "id": 1 + }, + "removedTargetIds": { + "rule": "repeated", + "type": "int32", + "id": 6 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 4 + } + } + }, + "DocumentRemove": { + "fields": { + "document": { + "type": "string", + "id": 1 + }, + "removedTargetIds": { + "rule": "repeated", + "type": "int32", + "id": 2 + }, + "readTime": { + "type": "google.protobuf.Timestamp", + "id": 4 + } + } + }, + "ExistenceFilter": { + "fields": { + "targetId": { + "type": "int32", + "id": 1 + }, + "count": { + "type": "int32", + "id": 2 + } + } + } + } + } + } + }, + "api": { + "options": { + "go_package": "google.golang.org/genproto/googleapis/api/annotations;annotations", + "java_multiple_files": true, + "java_outer_classname": "HttpProto", + "java_package": "com.google.api", + "objc_class_prefix": "GAPI", + "cc_enable_arenas": true + }, + "nested": { + "http": { + "type": "HttpRule", + "id": 72295728, + "extend": "google.protobuf.MethodOptions" + }, + "Http": { + "fields": { + "rules": { + "rule": "repeated", + "type": "HttpRule", + "id": 1 + } + } + }, + "HttpRule": { + "oneofs": { + "pattern": { + "oneof": [ + "get", + "put", + "post", + "delete", + "patch", + "custom" + ] + } + }, + "fields": { + "get": { + "type": "string", + "id": 2 + }, + "put": { + "type": "string", + "id": 3 + }, + "post": { + "type": "string", + "id": 4 + }, + "delete": { + "type": "string", + "id": 5 + }, + "patch": { + "type": "string", + "id": 6 + }, + "custom": { + "type": "CustomHttpPattern", + "id": 8 + }, + "selector": { + "type": "string", + "id": 1 + }, + "body": { + "type": "string", + "id": 7 + }, + "additionalBindings": { + "rule": "repeated", + "type": "HttpRule", + "id": 11 + } + } + }, + "CustomHttpPattern": { + "fields": { + "kind": { + "type": "string", + "id": 1 + }, + "path": { + "type": "string", + "id": 2 + } + } + }, + "methodSignature": { + "rule": "repeated", + "type": "string", + "id": 1051, + "extend": "google.protobuf.MethodOptions" + }, + "defaultHost": { + "type": "string", + "id": 1049, + "extend": "google.protobuf.ServiceOptions" + }, + "oauthScopes": { + "type": "string", + "id": 1050, + "extend": "google.protobuf.ServiceOptions" + }, + "fieldBehavior": { + "rule": "repeated", + "type": 
"google.api.FieldBehavior", + "id": 1052, + "extend": "google.protobuf.FieldOptions" + }, + "FieldBehavior": { + "values": { + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7 + } + } + } + }, + "type": { + "options": { + "cc_enable_arenas": true, + "go_package": "google.golang.org/genproto/googleapis/type/latlng;latlng", + "java_multiple_files": true, + "java_outer_classname": "LatLngProto", + "java_package": "com.google.type", + "objc_class_prefix": "GTP" + }, + "nested": { + "LatLng": { + "fields": { + "latitude": { + "type": "double", + "id": 1 + }, + "longitude": { + "type": "double", + "id": 2 + } + } + } + } + }, + "rpc": { + "options": { + "cc_enable_arenas": true, + "go_package": "google.golang.org/genproto/googleapis/rpc/status;status", + "java_multiple_files": true, + "java_outer_classname": "StatusProto", + "java_package": "com.google.rpc", + "objc_class_prefix": "RPC" + }, + "nested": { + "Status": { + "fields": { + "code": { + "type": "int32", + "id": 1 + }, + "message": { + "type": "string", + "id": 2 + }, + "details": { + "rule": "repeated", + "type": "google.protobuf.Any", + "id": 3 + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/src/proto/proto/update.sh b/src/proto/proto/update.sh new file mode 100755 index 00000000..9f2ea2bc --- /dev/null +++ b/src/proto/proto/update.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail +IFS=$'\n\t' + +# Variables +PROTOS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +WORK_DIR=`mktemp -d` +PBJS="$(npm bin)/pbjs" + +# deletes the temp directory on exit +function cleanup { + rm -rf "$WORK_DIR" + echo "Deleted temp working directory $WORK_DIR" +} + +# register the cleanup function to be called on the EXIT signal +trap cleanup EXIT + +# Enter work dir +pushd "$WORK_DIR" + +# Clone necessary git repos. +git clone --depth 1 https://github.com/googleapis/googleapis.git +git clone --depth 1 https://github.com/google/protobuf.git + +# Copy necessary protos. 
+mkdir -p "${PROTOS_DIR}/google/api" +cp googleapis/google/api/{annotations.proto,http.proto,client.proto,field_behavior.proto} \ + "${PROTOS_DIR}/google/api/" + +mkdir -p "${PROTOS_DIR}/google/firestore/v1" +cp googleapis/google/firestore/v1/*.proto \ + "${PROTOS_DIR}/google/firestore/v1/" + +mkdir -p "${PROTOS_DIR}/google/rpc" +cp googleapis/google/rpc/status.proto \ + "${PROTOS_DIR}/google/rpc/" + +mkdir -p "${PROTOS_DIR}/google/type" +cp googleapis/google/type/latlng.proto \ + "${PROTOS_DIR}/google/type/" + +# Hack in `verify` support +ex "${PROTOS_DIR}/google/firestore/v1/write.proto" < Date: Sun, 29 Jan 2023 17:01:14 +0800 Subject: [PATCH 2/9] feat: add admin --- .../google/firestore/admin/v1/database.proto | 129 +++++ .../google/firestore/admin/v1/field.proto | 136 ++++++ .../firestore/admin/v1/firestore_admin.proto | 457 ++++++++++++++++++ .../firestore_admin_grpc_service_config.json | 61 +++ .../firestore/admin/v1/firestore_gapic.yaml | 5 + .../firestore/admin/v1/firestore_v1.yaml | 75 +++ .../google/firestore/admin/v1/index.proto | 156 ++++++ .../google/firestore/admin/v1/location.proto | 31 ++ .../google/firestore/admin/v1/operation.proto | 223 +++++++++ .../admin/v1beta1/firestore_admin.proto | 370 ++++++++++++++ .../firestore/admin/v1beta1/index.proto | 101 ++++ .../firestore/admin/v1beta1/location.proto | 33 ++ .../firestore/admin/v1beta2/field.proto | 92 ++++ .../admin/v1beta2/firestore_admin.proto | 278 +++++++++++ .../firestore/admin/v1beta2/index.proto | 150 ++++++ .../firestore/admin/v1beta2/operation.proto | 202 ++++++++ src/proto/proto/update.sh | 5 + 17 files changed, 2504 insertions(+) create mode 100644 src/proto/proto/google/firestore/admin/v1/database.proto create mode 100644 src/proto/proto/google/firestore/admin/v1/field.proto create mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_admin.proto create mode 100755 src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json create mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml create mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml create mode 100644 src/proto/proto/google/firestore/admin/v1/index.proto create mode 100644 src/proto/proto/google/firestore/admin/v1/location.proto create mode 100644 src/proto/proto/google/firestore/admin/v1/operation.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta1/index.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta1/location.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta2/field.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta2/index.proto create mode 100644 src/proto/proto/google/firestore/admin/v1beta2/operation.proto diff --git a/src/proto/proto/google/firestore/admin/v1/database.proto b/src/proto/proto/google/firestore/admin/v1/database.proto new file mode 100644 index 00000000..3f242c3e --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/database.proto @@ -0,0 +1,129 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.admin.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "DatabaseProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; + +// A Cloud Firestore Database. +// Currently only one database is allowed per cloud project; this database +// must have a `database_id` of '(default)'. +message Database { + option (google.api.resource) = { + type: "firestore.googleapis.com/Database" + pattern: "projects/{project}/databases/{database}" + style: DECLARATIVE_FRIENDLY + }; + + // The type of the database. + // See https://cloud.google.com/datastore/docs/firestore-or-datastore for + // information about how to choose. + // + // Mode changes are only allowed if the database is empty. + enum DatabaseType { + // The default value. This value is used if the database type is omitted. + DATABASE_TYPE_UNSPECIFIED = 0; + + // Firestore Native Mode + FIRESTORE_NATIVE = 1; + + // Firestore in Datastore Mode. + DATASTORE_MODE = 2; + } + + // The type of concurrency control mode for transactions. + enum ConcurrencyMode { + // Not used. + CONCURRENCY_MODE_UNSPECIFIED = 0; + + // Use optimistic concurrency control by default. This mode is available + // for Cloud Firestore databases. + OPTIMISTIC = 1; + + // Use pessimistic concurrency control by default. This mode is available + // for Cloud Firestore databases. + // + // This is the default setting for Cloud Firestore. + PESSIMISTIC = 2; + + // Use optimistic concurrency control with entity groups by default. + // + // This is the only available mode for Cloud Datastore. + // + // This mode is also available for Cloud Firestore with Datastore Mode but + // is not recommended. + OPTIMISTIC_WITH_ENTITY_GROUPS = 3; + } + + // The type of App Engine integration mode. + enum AppEngineIntegrationMode { + // Not used. + APP_ENGINE_INTEGRATION_MODE_UNSPECIFIED = 0; + + // If an App Engine application exists in the same region as this database, + // App Engine configuration will impact this database. This includes + // disabling of the application & database, as well as disabling writes to + // the database. + ENABLED = 1; + + // Appengine has no affect on the ability of this database to serve + // requests. + DISABLED = 2; + } + + // The resource name of the Database. + // Format: `projects/{project}/databases/{database}` + string name = 1; + + // The location of the database. Available databases are listed at + // https://cloud.google.com/firestore/docs/locations. + string location_id = 9; + + // The type of the database. + // See https://cloud.google.com/datastore/docs/firestore-or-datastore for + // information about how to choose. 
+ DatabaseType type = 10; + + // The concurrency control mode to use for this database. + ConcurrencyMode concurrency_mode = 15; + + // The App Engine integration mode to use for this database. + AppEngineIntegrationMode app_engine_integration_mode = 19; + + // Output only. The key_prefix for this database. This key_prefix is used, in combination + // with the project id ("~") to construct the + // application id that is returned from the Cloud Datastore APIs in Google App + // Engine first generation runtimes. + // + // This value may be empty in which case the appid to use for URL-encoded keys + // is the project_id (eg: foo instead of v~foo). + string key_prefix = 20 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // This checksum is computed by the server based on the value of other + // fields, and may be sent on update and delete requests to ensure the + // client has an up-to-date value before proceeding. + string etag = 99; +} diff --git a/src/proto/proto/google/firestore/admin/v1/field.proto b/src/proto/proto/google/firestore/admin/v1/field.proto new file mode 100644 index 00000000..0bbb11d8 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/field.proto @@ -0,0 +1,136 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.admin.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/firestore/admin/v1/index.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "FieldProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; + +// Represents a single field in the database. +// +// Fields are grouped by their "Collection Group", which represent all +// collections in the database with the same id. +message Field { + option (google.api.resource) = { + type: "firestore.googleapis.com/Field" + pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}" + }; + + // The index configuration for this field. + message IndexConfig { + // The indexes supported for this field. + repeated Index indexes = 1; + + // Output only. When true, the `Field`'s index configuration is set from the + // configuration specified by the `ancestor_field`. + // When false, the `Field`'s index configuration is defined explicitly. + bool uses_ancestor_config = 2; + + // Output only. Specifies the resource name of the `Field` from which this field's + // index configuration is set (when `uses_ancestor_config` is true), + // or from which it *would* be set if this field had no index configuration + // (when `uses_ancestor_config` is false). 
+ string ancestor_field = 3; + + // Output only + // When true, the `Field`'s index configuration is in the process of being + // reverted. Once complete, the index config will transition to the same + // state as the field specified by `ancestor_field`, at which point + // `uses_ancestor_config` will be `true` and `reverting` will be `false`. + bool reverting = 4; + } + + // The TTL (time-to-live) configuration for documents that have this `Field` + // set. + // Storing a timestamp value into a TTL-enabled field will be treated as + // the document's absolute expiration time. Using any other data type or + // leaving the field absent will disable the TTL for the individual document. + message TtlConfig { + // The state of applying the TTL configuration to all documents. + enum State { + // The state is unspecified or unknown. + STATE_UNSPECIFIED = 0; + + // The TTL is being applied. There is an active long-running operation to + // track the change. Newly written documents will have TTLs applied as + // requested. Requested TTLs on existing documents are still being + // processed. When TTLs on all existing documents have been processed, the + // state will move to 'ACTIVE'. + CREATING = 1; + + // The TTL is active for all documents. + ACTIVE = 2; + + // The TTL configuration could not be enabled for all existing documents. + // Newly written documents will continue to have their TTL applied. + // The LRO returned when last attempting to enable TTL for this `Field` + // has failed, and may have more details. + NEEDS_REPAIR = 3; + } + + // Output only. The state of the TTL configuration. + State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Required. A field name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` + // + // A field path may be a simple field name, e.g. `address` or a path to fields + // within map_value , e.g. `address.city`, + // or a special field path. The only valid special field is `*`, which + // represents any field. + // + // Field paths may be quoted using ` (backtick). The only character that needs + // to be escaped within a quoted field path is the backtick character itself, + // escaped using a backslash. Special characters in field paths that + // must be quoted include: `*`, `.`, + // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. + // + // Examples: + // (Note: Comments here are written in markdown syntax, so there is an + // additional layer of backticks to represent a code block) + // `\`address.city\`` represents a field named `address.city`, not the map key + // `city` in the field `address`. + // `\`*\`` represents a field named `*`, not any field. + // + // A special `Field` contains the default indexing settings for all fields. + // This field's resource name is: + // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` + // Indexes defined on this `Field` will be applied to all fields which do not + // have their own `Field` index configuration. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // The index configuration for this field. If unset, field indexing will + // revert to the configuration defined by the `ancestor_field`. To + // explicitly remove all indexes for this field, specify an index config + // with an empty list of indexes. + IndexConfig index_config = 2; + + // The TTL configuration for this `Field`. 
+ // Setting or unsetting this will enable or disable the TTL for + // documents that have this `Field`. + TtlConfig ttl_config = 3; +} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto new file mode 100644 index 00000000..c493673a --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto @@ -0,0 +1,457 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.admin.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/firestore/admin/v1/database.proto"; +import "google/firestore/admin/v1/field.proto"; +import "google/firestore/admin/v1/index.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "FirestoreAdminProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; +option (google.api.resource_definition) = { + type: "firestore.googleapis.com/Location" + pattern: "projects/{project}/locations/{location}" +}; +option (google.api.resource_definition) = { + type: "firestore.googleapis.com/CollectionGroup" + pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}" +}; + +// The Cloud Firestore Admin API. +// +// This API provides several administrative services for Cloud Firestore. +// +// Project, Database, Namespace, Collection, Collection Group, and Document are +// used as defined in the Google Cloud Firestore API. +// +// Operation: An Operation represents work being performed in the background. +// +// The index service manages Cloud Firestore indexes. +// +// Index creation is performed asynchronously. +// An Operation resource is created for each such asynchronous operation. +// The state of the operation (including any errors encountered) +// may be queried via the Operation resource. +// +// The Operations collection provides a record of actions performed for the +// specified Project (including any Operations in progress). Operations are not +// created directly but through calls on other collections or resources. +// +// An Operation that is done may be deleted so that it is no longer listed as +// part of the Operation collection. Operations are garbage collected after +// 30 days. By default, ListOperations will only return in progress and failed +// operations. To list completed operation, issue a ListOperations request with +// the filter `done: true`. 
+// +// Operations are created by service `FirestoreAdmin`, but are accessed via +// service `google.longrunning.Operations`. +service FirestoreAdmin { + option (google.api.default_host) = "firestore.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/datastore"; + + // Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] + // which may be used to track the status of the creation. The metadata for + // the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata]. + rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" + body: "index" + }; + option (google.api.method_signature) = "parent,index"; + option (google.longrunning.operation_info) = { + response_type: "Index" + metadata_type: "IndexOperationMetadata" + }; + } + + // Lists composite indexes. + rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" + }; + option (google.api.method_signature) = "parent"; + } + + // Gets a composite index. + rpc GetIndex(GetIndexRequest) returns (Index) { + option (google.api.http) = { + get: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Deletes a composite index. + rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Gets the metadata and configuration for a Field. + rpc GetField(GetFieldRequest) returns (Field) { + option (google.api.http) = { + get: "/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a field configuration. Currently, field updates apply only to + // single field index configuration. However, calls to + // [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField] should provide a field mask to avoid + // changing any configuration that the caller isn't aware of. The field mask + // should be specified as: `{ paths: "index_config" }`. + // + // This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to + // track the status of the field update. The metadata for + // the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata]. + // + // To configure the default field settings for the database, use + // the special `Field` with resource name: + // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. + rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}" + body: "field" + }; + option (google.api.method_signature) = "field"; + option (google.longrunning.operation_info) = { + response_type: "Field" + metadata_type: "FieldOperationMetadata" + }; + } + + // Lists the field configuration and metadata for this database. 
+ // + // Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields + // that have been explicitly overridden. To issue this query, call + // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to + // `indexConfig.usesAncestorConfig:false` . + rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields" + }; + option (google.api.method_signature) = "parent"; + } + + // Exports a copy of all or a subset of documents from Google Cloud Firestore + // to another storage system, such as Google Cloud Storage. Recent updates to + // documents may not be reflected in the export. The export occurs in the + // background and its progress can be monitored and managed via the + // Operation resource that is created. The output of an export may only be + // used once the associated operation is done. If an export operation is + // cancelled before completion it may leave partial data behind in Google + // Cloud Storage. + // + // For more details on export behavior and output format, refer to: + // https://cloud.google.com/firestore/docs/manage-data/export-import + rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{name=projects/*/databases/*}:exportDocuments" + body: "*" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "ExportDocumentsResponse" + metadata_type: "ExportDocumentsMetadata" + }; + } + + // Imports documents into Google Cloud Firestore. Existing documents with the + // same name are overwritten. The import occurs in the background and its + // progress can be monitored and managed via the Operation resource that is + // created. If an ImportDocuments operation is cancelled, it is possible + // that a subset of the data has already been imported to Cloud Firestore. + rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{name=projects/*/databases/*}:importDocuments" + body: "*" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "ImportDocumentsMetadata" + }; + } + + // Gets information about a database. + rpc GetDatabase(GetDatabaseRequest) returns (Database) { + option (google.api.http) = { + get: "/v1/{name=projects/*/databases/*}" + }; + option (google.api.method_signature) = "name"; + } + + // List all the databases in the project. + rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*}/databases" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates a database. + rpc UpdateDatabase(UpdateDatabaseRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1/{database.name=projects/*/databases/*}" + body: "database" + }; + option (google.api.method_signature) = "database,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "Database" + metadata_type: "UpdateDatabaseMetadata" + }; + } +} + +// A request to list the Firestore Databases in all locations for a project. +message ListDatabasesRequest { + // Required. 
A parent name of the form + // `projects/{project_id}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "firestore.googleapis.com/Database" + } + ]; +} + +// The list of databases for a project. +message ListDatabasesResponse { + // The databases in the project. + repeated Database databases = 1; +} + +// The request for [FirestoreAdmin.GetDatabase][google.firestore.admin.v1.FirestoreAdmin.GetDatabase]. +message GetDatabaseRequest { + // Required. A name of the form + // `projects/{project_id}/databases/{database_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Database" + } + ]; +} + +// The request for [FirestoreAdmin.UpdateDatabase][google.firestore.admin.v1.FirestoreAdmin.UpdateDatabase]. +message UpdateDatabaseRequest { + // Required. The database to update. + Database database = 1 [(google.api.field_behavior) = REQUIRED]; + + // The list of fields to be updated. + google.protobuf.FieldMask update_mask = 2; +} + +// Metadata related to the update database operation. +message UpdateDatabaseMetadata { + +} + +// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. +message CreateIndexRequest { + // Required. A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/CollectionGroup" + } + ]; + + // Required. The composite index to create. + Index index = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. +message ListIndexesRequest { + // Required. A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/CollectionGroup" + } + ]; + + // The filter to apply to list results. + string filter = 2; + + // The number of results to return. + int32 page_size = 3; + + // A page token, returned from a previous call to + // [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes], that may be used to get the next + // page of results. + string page_token = 4; +} + +// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. +message ListIndexesResponse { + // The requested indexes. + repeated Index indexes = 1; + + // A page token that may be used to request another page of results. If blank, + // this is the last page. + string next_page_token = 2; +} + +// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex]. +message GetIndexRequest { + // Required. A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Index" + } + ]; +} + +// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex]. +message DeleteIndexRequest { + // Required. 
A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Index" + } + ]; +} + +// The request for [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. +message UpdateFieldRequest { + // Required. The field to be updated. + Field field = 1 [(google.api.field_behavior) = REQUIRED]; + + // A mask, relative to the field. If specified, only configuration specified + // by this field_mask will be updated in the field. + google.protobuf.FieldMask update_mask = 2; +} + +// The request for [FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField]. +message GetFieldRequest { + // Required. A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Field" + } + ]; +} + +// The request for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. +message ListFieldsRequest { + // Required. A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/CollectionGroup" + } + ]; + + // The filter to apply to list results. Currently, + // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields + // that have been explicitly overridden. To issue this query, call + // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with a filter that includes + // `indexConfig.usesAncestorConfig:false` . + string filter = 2; + + // The number of results to return. + int32 page_size = 3; + + // A page token, returned from a previous call to + // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields], that may be used to get the next + // page of results. + string page_token = 4; +} + +// The response for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. +message ListFieldsResponse { + // The requested fields. + repeated Field fields = 1; + + // A page token that may be used to request another page of results. If blank, + // this is the last page. + string next_page_token = 2; +} + +// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. +message ExportDocumentsRequest { + // Required. Database to export. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Database" + } + ]; + + // Which collection ids to export. Unspecified means all collections. + repeated string collection_ids = 2; + + // The output URI. Currently only supports Google Cloud Storage URIs of the + // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name + // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional + // Google Cloud Storage namespace path. When + // choosing a name, be sure to consider Google Cloud Storage naming + // guidelines: https://cloud.google.com/storage/docs/naming. 
+ // If the URI is a bucket (without a namespace path), a prefix will be + // generated based on the start time. + string output_uri_prefix = 3; +} + +// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. +message ImportDocumentsRequest { + // Required. Database to import into. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "firestore.googleapis.com/Database" + } + ]; + + // Which collection ids to import. Unspecified means all collections included + // in the import. + repeated string collection_ids = 2; + + // Location of the exported files. + // This must match the output_uri_prefix of an ExportDocumentsResponse from + // an export that has completed successfully. + // See: + // [google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix]. + string input_uri_prefix = 3; +} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json b/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json new file mode 100755 index 00000000..1460a8b7 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json @@ -0,0 +1,61 @@ +{ + "methodConfig": [ + { + "name": [ + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "ListIndexes" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "GetIndex" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "DeleteIndex" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "GetField" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "ListFields" + } + ], + "timeout": "60s", + "retryPolicy": { + "maxAttempts": 5, + "initialBackoff": "0.100s", + "maxBackoff": "60s", + "backoffMultiplier": 1.3, + "retryableStatusCodes": [ + "UNAVAILABLE", + "INTERNAL", + "DEADLINE_EXCEEDED" + ] + } + }, + { + "name": [ + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "CreateIndex" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "ImportDocuments" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "ExportDocuments" + }, + { + "service": "google.firestore.admin.v1.FirestoreAdmin", + "method": "UpdateField" + } + ], + "timeout": "60s" + } + ] +} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml b/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml new file mode 100644 index 00000000..f13f3c2b --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml @@ -0,0 +1,5 @@ +type: com.google.api.codegen.ConfigProto +config_schema_version: 2.0.0 +language_settings: + java: + package_name: com.google.cloud.firestore.v1 diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml b/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml new file mode 100644 index 00000000..3b45b2f6 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml @@ -0,0 +1,75 @@ +type: google.api.Service +config_version: 3 +name: firestore.googleapis.com +title: Cloud Firestore API + +apis: +- name: google.cloud.location.Locations +- name: google.firestore.admin.v1.FirestoreAdmin +- name: google.longrunning.Operations + +types: +- name: 
google.firestore.admin.v1.ExportDocumentsMetadata +- name: google.firestore.admin.v1.ExportDocumentsResponse +- name: google.firestore.admin.v1.FieldOperationMetadata +- name: google.firestore.admin.v1.ImportDocumentsMetadata +- name: google.firestore.admin.v1.IndexOperationMetadata +- name: google.firestore.admin.v1.LocationMetadata +- name: google.firestore.admin.v1.UpdateDatabaseMetadata + +documentation: + summary: |- + Accesses the NoSQL document database built for automatic scaling, high + performance, and ease of application development. + rules: + - selector: google.cloud.location.Locations.GetLocation + description: Gets information about a location. + + - selector: google.cloud.location.Locations.ListLocations + description: Lists information about the supported locations for this service. + +backend: + rules: + - selector: google.cloud.location.Locations.GetLocation + deadline: 295.0 + - selector: google.cloud.location.Locations.ListLocations + deadline: 295.0 + - selector: 'google.firestore.admin.v1.FirestoreAdmin.*' + deadline: 295.0 + - selector: 'google.longrunning.Operations.*' + deadline: 295.0 + +http: + rules: + - selector: google.longrunning.Operations.CancelOperation + post: '/v1/{name=projects/*/databases/*/operations/*}:cancel' + body: '*' + - selector: google.longrunning.Operations.DeleteOperation + delete: '/v1/{name=projects/*/databases/*/operations/*}' + - selector: google.longrunning.Operations.GetOperation + get: '/v1/{name=projects/*/databases/*/operations/*}' + - selector: google.longrunning.Operations.ListOperations + get: '/v1/{name=projects/*/databases/*}/operations' + +authentication: + rules: + - selector: google.cloud.location.Locations.GetLocation + oauth: + canonical_scopes: |- + https://www.googleapis.com/auth/cloud-platform, + https://www.googleapis.com/auth/datastore + - selector: google.cloud.location.Locations.ListLocations + oauth: + canonical_scopes: |- + https://www.googleapis.com/auth/cloud-platform, + https://www.googleapis.com/auth/datastore + - selector: 'google.firestore.admin.v1.FirestoreAdmin.*' + oauth: + canonical_scopes: |- + https://www.googleapis.com/auth/cloud-platform, + https://www.googleapis.com/auth/datastore + - selector: 'google.longrunning.Operations.*' + oauth: + canonical_scopes: |- + https://www.googleapis.com/auth/cloud-platform, + https://www.googleapis.com/auth/datastore diff --git a/src/proto/proto/google/firestore/admin/v1/index.proto b/src/proto/proto/google/firestore/admin/v1/index.proto new file mode 100644 index 00000000..066d4109 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/index.proto @@ -0,0 +1,156 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
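The service configuration above routes `google.longrunning.Operations` methods onto database-scoped URLs. A hedged TypeScript sketch of polling one such operation over REST, not code from this repository: the operation name, the token argument, and the use of the global `fetch` API are all placeholders for illustration.

```ts
// Poll a Firestore admin long-running operation by resource name, e.g.
// "projects/my-project/databases/(default)/operations/SOME_OPERATION_ID",
// following the yaml rule `get: '/v1/{name=projects/*/databases/*/operations/*}'`.
async function getOperation(
  name: string,
  accessToken: string
): Promise<{ done?: boolean }> {
  const res = await fetch(`https://firestore.googleapis.com/v1/${name}`, {
    headers: { Authorization: `Bearer ${accessToken}` },
  });
  if (!res.ok) {
    throw new Error(`GetOperation failed with HTTP ${res.status}`);
  }
  return (await res.json()) as { done?: boolean };
}
```

A caller would typically repeat this request until the returned `done` flag is true, mirroring how the admin API's asynchronous index and export operations are described in these protos.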
+ +syntax = "proto3"; + +package google.firestore.admin.v1; + +import "google/api/resource.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "IndexProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; + +// Cloud Firestore indexes enable simple and complex queries against +// documents in a database. +message Index { + option (google.api.resource) = { + type: "firestore.googleapis.com/Index" + pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}" + }; + + // Query Scope defines the scope at which a query is run. This is specified on + // a StructuredQuery's `from` field. + enum QueryScope { + // The query scope is unspecified. Not a valid option. + QUERY_SCOPE_UNSPECIFIED = 0; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified + // at query time, and that has the collection id specified by the index. + COLLECTION = 1; + + // Indexes with a collection group query scope specified allow queries + // against all collections that has the collection id specified by the + // index. + COLLECTION_GROUP = 2; + } + + // A field in an index. + // The field_path describes which field is indexed, the value_mode describes + // how the field value is indexed. + message IndexField { + // The supported orderings. + enum Order { + // The ordering is unspecified. Not a valid option. + ORDER_UNSPECIFIED = 0; + + // The field is ordered by ascending field value. + ASCENDING = 1; + + // The field is ordered by descending field value. + DESCENDING = 2; + } + + // The supported array value configurations. + enum ArrayConfig { + // The index does not support additional array queries. + ARRAY_CONFIG_UNSPECIFIED = 0; + + // The index supports array containment queries. + CONTAINS = 1; + } + + // Can be __name__. + // For single field indexes, this must match the name of the field or may + // be omitted. + string field_path = 1; + + // How the field value is indexed. + oneof value_mode { + // Indicates that this field supports ordering by the specified order or + // comparing using =, !=, <, <=, >, >=. + Order order = 2; + + // Indicates that this field supports operations on `array_value`s. + ArrayConfig array_config = 3; + } + } + + // The state of an index. During index creation, an index will be in the + // `CREATING` state. If the index is created successfully, it will transition + // to the `READY` state. If the index creation encounters a problem, the index + // will transition to the `NEEDS_REPAIR` state. + enum State { + // The state is unspecified. + STATE_UNSPECIFIED = 0; + + // The index is being created. + // There is an active long-running operation for the index. + // The index is updated when writing a document. + // Some index data may exist. + CREATING = 1; + + // The index is ready to be used. + // The index is updated when writing a document. + // The index is fully populated from all stored documents it applies to. + READY = 2; + + // The index was being created, but something went wrong. + // There is no active long-running operation for the index, + // and the most recently finished long-running operation failed. 
+ // The index is not updated when writing a document. + // Some index data may exist. + // Use the google.longrunning.Operations API to determine why the operation + // that last attempted to create this index failed, then re-create the + // index. + NEEDS_REPAIR = 3; + } + + // Output only. A server defined name for this index. + // The form of this name for composite indexes will be: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` + // For single field indexes, this field will be empty. + string name = 1; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified at + // query time, and that has the same collection id. + // + // Indexes with a collection group query scope specified allow queries against + // all collections descended from a specific document, specified at query + // time, and that have the same collection id as this index. + QueryScope query_scope = 2; + + // The fields supported by this index. + // + // For composite indexes, this is always 2 or more fields. + // The last field entry is always for the field path `__name__`. If, on + // creation, `__name__` was not specified as the last field, it will be added + // automatically with the same direction as that of the last field defined. If + // the final field in a composite index is not directional, the `__name__` + // will be ordered ASCENDING (unless explicitly specified). + // + // For single field indexes, this will always be exactly one entry with a + // field path equal to the field path of the associated field. + repeated IndexField fields = 3; + + // Output only. The serving state of the index. + State state = 4; +} diff --git a/src/proto/proto/google/firestore/admin/v1/location.proto b/src/proto/proto/google/firestore/admin/v1/location.proto new file mode 100644 index 00000000..8f7519c4 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/location.proto @@ -0,0 +1,31 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.admin.v1; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "LocationProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; + +// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. 
+message LocationMetadata { + +} diff --git a/src/proto/proto/google/firestore/admin/v1/operation.proto b/src/proto/proto/google/firestore/admin/v1/operation.proto new file mode 100644 index 00000000..654a6ad6 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1/operation.proto @@ -0,0 +1,223 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.firestore.admin.v1; + +import "google/firestore/admin/v1/index.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; +option java_multiple_files = true; +option java_outer_classname = "OperationProto"; +option java_package = "com.google.firestore.admin.v1"; +option objc_class_prefix = "GCFS"; +option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; +option ruby_package = "Google::Cloud::Firestore::Admin::V1"; + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. +message IndexOperationMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The index resource that this operation is acting on. For example: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string index = 3; + + // The state of the operation. + OperationState state = 4; + + // The progress, in documents, of this operation. + Progress progress_documents = 5; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 6; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. +message FieldOperationMetadata { + // Information about an index configuration change. + message IndexConfigDelta { + // Specifies how the index is changing. + enum ChangeType { + // The type of change is not specified or known. + CHANGE_TYPE_UNSPECIFIED = 0; + + // The single field index is being added. + ADD = 1; + + // The single field index is being removed. + REMOVE = 2; + } + + // Specifies how the index is changing. + ChangeType change_type = 1; + + // The index being changed. + Index index = 2; + } + + // Information about an TTL configuration change. + message TtlConfigDelta { + // Specifies how the TTL config is changing. + enum ChangeType { + // The type of change is not specified or known. + CHANGE_TYPE_UNSPECIFIED = 0; + + // The TTL config is being added. + ADD = 1; + + // The TTL config is being removed. + REMOVE = 2; + } + + // Specifies how the TTL configuration is changing. + ChangeType change_type = 1; + } + + // The time this operation started. 
+ google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The field resource that this operation is acting on. For example: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` + string field = 3; + + // A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this + // operation. + repeated IndexConfigDelta index_config_deltas = 4; + + // The state of the operation. + OperationState state = 5; + + // The progress, in documents, of this operation. + Progress progress_documents = 6; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 7; + + // Describes the deltas of TTL configuration. + TtlConfigDelta ttl_config_delta = 8; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. +message ExportDocumentsMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The state of the export operation. + OperationState operation_state = 3; + + // The progress, in documents, of this operation. + Progress progress_documents = 4; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 5; + + // Which collection ids are being exported. + repeated string collection_ids = 6; + + // Where the entities are being exported to. + string output_uri_prefix = 7; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. +message ImportDocumentsMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The state of the import operation. + OperationState operation_state = 3; + + // The progress, in documents, of this operation. + Progress progress_documents = 4; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 5; + + // Which collection ids are being imported. + repeated string collection_ids = 6; + + // The location of the documents being imported. + string input_uri_prefix = 7; +} + +// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. +message ExportDocumentsResponse { + // Location of the output files. This can be used to begin an import + // into Cloud Firestore (this project or another project) after the operation + // completes successfully. + string output_uri_prefix = 1; +} + +// Describes the state of the operation. +enum OperationState { + // Unspecified. + OPERATION_STATE_UNSPECIFIED = 0; + + // Request is being prepared for processing. + INITIALIZING = 1; + + // Request is actively being processed. + PROCESSING = 2; + + // Request is in the process of being cancelled after user called + // google.longrunning.Operations.CancelOperation on the operation. + CANCELLING = 3; + + // Request has been processed and is in its finalization stage. + FINALIZING = 4; + + // Request has completed successfully. 
+ SUCCESSFUL = 5; + + // Request has finished being processed, but encountered an error. + FAILED = 6; + + // Request has finished being cancelled after user called + // google.longrunning.Operations.CancelOperation. + CANCELLED = 7; +} + +// Describes the progress of the operation. +// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1.Progress] +// is used. +message Progress { + // The amount of work estimated. + int64 estimated_work = 1; + + // The amount of work completed. + int64 completed_work = 2; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto new file mode 100644 index 00000000..5d7908b9 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto @@ -0,0 +1,370 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta1; + +import "google/api/annotations.proto"; +import "google/firestore/admin/v1beta1/index.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/client.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; +option java_multiple_files = true; +option java_outer_classname = "FirestoreAdminProto"; +option java_package = "com.google.firestore.admin.v1beta1"; +option objc_class_prefix = "GCFS"; + +// The Cloud Firestore Admin API. +// +// This API provides several administrative services for Cloud Firestore. +// +// # Concepts +// +// Project, Database, Namespace, Collection, and Document are used as defined in +// the Google Cloud Firestore API. +// +// Operation: An Operation represents work being performed in the background. +// +// +// # Services +// +// ## Index +// +// The index service manages Cloud Firestore indexes. +// +// Index creation is performed asynchronously. +// An Operation resource is created for each such asynchronous operation. +// The state of the operation (including any errors encountered) +// may be queried via the Operation resource. +// +// ## Metadata +// +// Provides metadata and statistical information about data in Cloud Firestore. +// The data provided as part of this API may be stale. +// +// ## Operation +// +// The Operations collection provides a record of actions performed for the +// specified Project (including any Operations in progress). Operations are not +// created directly but through calls on other collections or resources. +// +// An Operation that is not yet done may be cancelled. The request to cancel is +// asynchronous and the Operation may continue to run for some time after the +// request to cancel is made. +// +// An Operation that is done may be deleted so that it is no longer listed as +// part of the Operation collection. 
+// +// Operations are created by service `FirestoreAdmin`, but are accessed via +// service `google.longrunning.Operations`. +service FirestoreAdmin { + option (google.api.default_host) = "firestore.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/datastore"; + + // Creates the specified index. + // A newly created index's initial state is `CREATING`. On completion of the + // returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. + // If the index already exists, the call will return an `ALREADY_EXISTS` + // status. + // + // During creation, the process could result in an error, in which case the + // index will move to the `ERROR` state. The process can be recovered by + // fixing the data that caused the error, removing the index with + // [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with + // [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. + // + // Indexes with a single field cannot be created. + rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/databases/*}/indexes" + body: "index" + }; + } + + // Lists the indexes that match the specified filters. + rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { + option (google.api.http) = { + get: "/v1beta1/{parent=projects/*/databases/*}/indexes" + }; + } + + // Gets an index. + rpc GetIndex(GetIndexRequest) returns (Index) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/databases/*/indexes/*}" + }; + } + + // Deletes an index. + rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1beta1/{name=projects/*/databases/*/indexes/*}" + }; + } + + // Exports a copy of all or a subset of documents from Google Cloud Firestore + // to another storage system, such as Google Cloud Storage. Recent updates to + // documents may not be reflected in the export. The export occurs in the + // background and its progress can be monitored and managed via the + // Operation resource that is created. The output of an export may only be + // used once the associated operation is done. If an export operation is + // cancelled before completion it may leave partial data behind in Google + // Cloud Storage. + rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{name=projects/*/databases/*}:exportDocuments" + body: "*" + }; + } + + // Imports documents into Google Cloud Firestore. Existing documents with the + // same name are overwritten. The import occurs in the background and its + // progress can be monitored and managed via the Operation resource that is + // created. If an ImportDocuments operation is cancelled, it is possible + // that a subset of the data has already been imported to Cloud Firestore. + rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{name=projects/*/databases/*}:importDocuments" + body: "*" + }; + } +} + +// Metadata for index operations. This metadata populates +// the metadata field of [google.longrunning.Operation][google.longrunning.Operation]. +message IndexOperationMetadata { + // The type of index operation. + enum OperationType { + // Unspecified. Never set by server. 
+ OPERATION_TYPE_UNSPECIFIED = 0; + + // The operation is creating the index. Initiated by a `CreateIndex` call. + CREATING_INDEX = 1; + } + + // The time that work began on the operation. + google.protobuf.Timestamp start_time = 1; + + // The time the operation ended, either successfully or otherwise. Unset if + // the operation is still active. + google.protobuf.Timestamp end_time = 2; + + // The index resource that this operation is acting on. For example: + // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` + string index = 3; + + // The type of index operation. + OperationType operation_type = 4; + + // True if the [google.longrunning.Operation] was cancelled. If the + // cancellation is in progress, cancelled will be true but + // [google.longrunning.Operation.done][google.longrunning.Operation.done] will be false. + bool cancelled = 5; + + // Progress of the existing operation, measured in number of documents. + Progress document_progress = 6; +} + +// Measures the progress of a particular metric. +message Progress { + // An estimate of how much work has been completed. Note that this may be + // greater than `work_estimated`. + int64 work_completed = 1; + + // An estimate of how much work needs to be performed. Zero if the + // work estimate is unavailable. May change as work progresses. + int64 work_estimated = 2; +} + +// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. +message CreateIndexRequest { + // The name of the database this index will apply to. For example: + // `projects/{project_id}/databases/{database_id}` + string parent = 1; + + // The index to create. The name and state fields are output only and will be + // ignored. Certain single field indexes cannot be created or deleted. + Index index = 2; +} + +// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex]. +message GetIndexRequest { + // The name of the index. For example: + // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` + string name = 1; +} + +// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. +message ListIndexesRequest { + // The database name. For example: + // `projects/{project_id}/databases/{database_id}` + string parent = 1; + + string filter = 2; + + // The standard List page size. + int32 page_size = 3; + + // The standard List page token. + string page_token = 4; +} + +// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex]. +message DeleteIndexRequest { + // The index name. For example: + // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` + string name = 1; +} + +// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. +message ListIndexesResponse { + // The indexes. + repeated Index indexes = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ExportDocuments]. +message ExportDocumentsRequest { + // Database to export. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1; + + // Which collection ids to export. Unspecified means all collections. + repeated string collection_ids = 3; + + // The output URI. 
Currently only supports Google Cloud Storage URIs of the + // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name + // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional + // Google Cloud Storage namespace path. When + // choosing a name, be sure to consider Google Cloud Storage naming + // guidelines: https://cloud.google.com/storage/docs/naming. + // If the URI is a bucket (without a namespace path), a prefix will be + // generated based on the start time. + string output_uri_prefix = 4; +} + +// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ImportDocuments]. +message ImportDocumentsRequest { + // Database to import into. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1; + + // Which collection ids to import. Unspecified means all collections included + // in the import. + repeated string collection_ids = 3; + + // Location of the exported files. + // This must match the output_uri_prefix of an ExportDocumentsResponse from + // an export that has completed successfully. + // See: + // [google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix]. + string input_uri_prefix = 4; +} + +// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. +message ExportDocumentsResponse { + // Location of the output files. This can be used to begin an import + // into Cloud Firestore (this project or another project) after the operation + // completes successfully. + string output_uri_prefix = 1; +} + +// Metadata for ExportDocuments operations. +message ExportDocumentsMetadata { + // The time that work began on the operation. + google.protobuf.Timestamp start_time = 1; + + // The time the operation ended, either successfully or otherwise. Unset if + // the operation is still active. + google.protobuf.Timestamp end_time = 2; + + // The state of the export operation. + OperationState operation_state = 3; + + // An estimate of the number of documents processed. + Progress progress_documents = 4; + + // An estimate of the number of bytes processed. + Progress progress_bytes = 5; + + // Which collection ids are being exported. + repeated string collection_ids = 6; + + // Where the entities are being exported to. + string output_uri_prefix = 7; +} + +// Metadata for ImportDocuments operations. +message ImportDocumentsMetadata { + // The time that work began on the operation. + google.protobuf.Timestamp start_time = 1; + + // The time the operation ended, either successfully or otherwise. Unset if + // the operation is still active. + google.protobuf.Timestamp end_time = 2; + + // The state of the import operation. + OperationState operation_state = 3; + + // An estimate of the number of documents processed. + Progress progress_documents = 4; + + // An estimate of the number of bytes processed. + Progress progress_bytes = 5; + + // Which collection ids are being imported. + repeated string collection_ids = 6; + + // The location of the documents being imported. + string input_uri_prefix = 7; +} + +// The various possible states for an ongoing Operation. +enum OperationState { + // Unspecified. + STATE_UNSPECIFIED = 0; + + // Request is being prepared for processing. + INITIALIZING = 1; + + // Request is actively being processed. 
+ PROCESSING = 2; + + // Request is in the process of being cancelled after user called + // google.longrunning.Operations.CancelOperation on the operation. + CANCELLING = 3; + + // Request has been processed and is in its finalization stage. + FINALIZING = 4; + + // Request has completed successfully. + SUCCESSFUL = 5; + + // Request has finished being processed, but encountered an error. + FAILED = 6; + + // Request has finished being cancelled after user called + // google.longrunning.Operations.CancelOperation. + CANCELLED = 7; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/index.proto b/src/proto/proto/google/firestore/admin/v1beta1/index.proto new file mode 100644 index 00000000..0ca95985 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta1/index.proto @@ -0,0 +1,101 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta1; + +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; +option java_multiple_files = true; +option java_outer_classname = "IndexProto"; +option java_package = "com.google.firestore.admin.v1beta1"; +option objc_class_prefix = "GCFS"; + +// A field of an index. +message IndexField { + // The mode determines how a field is indexed. + enum Mode { + // The mode is unspecified. + MODE_UNSPECIFIED = 0; + + // The field's values are indexed so as to support sequencing in + // ascending order and also query by <, >, <=, >=, and =. + ASCENDING = 2; + + // The field's values are indexed so as to support sequencing in + // descending order and also query by <, >, <=, >=, and =. + DESCENDING = 3; + + // The field's array values are indexed so as to support membership using + // ARRAY_CONTAINS queries. + ARRAY_CONTAINS = 4; + } + + // The path of the field. Must match the field path specification described + // by [google.firestore.v1beta1.Document.fields][fields]. + // Special field path `__name__` may be used by itself or at the end of a + // path. `__type__` may be used only at the end of path. + string field_path = 1; + + // The field's mode. + Mode mode = 2; +} + +// An index definition. +message Index { + // The state of an index. During index creation, an index will be in the + // `CREATING` state. If the index is created successfully, it will transition + // to the `READY` state. If the index is not able to be created, it will + // transition to the `ERROR` state. + enum State { + // The state is unspecified. + STATE_UNSPECIFIED = 0; + + // The index is being created. + // There is an active long-running operation for the index. + // The index is updated when writing a document. + // Some index data may exist. + CREATING = 3; + + // The index is ready to be used. + // The index is updated when writing a document. + // The index is fully populated from all stored documents it applies to. 
+ READY = 2; + + // The index was being created, but something went wrong. + // There is no active long-running operation for the index, + // and the most recently finished long-running operation failed. + // The index is not updated when writing a document. + // Some index data may exist. + ERROR = 5; + } + + // The resource name of the index. + // Output only. + string name = 1; + + // The collection ID to which this index applies. Required. + string collection_id = 2; + + // The fields to index. + repeated IndexField fields = 3; + + // The state of the index. + // Output only. + State state = 6; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/location.proto b/src/proto/proto/google/firestore/admin/v1beta1/location.proto new file mode 100644 index 00000000..2201b07d --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta1/location.proto @@ -0,0 +1,33 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta1; + +import "google/type/latlng.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; +option java_multiple_files = true; +option java_outer_classname = "LocationProto"; +option java_package = "com.google.firestore.admin.v1beta1"; +option objc_class_prefix = "GCFS"; + +// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. +message LocationMetadata { + +} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/field.proto b/src/proto/proto/google/firestore/admin/v1beta2/field.proto new file mode 100644 index 00000000..cec35519 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta2/field.proto @@ -0,0 +1,92 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta2; + +import "google/firestore/admin/v1beta2/index.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; +option java_multiple_files = true; +option java_outer_classname = "FieldProto"; +option java_package = "com.google.firestore.admin.v1beta2"; +option objc_class_prefix = "GCFS"; + +// Represents a single field in the database. 
+// +// Fields are grouped by their "Collection Group", which represent all +// collections in the database with the same id. +message Field { + // The index configuration for this field. + message IndexConfig { + // The indexes supported for this field. + repeated Index indexes = 1; + + // Output only. When true, the `Field`'s index configuration is set from the + // configuration specified by the `ancestor_field`. + // When false, the `Field`'s index configuration is defined explicitly. + bool uses_ancestor_config = 2; + + // Output only. Specifies the resource name of the `Field` from which this field's + // index configuration is set (when `uses_ancestor_config` is true), + // or from which it *would* be set if this field had no index configuration + // (when `uses_ancestor_config` is false). + string ancestor_field = 3; + + // Output only + // When true, the `Field`'s index configuration is in the process of being + // reverted. Once complete, the index config will transition to the same + // state as the field specified by `ancestor_field`, at which point + // `uses_ancestor_config` will be `true` and `reverting` will be `false`. + bool reverting = 4; + } + + // A field name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` + // + // A field path may be a simple field name, e.g. `address` or a path to fields + // within map_value , e.g. `address.city`, + // or a special field path. The only valid special field is `*`, which + // represents any field. + // + // Field paths may be quoted using ` (backtick). The only character that needs + // to be escaped within a quoted field path is the backtick character itself, + // escaped using a backslash. Special characters in field paths that + // must be quoted include: `*`, `.`, + // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. + // + // Examples: + // (Note: Comments here are written in markdown syntax, so there is an + // additional layer of backticks to represent a code block) + // `\`address.city\`` represents a field named `address.city`, not the map key + // `city` in the field `address`. + // `\`*\`` represents a field named `*`, not any field. + // + // A special `Field` contains the default indexing settings for all fields. + // This field's resource name is: + // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` + // Indexes defined on this `Field` will be applied to all fields which do not + // have their own `Field` index configuration. + string name = 1; + + // The index configuration for this field. If unset, field indexing will + // revert to the configuration defined by the `ancestor_field`. To + // explicitly remove all indexes for this field, specify an index config + // with an empty list of indexes. + IndexConfig index_config = 2; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto new file mode 100644 index 00000000..56ca764b --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto @@ -0,0 +1,278 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta2; + +import "google/api/annotations.proto"; +import "google/firestore/admin/v1beta2/field.proto"; +import "google/firestore/admin/v1beta2/index.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/api/client.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; +option java_multiple_files = true; +option java_outer_classname = "FirestoreAdminProto"; +option java_package = "com.google.firestore.admin.v1beta2"; +option objc_class_prefix = "GCFS"; + +// Operations are created by service `FirestoreAdmin`, but are accessed via +// service `google.longrunning.Operations`. +service FirestoreAdmin { + option (google.api.default_host) = "firestore.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/datastore"; + + // Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] + // which may be used to track the status of the creation. The metadata for + // the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1beta2.IndexOperationMetadata]. + rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/indexes" + body: "index" + }; + } + + // Lists composite indexes. + rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { + option (google.api.http) = { + get: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/indexes" + }; + } + + // Gets a composite index. + rpc GetIndex(GetIndexRequest) returns (Index) { + option (google.api.http) = { + get: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" + }; + } + + // Deletes a composite index. + rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" + }; + } + + // Gets the metadata and configuration for a Field. + rpc GetField(GetFieldRequest) returns (Field) { + option (google.api.http) = { + get: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/fields/*}" + }; + } + + // Updates a field configuration. Currently, field updates apply only to + // single field index configuration. However, calls to + // [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField] should provide a field mask to avoid + // changing any configuration that the caller isn't aware of. The field mask + // should be specified as: `{ paths: "index_config" }`. + // + // This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to + // track the status of the field update. 
The metadata for + // the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1beta2.FieldOperationMetadata]. + // + // To configure the default field settings for the database, use + // the special `Field` with resource name: + // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. + rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v1beta2/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}" + body: "field" + }; + } + + // Lists the field configuration and metadata for this database. + // + // Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] only supports listing fields + // that have been explicitly overridden. To issue this query, call + // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] with the filter set to + // `indexConfig.usesAncestorConfig:false`. + rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) { + option (google.api.http) = { + get: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/fields" + }; + } + + // Exports a copy of all or a subset of documents from Google Cloud Firestore + // to another storage system, such as Google Cloud Storage. Recent updates to + // documents may not be reflected in the export. The export occurs in the + // background and its progress can be monitored and managed via the + // Operation resource that is created. The output of an export may only be + // used once the associated operation is done. If an export operation is + // cancelled before completion it may leave partial data behind in Google + // Cloud Storage. + rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/{name=projects/*/databases/*}:exportDocuments" + body: "*" + }; + } + + // Imports documents into Google Cloud Firestore. Existing documents with the + // same name are overwritten. The import occurs in the background and its + // progress can be monitored and managed via the Operation resource that is + // created. If an ImportDocuments operation is cancelled, it is possible + // that a subset of the data has already been imported to Cloud Firestore. + rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/{name=projects/*/databases/*}:importDocuments" + body: "*" + }; + } +} + +// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex]. +message CreateIndexRequest { + // A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1; + + // The composite index to create. + Index index = 2; +} + +// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes]. +message ListIndexesRequest { + // A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1; + + // The filter to apply to list results. + string filter = 2; + + // The number of results to return. + int32 page_size = 3; + + // A page token, returned from a previous call to + // [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes], that may be used to get the next + // page of results. 
+ string page_token = 4; +} + +// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes]. +message ListIndexesResponse { + // The requested indexes. + repeated Index indexes = 1; + + // A page token that may be used to request another page of results. If blank, + // this is the last page. + string next_page_token = 2; +} + +// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta2.FirestoreAdmin.GetIndex]. +message GetIndexRequest { + // A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string name = 1; +} + +// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta2.FirestoreAdmin.DeleteIndex]. +message DeleteIndexRequest { + // A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string name = 1; +} + +// The request for [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField]. +message UpdateFieldRequest { + // The field to be updated. + Field field = 1; + + // A mask, relative to the field. If specified, only configuration specified + // by this field_mask will be updated in the field. + google.protobuf.FieldMask update_mask = 2; +} + +// The request for [FirestoreAdmin.GetField][google.firestore.admin.v1beta2.FirestoreAdmin.GetField]. +message GetFieldRequest { + // A name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}` + string name = 1; +} + +// The request for [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields]. +message ListFieldsRequest { + // A parent name of the form + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` + string parent = 1; + + // The filter to apply to list results. Currently, + // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] only supports listing fields + // that have been explicitly overridden. To issue this query, call + // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] with the filter set to + // `indexConfig.usesAncestorConfig:false`. + string filter = 2; + + // The number of results to return. + int32 page_size = 3; + + // A page token, returned from a previous call to + // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields], that may be used to get the next + // page of results. + string page_token = 4; +} + +// The response for [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields]. +message ListFieldsResponse { + // The requested fields. + repeated Field fields = 1; + + // A page token that may be used to request another page of results. If blank, + // this is the last page. + string next_page_token = 2; +} + +// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments]. +message ExportDocumentsRequest { + // Database to export. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1; + + // Which collection ids to export. Unspecified means all collections. + repeated string collection_ids = 2; + + // The output URI. 
Currently only supports Google Cloud Storage URIs of the + // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name + // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional + // Google Cloud Storage namespace path. When + // choosing a name, be sure to consider Google Cloud Storage naming + // guidelines: https://cloud.google.com/storage/docs/naming. + // If the URI is a bucket (without a namespace path), a prefix will be + // generated based on the start time. + string output_uri_prefix = 3; +} + +// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments]. +message ImportDocumentsRequest { + // Database to import into. Should be of the form: + // `projects/{project_id}/databases/{database_id}`. + string name = 1; + + // Which collection ids to import. Unspecified means all collections included + // in the import. + repeated string collection_ids = 2; + + // Location of the exported files. + // This must match the output_uri_prefix of an ExportDocumentsResponse from + // an export that has completed successfully. + // See: + // [google.firestore.admin.v1beta2.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta2.ExportDocumentsResponse.output_uri_prefix]. + string input_uri_prefix = 3; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/index.proto b/src/proto/proto/google/firestore/admin/v1beta2/index.proto new file mode 100644 index 00000000..c5dc6b98 --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta2/index.proto @@ -0,0 +1,150 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta2; + +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; +option java_multiple_files = true; +option java_outer_classname = "IndexProto"; +option java_package = "com.google.firestore.admin.v1beta2"; +option objc_class_prefix = "GCFS"; + +// Cloud Firestore indexes enable simple and complex queries against +// documents in a database. +message Index { + // A field in an index. + // The field_path describes which field is indexed, the value_mode describes + // how the field value is indexed. + message IndexField { + // The supported orderings. + enum Order { + // The ordering is unspecified. Not a valid option. + ORDER_UNSPECIFIED = 0; + + // The field is ordered by ascending field value. + ASCENDING = 1; + + // The field is ordered by descending field value. + DESCENDING = 2; + } + + // The supported array value configurations. + enum ArrayConfig { + // The index does not support additional array queries. + ARRAY_CONFIG_UNSPECIFIED = 0; + + // The index supports array containment queries. + CONTAINS = 1; + } + + // Can be __name__. + // For single field indexes, this must match the name of the field or may + // be omitted. 
+ string field_path = 1; + + // How the field value is indexed. + oneof value_mode { + // Indicates that this field supports ordering by the specified order or + // comparing using =, <, <=, >, >=. + Order order = 2; + + // Indicates that this field supports operations on `array_value`s. + ArrayConfig array_config = 3; + } + } + + // Query Scope defines the scope at which a query is run. This is specified on + // a StructuredQuery's `from` field. + enum QueryScope { + // The query scope is unspecified. Not a valid option. + QUERY_SCOPE_UNSPECIFIED = 0; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified + // at query time, and that has the collection id specified by the index. + COLLECTION = 1; + + // Indexes with a collection group query scope specified allow queries + // against all collections that has the collection id specified by the + // index. + COLLECTION_GROUP = 2; + } + + // The state of an index. During index creation, an index will be in the + // `CREATING` state. If the index is created successfully, it will transition + // to the `READY` state. If the index creation encounters a problem, the index + // will transition to the `NEEDS_REPAIR` state. + enum State { + // The state is unspecified. + STATE_UNSPECIFIED = 0; + + // The index is being created. + // There is an active long-running operation for the index. + // The index is updated when writing a document. + // Some index data may exist. + CREATING = 1; + + // The index is ready to be used. + // The index is updated when writing a document. + // The index is fully populated from all stored documents it applies to. + READY = 2; + + // The index was being created, but something went wrong. + // There is no active long-running operation for the index, + // and the most recently finished long-running operation failed. + // The index is not updated when writing a document. + // Some index data may exist. + // Use the google.longrunning.Operations API to determine why the operation + // that last attempted to create this index failed, then re-create the + // index. + NEEDS_REPAIR = 3; + } + + // Output only. A server defined name for this index. + // The form of this name for composite indexes will be: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` + // For single field indexes, this field will be empty. + string name = 1; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified at + // query time, and that has the same collection id. + // + // Indexes with a collection group query scope specified allow queries against + // all collections descended from a specific document, specified at query + // time, and that have the same collection id as this index. + QueryScope query_scope = 2; + + // The fields supported by this index. + // + // For composite indexes, this is always 2 or more fields. + // The last field entry is always for the field path `__name__`. If, on + // creation, `__name__` was not specified as the last field, it will be added + // automatically with the same direction as that of the last field defined. If + // the final field in a composite index is not directional, the `__name__` + // will be ordered ASCENDING (unless explicitly specified). 
+ // + // For single field indexes, this will always be exactly one entry with a + // field path equal to the field path of the associated field. + repeated IndexField fields = 3; + + // Output only. The serving state of the index. + State state = 4; +} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/operation.proto b/src/proto/proto/google/firestore/admin/v1beta2/operation.proto new file mode 100644 index 00000000..d9a1f84e --- /dev/null +++ b/src/proto/proto/google/firestore/admin/v1beta2/operation.proto @@ -0,0 +1,202 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.firestore.admin.v1beta2; + +import "google/firestore/admin/v1beta2/index.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; +option java_multiple_files = true; +option java_outer_classname = "OperationProto"; +option java_package = "com.google.firestore.admin.v1beta2"; +option objc_class_prefix = "GCFS"; + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex]. +message IndexOperationMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The index resource that this operation is acting on. For example: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` + string index = 3; + + // The state of the operation. + OperationState state = 4; + + // The progress, in documents, of this operation. + Progress progress_documents = 5; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 6; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField]. +message FieldOperationMetadata { + // Information about an index configuration change. + message IndexConfigDelta { + // Specifies how the index is changing. + enum ChangeType { + // The type of change is not specified or known. + CHANGE_TYPE_UNSPECIFIED = 0; + + // The single field index is being added. + ADD = 1; + + // The single field index is being removed. + REMOVE = 2; + } + + // Specifies how the index is changing. + ChangeType change_type = 1; + + // The index being changed. + Index index = 2; + } + + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The field resource that this operation is acting on. 
For example: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` + string field = 3; + + // A list of [IndexConfigDelta][google.firestore.admin.v1beta2.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this + // operation. + repeated IndexConfigDelta index_config_deltas = 4; + + // The state of the operation. + OperationState state = 5; + + // The progress, in documents, of this operation. + Progress document_progress = 6; + + // The progress, in bytes, of this operation. + Progress bytes_progress = 7; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments]. +message ExportDocumentsMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The state of the export operation. + OperationState operation_state = 3; + + // The progress, in documents, of this operation. + Progress progress_documents = 4; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 5; + + // Which collection ids are being exported. + repeated string collection_ids = 6; + + // Where the entities are being exported to. + string output_uri_prefix = 7; +} + +// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from +// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments]. +message ImportDocumentsMetadata { + // The time this operation started. + google.protobuf.Timestamp start_time = 1; + + // The time this operation completed. Will be unset if operation still in + // progress. + google.protobuf.Timestamp end_time = 2; + + // The state of the import operation. + OperationState operation_state = 3; + + // The progress, in documents, of this operation. + Progress progress_documents = 4; + + // The progress, in bytes, of this operation. + Progress progress_bytes = 5; + + // Which collection ids are being imported. + repeated string collection_ids = 6; + + // The location of the documents being imported. + string input_uri_prefix = 7; +} + +// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. +message ExportDocumentsResponse { + // Location of the output files. This can be used to begin an import + // into Cloud Firestore (this project or another project) after the operation + // completes successfully. + string output_uri_prefix = 1; +} + +// Describes the state of the operation. +enum OperationState { + // Unspecified. + OPERATION_STATE_UNSPECIFIED = 0; + + // Request is being prepared for processing. + INITIALIZING = 1; + + // Request is actively being processed. + PROCESSING = 2; + + // Request is in the process of being cancelled after user called + // google.longrunning.Operations.CancelOperation on the operation. + CANCELLING = 3; + + // Request has been processed and is in its finalization stage. + FINALIZING = 4; + + // Request has completed successfully. + SUCCESSFUL = 5; + + // Request has finished being processed, but encountered an error. + FAILED = 6; + + // Request has finished being cancelled after user called + // google.longrunning.Operations.CancelOperation. + CANCELLED = 7; +} + +// Describes the progress of the operation. 
+// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1beta2.Progress] +// is used. +message Progress { + // The amount of work estimated. + int64 estimated_work = 1; + + // The amount of work completed. + int64 completed_work = 2; +} diff --git a/src/proto/proto/update.sh b/src/proto/proto/update.sh index 9f2ea2bc..e8526d96 100755 --- a/src/proto/proto/update.sh +++ b/src/proto/proto/update.sh @@ -47,6 +47,10 @@ mkdir -p "${PROTOS_DIR}/google/firestore/v1" cp googleapis/google/firestore/v1/*.proto \ "${PROTOS_DIR}/google/firestore/v1/" +mkdir -p "${PROTOS_DIR}/google/firestore/admin" +cp -rf googleapis/google/firestore/admin/* \ + "${PROTOS_DIR}/google/firestore/admin/" + mkdir -p "${PROTOS_DIR}/google/rpc" cp googleapis/google/rpc/status.proto \ "${PROTOS_DIR}/google/rpc/" @@ -55,6 +59,7 @@ mkdir -p "${PROTOS_DIR}/google/type" cp googleapis/google/type/latlng.proto \ "${PROTOS_DIR}/google/type/" + # Hack in `verify` support ex "${PROTOS_DIR}/google/firestore/v1/write.proto" < Date: Mon, 30 Jan 2023 15:04:49 +0800 Subject: [PATCH 3/9] feat: update db store --- src/crypto/Cargo.toml | 2 + src/crypto/src/db3_address.rs | 2 - src/crypto/src/id.rs | 123 +++++++++++++++++++++ src/crypto/src/lib.rs | 1 + src/node/src/abci_impl.rs | 25 ++--- src/node/src/auth_storage.rs | 21 ++-- src/proto/proto/db3_database.proto | 144 ++++++++++++++++++++++--- src/proto/proto/db3_mutation.proto | 14 +-- src/proto/proto/firestore/bundle.proto | 121 --------------------- src/proto/proto/protos.json | 2 +- src/storage/Cargo.toml | 3 + src/storage/src/db_key.rs | 68 ++++++------ src/storage/src/db_store.rs | 80 +++++++------- 13 files changed, 365 insertions(+), 241 deletions(-) create mode 100644 src/crypto/src/id.rs delete mode 100644 src/proto/proto/firestore/bundle.proto diff --git a/src/crypto/Cargo.toml b/src/crypto/Cargo.toml index 6d93b64e..4758bae3 100644 --- a/src/crypto/Cargo.toml +++ b/src/crypto/Cargo.toml @@ -35,5 +35,7 @@ derive_more = "0.99.17" fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "bbb2d02a7a64c27314721748cc4d015b00490dbe" , features=['copy_key']} bip32 = "0.4.0" slip10_ed25519 = "0.1.3" +byteorder = "1.4.3" +rust_secp256k1 = { version = "0.24.0", package = "secp256k1", features = ["bitcoin_hashes"] } diff --git a/src/crypto/src/db3_address.rs b/src/crypto/src/db3_address.rs index 5fee4b39..373ad6bc 100644 --- a/src/crypto/src/db3_address.rs +++ b/src/crypto/src/db3_address.rs @@ -91,7 +91,6 @@ impl From<&DB3PublicKey> for DB3Address { hasher.update(pk); let g_arr = hasher.finalize(); let mut res = [0u8; DB3_ADDRESS_LENGTH]; - // OK to access slice because Sha3_256 should never be shorter than DB3_ADDRESS_LENGTH. res.copy_from_slice(&AsRef::<[u8]>::as_ref(&g_arr)[..DB3_ADDRESS_LENGTH]); DB3Address(res) } @@ -104,7 +103,6 @@ impl From<&T> for DB3Address { hasher.update(pk); let g_arr = hasher.finalize(); let mut res = [0u8; DB3_ADDRESS_LENGTH]; - // OK to access slice because Sha3_256 should never be shorter than SUI_ADDRESS_LENGTH. res.copy_from_slice(&AsRef::<[u8]>::as_ref(&g_arr)[..DB3_ADDRESS_LENGTH]); DB3Address(res) } diff --git a/src/crypto/src/id.rs b/src/crypto/src/id.rs new file mode 100644 index 00000000..51969e87 --- /dev/null +++ b/src/crypto/src/id.rs @@ -0,0 +1,123 @@ +// +// id.rs +// Copyright (C) 2023 db3.network Author imotai +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+use crate::db3_address::{DB3Address, DB3_ADDRESS_LENGTH};
+use byteorder::{BigEndian, WriteBytesExt};
+use db3_error::DB3Error;
+use fastcrypto::hash::{HashFunction, Sha3_256};
+use rust_secp256k1::hashes::{sha256, Hash};
+use rust_secp256k1::ThirtyTwoByteHash;
+// it's an Ethereum-compatible account id
+#[derive(Eq, Default, PartialEq, Ord, PartialOrd, Copy, Clone)]
+pub struct AccountId {
+    pub addr: DB3Address,
+}
+
+impl AccountId {
+    pub fn new(addr: DB3Address) -> Self {
+        Self { addr }
+    }
+}
+
+#[derive(Eq, Default, PartialEq, Ord, PartialOrd, Copy, Clone)]
+pub struct TxId {
+    data: [u8; 32],
+}
+
+impl From<&[u8]> for TxId {
+    fn from(message: &[u8]) -> Self {
+        let id = sha256::Hash::hash(message);
+        Self { data: id.into_32() }
+    }
+}
+
+impl AsRef<[u8]> for TxId {
+    fn as_ref(&self) -> &[u8] {
+        &self.data[..]
+    }
+}
+
+pub const DBID_LENGTH: usize = DB3_ADDRESS_LENGTH;
+
+#[derive(Eq, Default, PartialEq, Ord, PartialOrd, Copy, Clone)]
+pub struct DbId {
+    addr: DB3Address,
+}
+
+impl DbId {
+    #[inline]
+    pub fn length() -> usize {
+        DBID_LENGTH
+    }
+
+    #[inline]
+    pub fn min_id() -> DbId {
+        DbId::from(&[std::u8::MIN; DB3_ADDRESS_LENGTH])
+    }
+
+    #[inline]
+    pub fn max_id() -> DbId {
+        DbId::from(&[std::u8::MAX; DB3_ADDRESS_LENGTH])
+    }
+}
+
+impl AsRef<[u8]> for DbId {
+    fn as_ref(&self) -> &[u8] {
+        self.addr.as_ref()
+    }
+}
+
+impl From<&[u8; DB3_ADDRESS_LENGTH]> for DbId {
+    fn from(data: &[u8; DB3_ADDRESS_LENGTH]) -> Self {
+        Self {
+            addr: DB3Address::from(data),
+        }
+    }
+}
+
+impl From<DB3Address> for DbId {
+    fn from(addr: DB3Address) -> Self {
+        Self { addr }
+    }
+}
+
+impl TryFrom<(&DB3Address, u64)> for DbId {
+    type Error = DB3Error;
+    fn try_from(input: (&DB3Address, u64)) -> std::result::Result<Self, Self::Error> {
+        let mut bs = [0u8; std::mem::size_of::<u64>()];
+        bs.as_mut()
+            .write_u64::<BigEndian>(input.1)
+            .map_err(|e| DB3Error::KeyCodecError(format!("{e}")))?;
+        let mut hasher = Sha3_256::default();
+        hasher.update(bs.as_ref());
+        hasher.update(input.0);
+        let g_arr = hasher.finalize();
+        let mut res = [0u8; DB3_ADDRESS_LENGTH];
+        res.copy_from_slice(&AsRef::<[u8]>::as_ref(&g_arr)[..DB3_ADDRESS_LENGTH]);
+        Ok(Self {
+            addr: DB3Address::from(&res),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn it_works() {}
+}
diff --git a/src/crypto/src/lib.rs b/src/crypto/src/lib.rs
index a38371ed..e1fc48d0 100644
--- a/src/crypto/src/lib.rs
+++ b/src/crypto/src/lib.rs
@@ -24,5 +24,6 @@ pub mod db3_serde;
 pub mod db3_signature;
 pub mod db3_signer;
 pub mod db3_verifier;
+pub mod id;
 pub mod key_derive;
 pub mod signature_scheme;
diff --git a/src/node/src/abci_impl.rs b/src/node/src/abci_impl.rs
index 8e1aa665..c913f7e5 100644
--- a/src/node/src/abci_impl.rs
+++ b/src/node/src/abci_impl.rs
@@ -17,18 +17,15 @@ use shadow_rs::shadow;
 shadow!(build);
 
-use super::auth_storage::Hash;
 use crate::node_storage::NodeStorage;
 use bytes::Bytes;
-use db3_crypto::db3_address::DB3Address as AccountAddress;
-use db3_crypto::db3_verifier;
-use db3_proto::db3_mutation_proto::{DatabaseRequest, Mutation, PayloadType, WriteRequest};
+use db3_crypto::{db3_address::DB3Address as
AccountAddress, db3_verifier, id::TxId}; +use db3_proto::db3_mutation_proto::{DatabaseMutation, Mutation, PayloadType, WriteRequest}; use db3_proto::db3_session_proto::{QuerySession, QuerySessionInfo}; use db3_session::query_session_verifier; use db3_storage::kv_store::KvStore; use hex; use prost::Message; -use rust_secp256k1::Message as HashMessage; use std::pin::Pin; use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; @@ -50,11 +47,11 @@ pub struct NodeState { #[derive(Clone)] pub struct AbciImpl { node_store: Arc>>>, - pending_mutation: Arc>>, + pending_mutation: Arc>>, pending_query_session: - Arc>>, + Arc>>, node_state: Arc, - pending_databases: Arc>>, + pending_databases: Arc>>, } impl AbciImpl { @@ -244,9 +241,7 @@ impl Application for AbciImpl { fn deliver_tx(&self, request: RequestDeliverTx) -> ResponseDeliverTx { //TODO match the hash fucntion with tendermint - let mutation_id = HashMessage::from_hashed_data::( - request.tx.as_ref(), - ); + let txId = TxId::from(request.tx.as_ref()); if let Ok(wrequest) = WriteRequest::decode(request.tx.as_ref()) { if let Ok(account_id) = db3_verifier::DB3Verifier::verify( wrequest.payload.as_ref(), @@ -255,10 +250,10 @@ impl Application for AbciImpl { let payload_type = PayloadType::from_i32(wrequest.payload_type); match payload_type { Some(PayloadType::DatabasePayload) => { - if let Ok(dr) = DatabaseRequest::decode(wrequest.payload.as_ref()) { + if let Ok(dr) = DatabaseMutation::decode(wrequest.payload.as_ref()) { match self.pending_databases.lock() { Ok(mut s) => { - s.push((account_id.addr, dr)); + s.push((account_id.addr, dr, txId)); return ResponseDeliverTx { code: 0, data: Bytes::new(), @@ -383,7 +378,7 @@ impl Application for AbciImpl { todo!(); } }; - let pending_databases: Vec<(AccountAddress, DatabaseRequest)> = + let pending_databases: Vec<(AccountAddress, DatabaseMutation)> = match self.pending_databases.lock() { Ok(mut q) => { let clone_q = q.drain(..).collect(); @@ -446,7 +441,6 @@ impl Application for AbciImpl { { //TODO how to revert if let Ok(hash) = s.commit() { - span.exit(); ResponseCommit { data: Bytes::copy_from_slice(&hash), retain_height: 0, @@ -462,6 +456,7 @@ impl Application for AbciImpl { retain_height: 0, } } + span.exit(); } Err(_) => { todo!(); diff --git a/src/node/src/auth_storage.rs b/src/node/src/auth_storage.rs index 942ea7eb..5f65f8b8 100644 --- a/src/node/src/auth_storage.rs +++ b/src/node/src/auth_storage.rs @@ -22,7 +22,7 @@ use db3_proto::db3_base_proto::Units; use db3_proto::db3_bill_proto::{Bill, BillType}; use db3_proto::db3_database_proto::Database; use db3_proto::db3_mutation_proto::{ - database_request::Body, DatabaseRequest, KvPair, Mutation, MutationAction, + DatabaseMutation, KvPair, Mutation, MutationAction, }; use db3_proto::db3_node_proto::{BatchGetKey, BatchGetValue, RangeKey, RangeValue}; use db3_proto::db3_session_proto::QuerySessionInfo; @@ -245,18 +245,15 @@ impl AuthStorage { Ok(gas_fee) } - pub fn apply_database(&mut self, addr: &DB3Address, database: &DatabaseRequest) -> Result<()> { + pub fn apply_database( + &mut self, + sender: &DB3Address, + nonce: u64, + tx: &TxId, + mutation: &DatabaseMutation, + ) -> Result<()> { let db: Pin<&mut Merk> = Pin::as_mut(&mut self.db); - match &database.body { - Some(Body::Name(name)) => { - DbStore::apply_del(db, addr, &name) - //TODO delete data in kv_store - } - Some(Body::Database(d)) => DbStore::apply_add(db, addr, &d), - _ => { - todo!() - } - } + DbStore::apply_mutation(db, sender, nonce, tx, mutation) } pub fn apply_mutation( diff 
--git a/src/proto/proto/db3_database.proto b/src/proto/proto/db3_database.proto index 55e6b58d..2ee9630b 100644 --- a/src/proto/proto/db3_database.proto +++ b/src/proto/proto/db3_database.proto @@ -20,23 +20,139 @@ syntax = "proto3"; import "db3_base.proto"; package db3_database_proto; -// -// the definition of price for query -// -message QueryPrice { - // the price - db3_base_proto.Price price = 1; - // the count of query - uint64 query_count = 2; -} - // // the definition of database // message Database { + bytes address = 1; + // the owner of the Database + bytes sender = 2; + bytes tx = 3; + repeated Collection collections = 4; +} + +message Collection { string name = 1; - QueryPrice price = 2; - // the time when database was created - uint64 ts = 3; - string description = 4; + repeated Index index_list = 2; +} + +message Index { + // Query Scope defines the scope at which a query is run. This is specified on + // a StructuredQuery's `from` field. + enum QueryScope { + // The query scope is unspecified. Not a valid option. + QUERY_SCOPE_UNSPECIFIED = 0; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified + // at query time, and that has the collection id specified by the index. + COLLECTION = 1; + + // Indexes with a collection group query scope specified allow queries + // against all collections that has the collection id specified by the + // index. + COLLECTION_GROUP = 2; + } + + // A field in an index. + // The field_path describes which field is indexed, the value_mode describes + // how the field value is indexed. + message IndexField { + // The supported orderings. + enum Order { + // The ordering is unspecified. Not a valid option. + ORDER_UNSPECIFIED = 0; + + // The field is ordered by ascending field value. + ASCENDING = 1; + + // The field is ordered by descending field value. + DESCENDING = 2; + } + + // The supported array value configurations. + enum ArrayConfig { + // The index does not support additional array queries. + ARRAY_CONFIG_UNSPECIFIED = 0; + + // The index supports array containment queries. + CONTAINS = 1; + } + + // Can be __name__. + // For single field indexes, this must match the name of the field or may + // be omitted. + string field_path = 1; + + // How the field value is indexed. + oneof value_mode { + // Indicates that this field supports ordering by the specified order or + // comparing using =, !=, <, <=, >, >=. + Order order = 2; + + // Indicates that this field supports operations on `array_value`s. + ArrayConfig array_config = 3; + } + } + + // The state of an index. During index creation, an index will be in the + // `CREATING` state. If the index is created successfully, it will transition + // to the `READY` state. If the index creation encounters a problem, the index + // will transition to the `NEEDS_REPAIR` state. + enum State { + // The state is unspecified. + STATE_UNSPECIFIED = 0; + + // The index is being created. + // There is an active long-running operation for the index. + // The index is updated when writing a document. + // Some index data may exist. + CREATING = 1; + + // The index is ready to be used. + // The index is updated when writing a document. + // The index is fully populated from all stored documents it applies to. + READY = 2; + + // The index was being created, but something went wrong. + // There is no active long-running operation for the index, + // and the most recently finished long-running operation failed. 
+ // The index is not updated when writing a document. + // Some index data may exist. + // Use the google.longrunning.Operations API to determine why the operation + // that last attempted to create this index failed, then re-create the + // index. + NEEDS_REPAIR = 3; + } + + // Output only. A server defined name for this index. + // The form of this name for composite indexes will be: + // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` + // For single field indexes, this field will be empty. + string name = 1; + + // Indexes with a collection query scope specified allow queries + // against a collection that is the child of a specific document, specified at + // query time, and that has the same collection id. + // + // Indexes with a collection group query scope specified allow queries against + // all collections descended from a specific document, specified at query + // time, and that have the same collection id as this index. + QueryScope query_scope = 2; + + // The fields supported by this index. + // + // For composite indexes, this is always 2 or more fields. + // The last field entry is always for the field path `__name__`. If, on + // creation, `__name__` was not specified as the last field, it will be added + // automatically with the same direction as that of the last field defined. If + // the final field in a composite index is not directional, the `__name__` + // will be ordered ASCENDING (unless explicitly specified). + // + // For single field indexes, this will always be exactly one entry with a + // field path equal to the field path of the associated field. + repeated IndexField fields = 3; + + // Output only. The serving state of the index. + State state = 4; } diff --git a/src/proto/proto/db3_mutation.proto b/src/proto/proto/db3_mutation.proto index a004e1f8..c5dea49a 100644 --- a/src/proto/proto/db3_mutation.proto +++ b/src/proto/proto/db3_mutation.proto @@ -27,12 +27,14 @@ enum MutationAction { Nonce= 2; } -message DatabaseRequest { - oneof body { - string name = 1; - db3_database_proto.Database database = 2; - } - db3_base_proto.BroadcastMeta meta = 3; +message DatabaseMutation { + db3_base_proto.BroadcastMeta meta = 1; + repeated IndexMutation index_mutations = 2; +} + +message IndexMutation { + repeated db3_database_proto.Index index = 1; + string collection_id = 2; } message KVPair { diff --git a/src/proto/proto/firestore/bundle.proto b/src/proto/proto/firestore/bundle.proto deleted file mode 100644 index ee7954e6..00000000 --- a/src/proto/proto/firestore/bundle.proto +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2020 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// This file defines the format of Firestore bundle file/stream. It is not a part of the -// Firestore API, only a specification used by Server and Client SDK to write and read -// bundles. 
- -syntax = "proto3"; - -package firestore; - -import "google/firestore/v1/document.proto"; -import "google/firestore/v1/query.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Firestore.Proto"; -option go_package = "google.golang.org/genproto/firestore/proto;firestore"; -option java_multiple_files = true; -option java_outer_classname = "BundleProto"; -option java_package = "com.google.firestore.proto"; -option objc_class_prefix = "FSTPB"; -option php_namespace = "Firestore\\Proto"; - -// Describes a query saved in the bundle. -message BundledQuery { - // The parent resource name. - string parent = 1; - - // The query to run. - oneof query_type { - // A structured query. - google.firestore.v1.StructuredQuery structured_query = 2; - } - - // If the query is a limit query, should the limit be applied to the beginning or - // the end of results. - enum LimitType { - FIRST = 0; - LAST = 1; - } - LimitType limit_type = 3; -} - -// A Query associated with a name, created as part of the bundle file, and can be read -// by client SDKs once the bundle containing them is loaded. -message NamedQuery { - // Name of the query, such that client can use the name to load this query - // from bundle, and resume from when the query results are materialized - // into this bundle. - string name = 1; - - // The query saved in the bundle. - BundledQuery bundled_query = 2; - - // The read time of the query, when it is used to build the bundle. This is useful to - // resume the query from the bundle, once it is loaded by client SDKs. - google.protobuf.Timestamp read_time = 3; -} - -// Metadata describing a Firestore document saved in the bundle. -message BundledDocumentMetadata { - // The document key of a bundled document. - string name = 1; - - // The snapshot version of the document data bundled. - google.protobuf.Timestamp read_time = 2; - - // Whether the document exists. - bool exists = 3; - - // The names of the queries in this bundle that this document matches to. - repeated string queries = 4; -} - -// Metadata describing the bundle file/stream. -message BundleMetadata { - // The ID of the bundle. - string id = 1; - - // Time at which the documents snapshot is taken for this bundle. - google.protobuf.Timestamp create_time = 2; - - // The schema version of the bundle. - uint32 version = 3; - - // The number of documents in the bundle. - uint32 total_documents = 4; - - // The size of the bundle in bytes, excluding this `BundleMetadata`. - uint64 total_bytes = 5; -} - -// A Firestore bundle is a length-prefixed stream of JSON representations of -// `BundleElement`. -// Only one `BundleMetadata` is expected, and it should be the first element. -// The named queries follow after `metadata`. If a document exists when the -// bundle is built, `document_metadata` is immediately followed by the -// `document`, otherwise `document_metadata` will present by itself. 
-message BundleElement { - oneof element_type { - BundleMetadata metadata = 1; - - NamedQuery named_query = 2; - - BundledDocumentMetadata document_metadata = 3; - - google.firestore.v1.Document document = 4; - } -} diff --git a/src/proto/proto/protos.json b/src/proto/proto/protos.json index 2b6cac8d..1586bfb8 100644 --- a/src/proto/proto/protos.json +++ b/src/proto/proto/protos.json @@ -2822,4 +2822,4 @@ } } } -} \ No newline at end of file +} diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml index 6053b154..e3b3476a 100644 --- a/src/storage/Cargo.toml +++ b/src/storage/Cargo.toml @@ -16,9 +16,12 @@ db3-types={ path = "../types" } db3-crypto={ path = "../crypto" } tracing = "0.1" merkdb = {version= "2.0.0", path = "../../thirdparty/merkdb"} +fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "bbb2d02a7a64c27314721748cc4d015b00490dbe" , features=['copy_key']} hex = "0.4.3" bytes = "1" prost = "0.11" prost-types = "0.11" tempdir = "0.3.7" byteorder = "1.4.3" +serde_json = "1.0.88" +serde = { version = "1.0.144", features = ["derive"] } diff --git a/src/storage/src/db_key.rs b/src/storage/src/db_key.rs index 012d6670..871f9e6c 100644 --- a/src/storage/src/db_key.rs +++ b/src/storage/src/db_key.rs @@ -14,49 +14,58 @@ // See the License for the specific language governing permissions and // limitations under the License. -use db3_crypto::db3_address::{DB3Address, DB3_ADDRESS_LENGTH}; +use db3_crypto::id::{DbId, DBID_LENGTH}; use db3_error::{DB3Error, Result}; -/// account_address + _NS_ + ns -pub struct DbKey<'a>(pub DB3Address, pub &'a [u8]); -const DATABASE: &str = "_DB_"; -impl<'a> DbKey<'a> { +/// /db/{db_address} +pub struct DbKey(pub DbId); + +const DATABASE: &str = "/db/"; + +impl DbKey { /// - /// encode the key + /// encode the database key /// pub fn encode(&self) -> Result> { - let mut encoded_key = self.0.as_ref().to_vec(); - encoded_key.extend_from_slice(DATABASE.as_bytes()); - encoded_key.extend_from_slice(self.1); + let mut encoded_key = DATABASE.as_bytes().to_vec(); + encoded_key.extend_from_slice(self.0.as_ref()); Ok(encoded_key) } /// - /// decode the key + /// decode the database key /// - pub fn decode(data: &'a [u8]) -> Result { - const MIN_KEY_TOTAL_LEN: usize = DB3_ADDRESS_LENGTH + DATABASE.len(); + pub fn decode(data: &[u8]) -> Result { + const MIN_KEY_TOTAL_LEN: usize = DBID_LENGTH + DATABASE.len(); if data.len() <= MIN_KEY_TOTAL_LEN { return Err(DB3Error::KeyCodecError( "the length of data is invalid".to_string(), )); } - let key_start_offset = MIN_KEY_TOTAL_LEN; - let data_slice: &[u8; DB3_ADDRESS_LENGTH] = &data[..DB3_ADDRESS_LENGTH] + let address_offset = DATABASE.len(); + let data_slice: &[u8; DBID_LENGTH] = &data[address_offset..address_offset + DbId::length()] .try_into() .map_err(|e| DB3Error::KeyCodecError(format!("{e}")))?; - let addr = DB3Address::from(data_slice); - Ok(Self(addr, &data[key_start_offset..])) + let id = DbId::from(data_slice); + Ok(Self(id)) + } + + #[inline] + pub fn max() -> Self { + DbKey(DbId::max_id()) + } + + #[inline] + pub fn min() -> Self { + DbKey(DbId::min_id()) } } #[cfg(test)] mod tests { use super::*; - use db3_crypto::key_derive; use db3_crypto::signature_scheme::SignatureScheme; - fn gen_address() -> DB3Address { let seed: [u8; 32] = [0; 32]; let (address, _) = @@ -67,28 +76,21 @@ mod tests { #[test] fn it_key_serde() { let addr = gen_address(); - let ns: &str = "ns1"; - let key = DbKey(addr, ns.as_bytes()); + let id = DbId::from(addr); + let key = DbKey(id); let key_encoded = key.encode(); 
assert!(key_encoded.is_ok()); let key_decoded = DbKey::decode(key_encoded.as_ref().unwrap()); assert!(key_decoded.is_ok()); let key2 = key_decoded.unwrap(); - assert!(key2.0 == addr); - assert_eq!(key2.1, ns.as_bytes().to_vec()); + assert!(key2.0 == id); } #[test] - fn it_key_serde_cmp() -> Result<()> { - let addr = gen_address(); - let ns: &str = "ns1"; - let key = DbKey(addr, ns.as_bytes()); - let key_encoded1 = key.encode()?; - let ns: &str = "ns2"; - let key = DbKey(addr, ns.as_bytes()); - let key_encoded2 = key.encode()?; - assert!(key_encoded1.cmp(&key_encoded1) == std::cmp::Ordering::Equal); - assert!(key_encoded1.cmp(&key_encoded2) == std::cmp::Ordering::Less); - Ok(()) + fn it_cmp() -> Result<()> { + let min = DbKey::mix().encode()?; + let max = DbKey::max().encode()?; + assert!(min.cmp(&min) == std::cmp::Ordering::Equal); + assert!(min.cmp(&max) == std::cmp::Ordering::Less); } } diff --git a/src/storage/src/db_store.rs b/src/storage/src/db_store.rs index 0eb43348..ed664ab8 100644 --- a/src/storage/src/db_store.rs +++ b/src/storage/src/db_store.rs @@ -17,9 +17,10 @@ use super::db_key::DbKey; use bytes::BytesMut; -use db3_crypto::db3_address::DB3Address; +use db3_crypto::{db3_address::DB3Address, id::DbId, id::TxId}; use db3_error::{DB3Error, Result}; -use db3_proto::db3_database_proto::Database; +use db3_proto::db3_database_proto::{Collection, Database}; +use db3_proto::db3_mutation_proto::DatabaseMutation; use merkdb::proofs::{query::Query, Op as ProofOp}; use merkdb::{BatchEntry, Merk, Op}; use prost::Message; @@ -34,10 +35,36 @@ impl DbStore { Self {} } - fn convert(db: &Database, addr: &DB3Address) -> Result<(BatchEntry, usize)> { - let key = DbKey(*addr, db.name.as_bytes().as_ref()); + fn from(id: &DbId, sender: &DB3Address, txid: &TxId, mutation: &DatabaseMutation) -> Database { + //TODO check the duplicated collection id + let collections: Vec = mutation + .index_mutations + .iter() + .map(move |x| Collection { + name: x.collection_id.to_string(), + index_list: x.index.to_vec(), + }) + .collect(); + Database { + address: id.as_ref().to_vec(), + sender: sender.as_ref().to_vec(), + tx: txid.as_ref().to_vec(), + collections, + } + } + + fn convert( + sender: &DB3Address, + nonce: u64, + tx: &TxId, + mutation: &DatabaseMutation, + ) -> Result<(BatchEntry, usize)> { + let dbid = DbId::try_from((sender, nonce))?; + let key = DbKey(dbid); let encoded_key = key.encode()?; - let mut buf = BytesMut::with_capacity(1024 * 4); + let db = Self::from(&dbid, sender, tx, mutation); + //TODO limit the key length + let mut buf = BytesMut::with_capacity(1024 * 2); db.encode(&mut buf) .map_err(|e| DB3Error::ApplyDatabaseError(format!("{e}")))?; let buf = buf.freeze(); @@ -48,23 +75,15 @@ impl DbStore { )) } - pub fn apply_del(db: Pin<&mut Merk>, addr: &DB3Address, name: &str) -> Result<()> { + pub fn apply_mutation( + db: Pin<&mut Merk>, + sender: &DB3Address, + nonce: u64, + tx: &TxId, + mutation: &DatabaseMutation, + ) -> Result<()> { let mut entries: Vec = Vec::new(); - let key = DbKey(*addr, name.as_bytes().as_ref()); - let encoded_key = key.encode()?; - let entry = (encoded_key, Op::Delete); - entries.push(entry); - unsafe { - Pin::get_unchecked_mut(db) - .apply(&entries, &[]) - .map_err(|e| DB3Error::ApplyDatabaseError(format!("{e}")))?; - } - Ok(()) - } - - pub fn apply_add(db: Pin<&mut Merk>, addr: &DB3Address, database: &Database) -> Result<()> { - let mut entries: Vec = Vec::new(); - let (batch_entry, _) = Self::convert(database, addr)?; + let (batch_entry, _) = Self::convert(sender, 
nonce, tx, mutation)?; entries.push(batch_entry); unsafe { Pin::get_unchecked_mut(db) @@ -75,8 +94,8 @@ impl DbStore { } pub fn get_databases(db: Pin<&Merk>, addr: &DB3Address) -> Result> { - let start_key = DbKey(*addr, "".as_bytes().as_ref()); - let end_key = DbKey(*addr, "~~".as_bytes().as_ref()); + let start_key = DbKey::min(); + let end_key = DbKey::max(); let range = Range { start: start_key.encode()?, end: end_key.encode()?, @@ -113,26 +132,13 @@ mod tests { let addr = gen_address(); let merk = Merk::open(tmp_dir_path).unwrap(); let mut db = Box::pin(merk); - let usdt = Erc20Token { - symbal: "usdt".to_string(), - units: vec!["cent".to_string(), "usdt".to_string()], - scalar: vec![1, 10], - }; - let price = Price { - amount: 1, - unit: "cent".to_string(), - token: Some(usdt), - }; - let query_price = QueryPrice { - price: Some(price), - query_count: 1000, - }; let ns = Database { name: "test1".to_string(), price: Some(query_price), ts: 1000, description: "test".to_string(), }; + let db_m: Pin<&mut Merk> = Pin::as_mut(&mut db); let result = DbStore::apply_add(db_m, &addr, &ns); assert!(result.is_ok()); From ea90d62c686414d664956f44175d02fd3416b123 Mon Sep 17 00:00:00 2001 From: imotai Date: Mon, 30 Jan 2023 21:23:06 +0800 Subject: [PATCH 4/9] feat: add collection and add database --- src/cmd/src/lib.rs | 39 +----- src/crypto/src/id.rs | 21 ++++ src/error/src/lib.rs | 2 + src/node/src/abci_impl.rs | 25 ++-- src/node/src/auth_storage.rs | 20 ++- src/node/tests/node_test.rs | 111 +++++++---------- src/proto/proto/db3_database.proto | 33 +---- src/proto/proto/db3_mutation.proto | 17 ++- src/sdk/src/store_sdk.rs | 1 - src/storage/src/db_key.rs | 6 +- src/storage/src/db_store.rs | 194 +++++++++++++++++++++++++---- 11 files changed, 276 insertions(+), 193 deletions(-) diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs index 50b6e71a..8945c0bc 100644 --- a/src/cmd/src/lib.rs +++ b/src/cmd/src/lib.rs @@ -15,7 +15,7 @@ // limitations under the License. // -use db3_base::{get_address_from_pk, strings}; +use db3_base::strings; use db3_crypto::db3_keypair::EncodeDecodeBase64; use db3_crypto::{db3_keypair::DB3KeyPair, key_derive, signature_scheme::SignatureScheme}; use db3_proto::db3_account_proto::Account; @@ -39,13 +39,9 @@ use std::process::exit; const HELP: &str = r#"the help of db3 command help show all command -put write pairs of key and value to db3 e.g. put ns1 key1 value1 key2 values -del delete key from db3 e.g. del ns1 key1 key2 -get get value from db3 e.g. get ns1 key1 key2 -range get a range from db3 e.g. range ns1 start_key end_key +db new create a new database account get balance of current account blocks get latest blocks -session info get session info e.g session info quit quit command line console "#; @@ -161,6 +157,7 @@ async fn close_session( } } } + /// restart session when current session is invalid/closed/blocked async fn refresh_session( store_sdk: &mut StoreSDK, @@ -202,7 +199,7 @@ pub async fn process_cmd( println!("{}", HELP); return true; } - + "db" => {} "gen_key" => { get_key_pair(true).unwrap(); return true; @@ -220,33 +217,6 @@ pub async fn process_cmd( // show_account(&account); return true; } - "session" => { - if parts.len() < 2 { - println!("no enough command, e.g. 
session info | session restart"); - return false; - } - let op = parts[1]; - match op { - "info" => { - // TODO(chenjing): show history session list - if session.is_none() { - println!("start a session before query session info"); - return true; - } - if let Ok(session_info) = store_sdk - .get_session_info(&session.as_ref().unwrap().session_token) - .await - { - println!("{:?}", session_info); - return true; - } else { - println!("empty set"); - return false; - } - } - _ => {} - } - } "range" | "blocks" => { println!("to be provided"); return false; @@ -260,6 +230,7 @@ pub async fn process_cmd( let ns = parts[1]; let mut pairs: Vec = Vec::new(); + match cmd { "get" => { if !refresh_session(store_sdk, session).await { diff --git a/src/crypto/src/id.rs b/src/crypto/src/id.rs index 51969e87..1df0985f 100644 --- a/src/crypto/src/id.rs +++ b/src/crypto/src/id.rs @@ -38,6 +38,13 @@ pub struct TxId { data: [u8; 32], } +impl TxId { + #[inline] + pub fn zero() -> Self { + Self { data: [0; 32] } + } +} + impl From<&[u8]> for TxId { fn from(message: &[u8]) -> Self { let id = sha256::Hash::hash(message); @@ -73,6 +80,11 @@ impl DbId { pub fn max_id() -> DbId { DbId::from(&[std::u8::MAX; DB3_ADDRESS_LENGTH]) } + + #[inline] + pub fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self.addr.as_ref())) + } } impl AsRef<[u8]> for DbId { @@ -89,6 +101,15 @@ impl From<&[u8; DB3_ADDRESS_LENGTH]> for DbId { } } +impl TryFrom<&[u8]> for DbId { + type Error = DB3Error; + fn try_from(data: &[u8]) -> std::result::Result { + Ok(Self { + addr: DB3Address::try_from(data)?, + }) + } +} + impl From for DbId { fn from(addr: DB3Address) -> Self { Self { addr } diff --git a/src/error/src/lib.rs b/src/error/src/lib.rs index 3b0a2100..649da2bf 100644 --- a/src/error/src/lib.rs +++ b/src/error/src/lib.rs @@ -67,6 +67,8 @@ pub enum DB3Error { QuerySessionVerifyError(String), #[error("fail to query database {0}")] QueryDatabaseError(String), + #[error("database with addr {0} was not found")] + DatabaseNotFound(String), #[error("the address does not match the public key")] InvalidSigner, #[error("fail to generate key for {0}")] diff --git a/src/node/src/abci_impl.rs b/src/node/src/abci_impl.rs index c913f7e5..55331a1c 100644 --- a/src/node/src/abci_impl.rs +++ b/src/node/src/abci_impl.rs @@ -134,7 +134,7 @@ impl Application for AbciImpl { let payload_type = PayloadType::from_i32(request.payload_type); match payload_type { Some(PayloadType::DatabasePayload) => { - match DatabaseRequest::decode(request.payload.as_ref()) { + match DatabaseMutation::decode(request.payload.as_ref()) { Ok(_) => { return ResponseCheckTx { code: 0, @@ -241,7 +241,7 @@ impl Application for AbciImpl { fn deliver_tx(&self, request: RequestDeliverTx) -> ResponseDeliverTx { //TODO match the hash fucntion with tendermint - let txId = TxId::from(request.tx.as_ref()); + let tx_id = TxId::from(request.tx.as_ref()); if let Ok(wrequest) = WriteRequest::decode(request.tx.as_ref()) { if let Ok(account_id) = db3_verifier::DB3Verifier::verify( wrequest.payload.as_ref(), @@ -253,7 +253,7 @@ impl Application for AbciImpl { if let Ok(dr) = DatabaseMutation::decode(wrequest.payload.as_ref()) { match self.pending_databases.lock() { Ok(mut s) => { - s.push((account_id.addr, dr, txId)); + s.push((account_id.addr, dr, tx_id)); return ResponseDeliverTx { code: 0, data: Bytes::new(), @@ -279,11 +279,7 @@ impl Application for AbciImpl { match self.pending_mutation.lock() { Ok(mut s) => { //TODO add gas check - s.push(( - account_id.addr, - mutation_id.as_ref().clone(), 
- mutation, - )); + s.push((account_id.addr, tx_id, mutation)); return ResponseDeliverTx { code: 0, data: Bytes::new(), @@ -313,7 +309,7 @@ impl Application for AbciImpl { s.push(( client_account_id.addr, account_id.addr, - mutation_id.as_ref().clone(), + tx_id, query_session.node_query_session_info.unwrap(), )); return ResponseDeliverTx { @@ -358,7 +354,7 @@ impl Application for AbciImpl { } fn commit(&self) -> ResponseCommit { - let pending_mutation: Vec<(AccountAddress, Hash, Mutation)> = + let pending_mutation: Vec<(AccountAddress, TxId, Mutation)> = match self.pending_mutation.lock() { Ok(mut q) => { let clone_q = q.drain(..).collect(); @@ -368,7 +364,7 @@ impl Application for AbciImpl { todo!(); } }; - let pending_query_session: Vec<(AccountAddress, AccountAddress, Hash, QuerySessionInfo)> = + let pending_query_session: Vec<(AccountAddress, AccountAddress, TxId, QuerySessionInfo)> = match self.pending_query_session.lock() { Ok(mut q) => { let clone_q = q.drain(..).collect(); @@ -378,7 +374,7 @@ impl Application for AbciImpl { todo!(); } }; - let pending_databases: Vec<(AccountAddress, DatabaseMutation)> = + let pending_databases: Vec<(AccountAddress, DatabaseMutation, TxId)> = match self.pending_databases.lock() { Ok(mut q) => { let clone_q = q.drain(..).collect(); @@ -427,14 +423,14 @@ impl Application for AbciImpl { } let pending_databases_len = pending_databases.len(); for item in pending_databases { - match s.apply_database(&item.0, &item.1) { + match s.apply_database(&item.0, 1, &item.2, &item.1) { Ok(_) => {} Err(_) => { todo!() } } } - + span.exit(); if pending_mutation_len > 0 || pending_query_session_len > 0 || pending_databases_len > 0 @@ -456,7 +452,6 @@ impl Application for AbciImpl { retain_height: 0, } } - span.exit(); } Err(_) => { todo!(); diff --git a/src/node/src/auth_storage.rs b/src/node/src/auth_storage.rs index 5f65f8b8..62348558 100644 --- a/src/node/src/auth_storage.rs +++ b/src/node/src/auth_storage.rs @@ -15,15 +15,13 @@ // limitations under the License. 
// -use db3_crypto::db3_address::DB3Address; +use db3_crypto::{db3_address::DB3Address, id::TxId}; use db3_error::Result; use db3_proto::db3_account_proto::Account; use db3_proto::db3_base_proto::Units; use db3_proto::db3_bill_proto::{Bill, BillType}; use db3_proto::db3_database_proto::Database; -use db3_proto::db3_mutation_proto::{ - DatabaseMutation, KvPair, Mutation, MutationAction, -}; +use db3_proto::db3_mutation_proto::{DatabaseMutation, KvPair, Mutation, MutationAction}; use db3_proto::db3_node_proto::{BatchGetKey, BatchGetValue, RangeKey, RangeValue}; use db3_proto::db3_session_proto::QuerySessionInfo; use db3_storage::account_store::AccountStore; @@ -40,7 +38,7 @@ use merkdb::Merk; use prost::Message; use std::boxed::Box; use std::pin::Pin; -use tracing::{info, warn}; +use tracing::info; pub const HASH_LENGTH: usize = 32; pub type Hash = [u8; HASH_LENGTH]; @@ -172,8 +170,8 @@ impl AuthStorage { AccountStore::get_account(self.db.as_ref(), addr) } - pub fn get_database(&self, addr: &DB3Address) -> Result> { - let ops = DbStore::get_databases(self.db.as_ref(), addr)?; + pub fn get_database(&self, _addr: &DB3Address) -> Result> { + let ops = DbStore::get_databases(self.db.as_ref())?; let mut db_list: Vec = Vec::new(); for op in ops { match op { @@ -216,7 +214,7 @@ impl AuthStorage { &mut self, addr: &DB3Address, query_addr: &DB3Address, - mutation_id: &Hash, + tx_id: &TxId, query_session_info: &QuerySessionInfo, ) -> Result { let mut account = AccountStore::get_account(self.db.as_ref(), &addr)?; @@ -228,7 +226,7 @@ impl AuthStorage { bill_id: self.current_block_state.bill_id_counter, bill_type: BillType::BillForQuery.into(), time: self.current_block_state.block_time, - bill_target_id: mutation_id.to_vec(), + bill_target_id: tx_id.as_ref().to_vec(), owner: addr.to_vec(), query_addr: query_addr.to_vec(), }; @@ -259,7 +257,7 @@ impl AuthStorage { pub fn apply_mutation( &mut self, addr: &DB3Address, - mutation_id: &Hash, + tx_id: &TxId, mutation: &Mutation, ) -> Result<(Units, u64)> { let mut account = AccountStore::get_account(self.db.as_ref(), &addr)?; @@ -276,7 +274,7 @@ impl AuthStorage { bill_id: self.current_block_state.bill_id_counter, bill_type: BillType::BillForMutation.into(), time: self.current_block_state.block_time, - bill_target_id: mutation_id.to_vec(), + bill_target_id: tx_id.as_ref().to_vec(), owner: addr.to_vec(), query_addr: vec![], }; diff --git a/src/node/tests/node_test.rs b/src/node/tests/node_test.rs index cb5aa91c..9d907982 100644 --- a/src/node/tests/node_test.rs +++ b/src/node/tests/node_test.rs @@ -5,10 +5,9 @@ mod node_integration { use db3_base::get_a_random_nonce; use db3_crypto::db3_signer::Db3MultiSchemeSigner; use db3_proto::db3_base_proto::{ChainId, ChainRole, Erc20Token, Price, UnitType, Units}; - use db3_proto::db3_database_proto::{Database, QueryPrice}; + use db3_proto::db3_database_proto::Database; use db3_proto::db3_mutation_proto::{ - database_request::Body, DatabaseRequest, KvPair, Mutation, MutationAction, PayloadType, - WriteRequest, + DatabaseMutation, KvPair, Mutation, MutationAction, PayloadType, WriteRequest, }; use db3_proto::db3_node_proto::storage_node_client::StorageNodeClient; use db3_sdk::mutation_sdk::MutationSDK; @@ -54,72 +53,46 @@ mod node_integration { } } - #[actix_web::test] - async fn json_rpc_database_smoke_test() { - let json_rpc_url = "http://127.0.0.1:26670"; - let client = awc::Client::default(); - let kp = db3_cmd::get_key_pair(false).unwrap(); - let signer = Db3MultiSchemeSigner::new(kp); - let usdt = Erc20Token { - 
symbal: "usdt".to_string(), - units: vec!["cent".to_string(), "usdt".to_string()], - scalar: vec![1, 10], - }; - let price = Price { - amount: 1, - unit: "cent".to_string(), - token: Some(usdt), - }; - let query_price = QueryPrice { - price: Some(price), - query_count: 1000, - }; - let database = Database { - name: "test1".to_string(), - price: Some(query_price), - ts: 1000, - description: "test".to_string(), - }; - - let db = Body::Database(database); - let request = DatabaseRequest { - body: Some(db), - meta: None, - }; - let mut mbuf = BytesMut::with_capacity(1024 * 4); - request.encode(&mut mbuf).unwrap(); - let mbuf = mbuf.freeze(); - let signature = signer.sign(mbuf.as_ref()).unwrap(); - let request = WriteRequest { - signature: signature.as_ref().to_vec(), - payload: mbuf.as_ref().to_vec().to_owned(), - payload_type: PayloadType::DatabasePayload.into(), - }; - let mut buf = BytesMut::with_capacity(1024 * 4); - request.encode(&mut buf).unwrap(); - let buf = buf.freeze(); - // encode request to base64 - let data = base64::encode(buf.as_ref()); - let base64_str = String::from_utf8_lossy(data.as_ref()).to_string(); - let request = serde_json::json!( - {"method": "broadcast", - "params": vec![base64_str], - "id": 1, - "jsonrpc": "2.0" - } - ); - let mut response = client.post(json_rpc_url).send_json(&request).await.unwrap(); - if let serde_json::Value::Object(val) = response.json::().await.unwrap() - { - if let Some(serde_json::Value::String(s)) = val.get("result") { - assert!(s.len() > 0); - } else { - assert!(false) - } - } else { - assert!(false) - } - } + //#[actix_web::test] + //async fn json_rpc_database_smoke_test() { + // let json_rpc_url = "http://127.0.0.1:26670"; + // let client = awc::Client::default(); + // let kp = db3_cmd::get_key_pair(false).unwrap(); + // let signer = Db3MultiSchemeSigner::new(kp); + // let mut mbuf = BytesMut::with_capacity(1024 * 4); + // request.encode(&mut mbuf).unwrap(); + // let mbuf = mbuf.freeze(); + // let signature = signer.sign(mbuf.as_ref()).unwrap(); + // let request = WriteRequest { + // signature: signature.as_ref().to_vec(), + // payload: mbuf.as_ref().to_vec().to_owned(), + // payload_type: PayloadType::DatabasePayload.into(), + // }; + // let mut buf = BytesMut::with_capacity(1024 * 4); + // request.encode(&mut buf).unwrap(); + // let buf = buf.freeze(); + // // encode request to base64 + // let data = base64::encode(buf.as_ref()); + // let base64_str = String::from_utf8_lossy(data.as_ref()).to_string(); + // let request = serde_json::json!( + // {"method": "broadcast", + // "params": vec![base64_str], + // "id": 1, + // "jsonrpc": "2.0" + // } + // ); + // let mut response = client.post(json_rpc_url).send_json(&request).await.unwrap(); + // if let serde_json::Value::Object(val) = response.json::().await.unwrap() + // { + // if let Some(serde_json::Value::String(s)) = val.get("result") { + // assert!(s.len() > 0); + // } else { + // assert!(false) + // } + // } else { + // assert!(false) + // } + //} #[actix_web::test] async fn json_rpc_smoke_test() { diff --git a/src/proto/proto/db3_database.proto b/src/proto/proto/db3_database.proto index 2ee9630b..58bb6797 100644 --- a/src/proto/proto/db3_database.proto +++ b/src/proto/proto/db3_database.proto @@ -27,7 +27,8 @@ message Database { bytes address = 1; // the owner of the Database bytes sender = 2; - bytes tx = 3; + // the history of database modification + repeated bytes tx = 3; repeated Collection collections = 4; } @@ -37,22 +38,6 @@ message Collection { } message Index { - // Query 
Scope defines the scope at which a query is run. This is specified on - // a StructuredQuery's `from` field. - enum QueryScope { - // The query scope is unspecified. Not a valid option. - QUERY_SCOPE_UNSPECIFIED = 0; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified - // at query time, and that has the collection id specified by the index. - COLLECTION = 1; - - // Indexes with a collection group query scope specified allow queries - // against all collections that has the collection id specified by the - // index. - COLLECTION_GROUP = 2; - } // A field in an index. // The field_path describes which field is indexed, the value_mode describes @@ -131,15 +116,6 @@ message Index { // For single field indexes, this field will be empty. string name = 1; - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified at - // query time, and that has the same collection id. - // - // Indexes with a collection group query scope specified allow queries against - // all collections descended from a specific document, specified at query - // time, and that have the same collection id as this index. - QueryScope query_scope = 2; - // The fields supported by this index. // // For composite indexes, this is always 2 or more fields. @@ -151,8 +127,5 @@ message Index { // // For single field indexes, this will always be exactly one entry with a // field path equal to the field path of the associated field. - repeated IndexField fields = 3; - - // Output only. The serving state of the index. - State state = 4; + repeated IndexField fields = 2; } diff --git a/src/proto/proto/db3_mutation.proto b/src/proto/proto/db3_mutation.proto index c5dea49a..2e3f456f 100644 --- a/src/proto/proto/db3_mutation.proto +++ b/src/proto/proto/db3_mutation.proto @@ -22,17 +22,24 @@ import "db3_database.proto"; package db3_mutation_proto; enum MutationAction { - InsertKv=0; - DeleteKv=1; - Nonce= 2; + InsertKv = 0; + DeleteKv = 1; + Nonce = 2; +} + +enum DatabaseAction { + CreateDB = 0; + AddCollection = 1; } message DatabaseMutation { db3_base_proto.BroadcastMeta meta = 1; - repeated IndexMutation index_mutations = 2; + repeated CollectionMutation collection_mutations = 2; + bytes db_address = 3; + DatabaseAction action = 4; } -message IndexMutation { +message CollectionMutation { repeated db3_database_proto.Index index = 1; string collection_id = 2; } diff --git a/src/sdk/src/store_sdk.rs b/src/sdk/src/store_sdk.rs index 477048ec..c297fe4b 100644 --- a/src/sdk/src/store_sdk.rs +++ b/src/sdk/src/store_sdk.rs @@ -403,7 +403,6 @@ mod tests { #[tokio::test] async fn close_session_happy_path() { let nonce = get_a_random_nonce(); - let ep = "http://127.0.0.1:26659"; let rpc_endpoint = Endpoint::new(ep.to_string()).unwrap(); let channel = rpc_endpoint.connect_lazy(); diff --git a/src/storage/src/db_key.rs b/src/storage/src/db_key.rs index 871f9e6c..93568ead 100644 --- a/src/storage/src/db_key.rs +++ b/src/storage/src/db_key.rs @@ -65,7 +65,8 @@ impl DbKey { mod tests { use super::*; use db3_crypto::key_derive; - use db3_crypto::signature_scheme::SignatureScheme; + use db3_crypto::{db3_address::DB3Address, signature_scheme::SignatureScheme}; + fn gen_address() -> DB3Address { let seed: [u8; 32] = [0; 32]; let (address, _) = @@ -88,9 +89,10 @@ mod tests { #[test] fn it_cmp() -> Result<()> { - let min = DbKey::mix().encode()?; + let min = DbKey::min().encode()?; 
let max = DbKey::max().encode()?; assert!(min.cmp(&min) == std::cmp::Ordering::Equal); assert!(min.cmp(&max) == std::cmp::Ordering::Less); + Ok(()) } } diff --git a/src/storage/src/db_store.rs b/src/storage/src/db_store.rs index ed664ab8..e187a814 100644 --- a/src/storage/src/db_store.rs +++ b/src/storage/src/db_store.rs @@ -20,13 +20,15 @@ use bytes::BytesMut; use db3_crypto::{db3_address::DB3Address, id::DbId, id::TxId}; use db3_error::{DB3Error, Result}; use db3_proto::db3_database_proto::{Collection, Database}; -use db3_proto::db3_mutation_proto::DatabaseMutation; +use db3_proto::db3_mutation_proto::{DatabaseAction, DatabaseMutation}; use merkdb::proofs::{query::Query, Op as ProofOp}; use merkdb::{BatchEntry, Merk, Op}; use prost::Message; +use std::collections::HashSet; use std::collections::LinkedList; use std::ops::Range; use std::pin::Pin; +use tracing::{info, warn}; pub struct DbStore {} @@ -35,20 +37,60 @@ impl DbStore { Self {} } - fn from(id: &DbId, sender: &DB3Address, txid: &TxId, mutation: &DatabaseMutation) -> Database { + fn update_database( + old_db: &Database, + mutation: &DatabaseMutation, + tx_id: &TxId, + ) -> Result { + let collection_ids: HashSet = + HashSet::from_iter(old_db.collections.iter().map(|x| x.name.to_string())); + let new_collections: Vec = mutation + .collection_mutations + .iter() + .filter(|x| !collection_ids.contains(&x.collection_id)) + .map(|x| Collection { + name: x.collection_id.to_string(), + index_list: x.index.to_vec(), + }) + .collect(); + if new_collections.len() != mutation.collection_mutations.len() { + Err(DB3Error::ApplyDatabaseError( + "duplicated collection names".to_string(), + )) + } else { + let mut collections = old_db.collections.to_vec(); + collections.extend_from_slice(new_collections.as_ref()); + let mut tx_list = old_db.tx.to_vec(); + tx_list.push(tx_id.as_ref().to_vec()); + Ok(Database { + address: old_db.address.to_vec(), + sender: old_db.sender.to_vec(), + tx: tx_list, + collections, + }) + } + } + + fn new_database( + id: &DbId, + sender: &DB3Address, + txid: &TxId, + mutation: &DatabaseMutation, + ) -> Database { //TODO check the duplicated collection id let collections: Vec = mutation - .index_mutations + .collection_mutations .iter() .map(move |x| Collection { name: x.collection_id.to_string(), index_list: x.index.to_vec(), }) .collect(); + Database { address: id.as_ref().to_vec(), sender: sender.as_ref().to_vec(), - tx: txid.as_ref().to_vec(), + tx: vec![txid.as_ref().to_vec()], collections, } } @@ -60,12 +102,16 @@ impl DbStore { mutation: &DatabaseMutation, ) -> Result<(BatchEntry, usize)> { let dbid = DbId::try_from((sender, nonce))?; + let db = Self::new_database(&dbid, sender, tx, mutation); + Self::encode_database(dbid, &db) + } + + fn encode_database(dbid: DbId, database: &Database) -> Result<(BatchEntry, usize)> { let key = DbKey(dbid); let encoded_key = key.encode()?; - let db = Self::from(&dbid, sender, tx, mutation); - //TODO limit the key length let mut buf = BytesMut::with_capacity(1024 * 2); - db.encode(&mut buf) + database + .encode(&mut buf) .map_err(|e| DB3Error::ApplyDatabaseError(format!("{e}")))?; let buf = buf.freeze(); let total_in_bytes = encoded_key.len() + buf.as_ref().len(); @@ -75,7 +121,10 @@ impl DbStore { )) } - pub fn apply_mutation( + // + // create a new database + // + fn create_database( db: Pin<&mut Merk>, sender: &DB3Address, nonce: u64, @@ -93,7 +142,80 @@ impl DbStore { Ok(()) } - pub fn get_databases(db: Pin<&Merk>, addr: &DB3Address) -> Result> { + // + // add a new collection 
to database + // + fn add_collection( + db: Pin<&mut Merk>, + sender: &DB3Address, + tx: &TxId, + mutation: &DatabaseMutation, + ) -> Result<()> { + let addr_ref: &[u8] = mutation.db_address.as_ref(); + let db_id = DbId::try_from(addr_ref)?; + let database = Self::get_database(db.as_ref(), &db_id)?; + match database { + Some(d) => { + let sender_ref: &[u8] = d.sender.as_ref(); + if sender_ref != sender.as_ref() { + warn!( + "no permission to add collection to database {}", + db_id.to_hex() + ); + } else { + let mut entries: Vec = Vec::new(); + let new_db = Self::update_database(&d, mutation, tx)?; + let (entry, _) = Self::encode_database(db_id, &new_db)?; + entries.push(entry); + unsafe { + Pin::get_unchecked_mut(db) + .apply(&entries, &[]) + .map_err(|e| DB3Error::ApplyDatabaseError(format!("{e}")))?; + } + } + } + None => { + warn!("database not found with addr {}", db_id.to_hex()); + } + } + Ok(()) + } + + pub fn apply_mutation( + db: Pin<&mut Merk>, + sender: &DB3Address, + nonce: u64, + tx: &TxId, + mutation: &DatabaseMutation, + ) -> Result<()> { + let action = DatabaseAction::from_i32(mutation.action); + match action { + Some(DatabaseAction::CreateDb) => { + Self::create_database(db, sender, nonce, tx, mutation) + } + Some(DatabaseAction::AddCollection) => Self::add_collection(db, sender, tx, mutation), + None => Ok(()), + } + } + + pub fn get_database(db: Pin<&Merk>, id: &DbId) -> Result> { + //TODO use reference + let key = DbKey(id.clone()); + let encoded_key = key.encode()?; + let value = db + .get(encoded_key.as_ref()) + .map_err(|e| DB3Error::QueryDatabaseError(format!("{e}")))?; + if let Some(v) = value { + match Database::decode(v.as_ref()) { + Ok(database) => Ok(Some(database)), + Err(e) => Err(DB3Error::QueryDatabaseError(format!("{e}"))), + } + } else { + Ok(None) + } + } + + pub fn get_databases(db: Pin<&Merk>) -> Result> { let start_key = DbKey::min(); let end_key = DbKey::max(); let range = Range { @@ -114,8 +236,12 @@ mod tests { use super::*; use db3_crypto::key_derive; use db3_crypto::signature_scheme::SignatureScheme; - use db3_proto::db3_base_proto::{Erc20Token, Price}; - use db3_proto::db3_database_proto::QueryPrice; + use db3_proto::db3_database_proto::{ + index::index_field::{Order, ValueMode}, + index::IndexField, + Index, + }; + use db3_proto::db3_mutation_proto::CollectionMutation; use std::boxed::Box; use tempdir::TempDir; @@ -126,31 +252,47 @@ mod tests { address } + fn build_database_mutation() -> DatabaseMutation { + let index_field = IndexField { + field_path: "test1".to_string(), + value_mode: Some(ValueMode::Order(Order::Ascending as i32)), + }; + + let index = Index { + name: "idx1".to_string(), + fields: vec![index_field], + }; + + let index_mutation = CollectionMutation { + index: vec![index], + collection_id: "collection1".to_string(), + }; + + let dm = DatabaseMutation { + meta: None, + collection_mutations: vec![index_mutation], + db_address: vec![], + action: DatabaseAction::CreateDb.into(), + }; + let json_data = serde_json::to_string(&dm).unwrap(); + println!("{json_data}"); + dm + } + #[test] fn db_store_smoke_test() { - let tmp_dir_path = TempDir::new("assign_partition").expect("create temp dir"); + let tmp_dir_path = TempDir::new("db_store_test").expect("create temp dir"); let addr = gen_address(); let merk = Merk::open(tmp_dir_path).unwrap(); let mut db = Box::pin(merk); - let ns = Database { - name: "test1".to_string(), - price: Some(query_price), - ts: 1000, - description: "test".to_string(), - }; - + let db_mutation = 
build_database_mutation(); let db_m: Pin<&mut Merk> = Pin::as_mut(&mut db); - let result = DbStore::apply_add(db_m, &addr, &ns); + let result = DbStore::apply_mutation(db_m, &addr, 1, &TxId::zero(), &db_mutation); assert!(result.is_ok()); - if let Ok(ops) = DbStore::get_databases(db.as_ref(), &addr) { + if let Ok(ops) = DbStore::get_databases(db.as_ref()) { assert_eq!(1, ops.len()); } else { assert!(false); } - let db_m: Pin<&mut Merk> = Pin::as_mut(&mut db); - let result = DbStore::apply_del(db_m, &addr, "test1"); - assert!(result.is_ok()); - let result = DbStore::get_databases(db.as_ref(), &addr); - assert!(result.is_err()); } } From 28d3f8675beab64248bbb8fab17f90868514ac9e Mon Sep 17 00:00:00 2001 From: imotai Date: Wed, 1 Feb 2023 12:25:43 +0800 Subject: [PATCH 5/9] feat: add create database in cli --- src/cmd/Cargo.toml | 9 +- src/cmd/src/command.rs | 113 ++++++++++ src/cmd/src/console.rs | 99 +++++++++ src/cmd/src/keystore.rs | 134 +++++++++++ src/cmd/src/lib.rs | 379 +------------------------------ src/cmd/src/shell.rs | 381 ++++++++++++++++++++++++++++++++ src/crypto/Cargo.toml | 3 +- src/crypto/src/db3_signature.rs | 1 - src/crypto/src/db3_signer.rs | 6 + src/crypto/src/id.rs | 22 +- src/node/src/abci_impl.rs | 39 ++-- src/node/src/command.rs | 340 ++++++++++++++++++++++++++++ src/node/src/lib.rs | 1 + src/node/src/main.rs | 338 +--------------------------- src/proto/proto/db3_base.proto | 16 -- src/sdk/Cargo.toml | 4 +- src/sdk/src/mutation_sdk.rs | 74 ++++++- src/sdk/src/store_sdk.rs | 10 +- tools/start_localnet.sh | 2 +- 19 files changed, 1206 insertions(+), 765 deletions(-) create mode 100644 src/cmd/src/command.rs create mode 100644 src/cmd/src/console.rs create mode 100644 src/cmd/src/keystore.rs create mode 100644 src/cmd/src/shell.rs create mode 100644 src/node/src/command.rs diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 0e3aad8d..4e2c6fd4 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -25,7 +25,14 @@ rand = "0.7" rand_core = "0.6" dirs = "4.0.0" bip32 = "0.4.0" - +rustyline = "9.1.2" +rustyline-derive = "0.7.0" +colored = "2.0.0" +unescape = "0.1.0" +shell-words = "1.1.0" +clap = { version = "4.0.20", features = ["derive"] } +async-trait = "0.1.64" +anyhow = "1.0.68" [dev-dependencies] db3-session={ path = "../session"} db3-crypto={ path = "../crypto"} diff --git a/src/cmd/src/command.rs b/src/cmd/src/command.rs new file mode 100644 index 00000000..441bac33 --- /dev/null +++ b/src/cmd/src/command.rs @@ -0,0 +1,113 @@ +// +// command.rs +// Copyright (C) 2023 db3.network Author imotai +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+
+use clap::*;
+
+use crate::keystore::KeyStore;
+use db3_proto::db3_base_proto::{BroadcastMeta, ChainId, ChainRole};
+use db3_proto::db3_mutation_proto::{DatabaseAction, DatabaseMutation};
+use db3_sdk::mutation_sdk::MutationSDK;
+use prettytable::{format, Table};
+use std::time::{SystemTime, UNIX_EPOCH};
+
+pub struct DB3ClientContext {
+    pub mutation_sdk: Option<MutationSDK>,
+}
+
+#[derive(Debug, Parser)]
+#[clap(rename_all = "kebab-case")]
+pub enum DB3ClientCommand {
+    /// Init the client config file
+    #[clap(name = "init")]
+    Init {},
+    /// Create a new key
+    #[clap(name = "show-key")]
+    ShowKey {},
+    /// Create a database
+    #[clap(name = "new-db")]
+    NewDB {},
+    /// Create a new collection
+    #[clap(name = "new-collection")]
+    NewCollection {
+        /// the address of database
+        #[clap(long)]
+        addr: String,
+        /// the name of collection
+        #[clap(long)]
+        name: String,
+        /// the json style config of index
+        #[clap(long)]
+        config: String,
+    },
+}
+
+impl DB3ClientCommand {
+    fn current_seconds() -> u64 {
+        match SystemTime::now().duration_since(UNIX_EPOCH) {
+            Ok(n) => n.as_secs(),
+            Err(_) => 0,
+        }
+    }
+    pub async fn execute(self, ctx: &mut DB3ClientContext) {
+        match self {
+            DB3ClientCommand::Init {} => {
+                if let Ok(_) = KeyStore::recover_keypair() {
+                    println!("Init key successfully!");
+                }
+            }
+
+            DB3ClientCommand::ShowKey {} => {
+                if let Ok(ks) = KeyStore::recover_keypair() {
+                    ks.show_key();
+                }
+            }
+
+            DB3ClientCommand::NewDB {} => {
+                let meta = BroadcastMeta {
+                    //TODO get from network
+                    nonce: Self::current_seconds(),
+                    //TODO use config
+                    chain_id: ChainId::DevNet.into(),
+                    //TODO use config
+                    chain_role: ChainRole::StorageShardChain.into(),
+                };
+                let dm = DatabaseMutation {
+                    meta: Some(meta),
+                    collection_mutations: vec![],
+                    db_address: vec![],
+                    action: DatabaseAction::CreateDb.into(),
+                };
+                if let Ok((db_id, tx_id)) = ctx
+                    .mutation_sdk
+                    .as_ref()
+                    .unwrap()
+                    .create_database(&dm)
+                    .await
+                {
+                    let mut table = Table::new();
+                    table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
+                    table.set_titles(row!["database address", "transaction id"]);
+                    table.add_row(row![db_id.to_hex(), tx_id.to_base64()]);
+                    table.printstd();
+                } else {
+                    println!("fail to create database");
+                }
+            }
+            _ => {}
+        }
+    }
+}
diff --git a/src/cmd/src/console.rs b/src/cmd/src/console.rs
new file mode 100644
index 00000000..38d6d06b
--- /dev/null
+++ b/src/cmd/src/console.rs
@@ -0,0 +1,99 @@
+//
+// console.rs
+// Copyright (C) 2023 db3.network Author imotai
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
+//
+
+use std::io::{stderr, Write};
+use std::ops::Deref;
+
+use async_trait::async_trait;
+use clap::Command;
+use clap::CommandFactory;
+use clap::FromArgMatches;
+use clap::Parser;
+use colored::Colorize;
+
+use crate::command::{DB3ClientCommand, DB3ClientContext};
+use crate::shell::{
+    install_shell_plugins, AsyncHandler, CacheKey, CommandStructure, CompletionCache, Shell,
+};
+const DB3: &str = "
+██████╗ ██████╗ ██████╗
+██╔══██╗██╔══██╗╚════██╗
+██║ ██║██████╔╝ █████╔╝
+██║ ██║██╔══██╗ ╚═══██╗
+██████╔╝██████╔╝██████╔╝
+╚═════╝ ╚═════╝ ╚═════╝
+@db3.network🚀🚀🚀";
+
+#[derive(Parser)]
+#[clap(name = "", rename_all = "kebab-case", no_binary_name = true)]
+pub struct ConsoleOpts {
+    #[clap(subcommand)]
+    pub command: DB3ClientCommand,
+}
+
+pub async fn start_console(
+    ctx: DB3ClientContext,
+    out: &mut (dyn Write + Send),
+    err: &mut (dyn Write + Send),
+) -> Result<(), anyhow::Error> {
+    writeln!(out, "{DB3}");
+    let app: Command = DB3ClientCommand::command();
+    let mut shell = Shell::new(
+        "db3>-$ ",
+        ctx,
+        ClientCommandHandler,
+        CommandStructure::from_clap(&install_shell_plugins(app)),
+    );
+    shell.run_async(out, err).await
+}
+
+struct ClientCommandHandler;
+
+#[async_trait]
+impl AsyncHandler for ClientCommandHandler {
+    async fn handle_async(
+        &self,
+        args: Vec<String>,
+        context: &mut DB3ClientContext,
+        completion_cache: CompletionCache,
+    ) -> bool {
+        match handle_command(get_command(args), context, completion_cache).await {
+            Err(e) => {
+                let _err = writeln!(stderr(), "{}", e.to_string().red());
+                false
+            }
+            Ok(return_value) => return_value,
+        }
+    }
+}
+
+fn get_command(args: Vec<String>) -> Result<ConsoleOpts, anyhow::Error> {
+    let app: Command = install_shell_plugins(ConsoleOpts::command());
+    Ok(ConsoleOpts::from_arg_matches(
+        &app.try_get_matches_from(args)?,
+    )?)
+}
+
+async fn handle_command(
+    opts: Result<ConsoleOpts, anyhow::Error>,
+    ctx: &mut DB3ClientContext,
+    completion_cache: CompletionCache,
+) -> Result<bool, anyhow::Error> {
+    let opts = opts?;
+    opts.command.execute(ctx).await;
+    Ok(false)
+}
diff --git a/src/cmd/src/keystore.rs b/src/cmd/src/keystore.rs
new file mode 100644
index 00000000..2e4da502
--- /dev/null
+++ b/src/cmd/src/keystore.rs
@@ -0,0 +1,134 @@
+//
+// keystore.rs
+// Copyright (C) 2023 db3.network Author imotai
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
+//
+
+use bip32::Mnemonic;
+use db3_crypto::{
+    db3_address::DB3Address,
+    db3_keypair::{DB3KeyPair, EncodeDecodeBase64},
+    id::AccountId,
+    key_derive,
+    signature_scheme::SignatureScheme,
+};
+use db3_error::Result;
+use dirs;
+use rand_core::OsRng;
+use std::fs::File;
+use std::io::Write;
+use std::io::{Error, ErrorKind};
+use std::str::FromStr;
+
+use prettytable::{format, Table};
+
+pub struct KeyStore {
+    key_pair: DB3KeyPair,
+}
+
+impl KeyStore {
+    pub fn new(key_pair: DB3KeyPair) -> Self {
+        Self { key_pair }
+    }
+
+    //
+    // generate the keypair for a new user
+    //
+    pub fn generate_keypair() -> Result<(AccountId, DB3KeyPair, String)> {
+        let mnemonic = Mnemonic::random(&mut OsRng, Default::default());
+        let (address, keypair) = key_derive::derive_key_pair_from_path(
+            mnemonic.entropy(),
+            None,
+            &SignatureScheme::Secp256k1,
+        )?;
+        Ok((
+            AccountId::new(address),
+            keypair,
+            mnemonic.phrase().to_string(),
+        ))
+    }
+
+    pub fn has_key() -> bool {
+        let mut home_dir = dirs::home_dir().unwrap();
+        home_dir.push(".db3");
+        home_dir.push(".default");
+        let key_path = home_dir.as_path();
+        key_path.exists()
+    }
+
+    //
+    // recover the keypair from the local filesystem
+    //
+    pub fn recover_keypair() -> std::io::Result<KeyStore> {
+        let mut home_dir = dirs::home_dir().unwrap();
+        home_dir.push(".db3");
+        let user_dir = home_dir.as_path();
+        std::fs::create_dir_all(user_dir)?;
+        home_dir.push(".default");
+        let key_path = home_dir.as_path();
+        if key_path.exists() {
+            let kp_bytes = std::fs::read(key_path)?;
+            let b64_str = std::str::from_utf8(kp_bytes.as_ref()).unwrap();
+            let key_pair = DB3KeyPair::from_str(b64_str).unwrap();
+            Ok(KeyStore::new(key_pair))
+        } else {
+            let (_, kp, _) = Self::generate_keypair().unwrap();
+            let b64_str = kp.encode_base64();
+            let mut f = File::create(key_path)?;
+            f.write_all(b64_str.as_bytes())?;
+            f.sync_all()?;
+            Ok(KeyStore::new(kp))
+        }
+    }
+
+    pub fn get_keypair() -> std::io::Result<DB3KeyPair> {
+        if Self::has_key() {
+            let mut home_dir = dirs::home_dir().unwrap();
+            home_dir.push(".db3");
+            home_dir.push(".default");
+            let key_path = home_dir.as_path();
+            let kp_bytes = std::fs::read(key_path)?;
+            let b64_str = std::str::from_utf8(kp_bytes.as_ref()).unwrap();
+            let key_pair = DB3KeyPair::from_str(b64_str).unwrap();
+            Ok(key_pair)
+        } else {
+            Err(Error::new(ErrorKind::Other, "no key was found"))
+        }
+    }
+
+    pub fn show_key(&self) {
+        let mut table = Table::new();
+        table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
+        table.set_titles(row!["address", "scheme",]);
+        let pk = self.key_pair.public();
+        let id = AccountId::new(DB3Address::from(&pk));
+        match &self.key_pair {
+            DB3KeyPair::Ed25519(_) => {
+                table.add_row(row![id.to_hex(), "ed25519"]);
+                table.printstd();
+            }
+            DB3KeyPair::Secp256k1(_) => {
+                table.add_row(row![id.to_hex(), "secp256k1"]);
+                table.printstd();
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {}
+}
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 8945c0bc..cb182192 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -1,6 +1,6 @@
 //
 // lib.rs
-// Copyright (C) 2022 db3.network Author imotai
+// Copyright (C) 2023 db3.network Author imotai
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,378 +14,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License. 
// - -use db3_base::strings; -use db3_crypto::db3_keypair::EncodeDecodeBase64; -use db3_crypto::{db3_keypair::DB3KeyPair, key_derive, signature_scheme::SignatureScheme}; -use db3_proto::db3_account_proto::Account; -use db3_proto::db3_base_proto::{ChainId, ChainRole, UnitType, Units}; -use db3_proto::db3_mutation_proto::{KvPair, Mutation, MutationAction}; -use db3_proto::db3_node_proto::OpenSessionResponse; -use db3_sdk::mutation_sdk::MutationSDK; -use db3_sdk::store_sdk::StoreSDK; -use dirs; -use std::fs::File; -use std::io::Write; -use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; #[macro_use] extern crate prettytable; -use bip32::Mnemonic; -use db3_session::session_manager::SessionStatus; -use prettytable::{format, Table}; -use rand_core::OsRng; -use std::process::exit; - -const HELP: &str = r#"the help of db3 command -help show all command -db new create a new database -account get balance of current account -blocks get latest blocks -quit quit command line console -"#; - -fn current_seconds() -> u64 { - match SystemTime::now().duration_since(UNIX_EPOCH) { - Ok(n) => n.as_secs(), - Err(_) => 0, - } -} - -pub fn generate_keypair() -> std::io::Result { - let mnemonic = Mnemonic::random(&mut OsRng, Default::default()); - println!("Your secret seed \n {}", mnemonic.phrase()); - let (_, keypair) = key_derive::derive_key_pair_from_path( - mnemonic.entropy(), - None, - &SignatureScheme::Secp256k1, - ) - .unwrap(); - Ok(keypair) -} - -pub fn get_key_pair(warning: bool) -> std::io::Result { - let mut home_dir = dirs::home_dir().unwrap(); - home_dir.push(".db3"); - let user_dir = home_dir.as_path(); - std::fs::create_dir_all(user_dir)?; - home_dir.push("user.key"); - let key_path = home_dir.as_path(); - if warning { - println!( - "WARNING, db3 will generate private key and save it to {}", - key_path.to_string_lossy() - ); - } - if key_path.exists() { - let kp_bytes = std::fs::read(key_path)?; - let b64_str = std::str::from_utf8(kp_bytes.as_ref()).unwrap(); - let key_pair = DB3KeyPair::from_str(b64_str).unwrap(); - Ok(key_pair) - } else { - let kp = generate_keypair().unwrap(); - let b64_str = kp.encode_base64(); - let mut f = File::create(key_path)?; - f.write_all(b64_str.as_bytes())?; - f.sync_all()?; - Ok(kp) - } -} - -fn show_account(account: &Account) { - let mut table = Table::new(); - table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); - table.add_row(row![ - "total bills", - "storage used", - "mutation", - "querys", - "credits" - ]); - let inner_account = account.clone(); - let bills = inner_account.total_bills; - let credits = inner_account.credits; - table.add_row(row![ - strings::units_to_readable_num_str(&bills.unwrap()), - strings::bytes_to_readable_num_str(account.total_storage_in_bytes), - account.total_mutation_count, - account.total_query_session_count, - strings::units_to_readable_num_str(&credits.unwrap()) - ]); - table.printstd(); -} - -/// open new session -async fn open_session(store_sdk: &mut StoreSDK, session: &mut Option) -> bool { - match store_sdk.open_session().await { - Ok(open_session_info) => { - *session = Some(open_session_info); - println!("Open Session Successfully!\n{:?}", session.as_ref()); - return true; - } - Err(e) => { - println!("Open Session Error: {e}"); - return false; - } - } -} - -/// close current session -async fn close_session( - store_sdk: &mut StoreSDK, - session: &mut Option, -) -> bool { - if session.is_none() { - return true; - } - match store_sdk - .close_session(&session.as_ref().unwrap().session_token) - 
.await - { - Ok((sess_info_node, sess_info_client, hash)) => { - println!( - "Close Session Successfully!\nNode session {:?}\nClient session: {:?}\nSubmit query session tx: {}", - sess_info_node, sess_info_client, hash - ); - // set session_id to 0 - *session = None; - return true; - } - Err(e) => { - println!("Close Session Error: {}", e); - return false; - } - } -} - -/// restart session when current session is invalid/closed/blocked -async fn refresh_session( - store_sdk: &mut StoreSDK, - session: &mut Option, -) -> bool { - if session.is_none() { - return open_session(store_sdk, session).await; - } - let (_, status) = store_sdk - .get_session_info(&session.as_ref().unwrap().session_token) - .await - .map_err(|e| { - println!("{:?}", e); - return false; - }) - .unwrap(); - if status != SessionStatus::Running { - println!("Refresh session..."); - return close_session(store_sdk, session).await && open_session(store_sdk, session).await; - } - return true; -} - -pub async fn process_cmd( - sdk: &MutationSDK, - store_sdk: &mut StoreSDK, - cmd: &str, - session: &mut Option, -) -> bool { - let parts: Vec<&str> = cmd.split(" ").collect(); - if parts.len() < 1 { - println!("{}", HELP); - return false; - } - let cmd = parts[0]; - // session info: {session_id, max_query_limit, - match cmd { - "help" => { - println!("{}", HELP); - return true; - } - "db" => {} - "gen_key" => { - get_key_pair(true).unwrap(); - return true; - } - - "quit" => { - close_session(store_sdk, session).await; - println!("Good bye!"); - exit(1); - } - "account" => { - // let kp = get_key_pair(false).unwrap(); - // let addr = get_address_from_pk(&kp.public); - // let account = store_sdk.get_account(&addr).await.unwrap(); - // show_account(&account); - return true; - } - "range" | "blocks" => { - println!("to be provided"); - return false; - } - _ => {} - } - if parts.len() < 3 { - println!("no enough command, e.g. put n1 k1 v1 k2 v2 k3 v3"); - return false; - } - - let ns = parts[1]; - let mut pairs: Vec = Vec::new(); - - match cmd { - "get" => { - if !refresh_session(store_sdk, session).await { - return false; - } - - let mut keys: Vec> = Vec::new(); - for i in 2..parts.len() { - keys.push(parts[i].as_bytes().to_vec()); - } - if let Ok(Some(values)) = store_sdk - .batch_get( - ns.as_bytes(), - keys, - &session.as_ref().unwrap().session_token, - ) - .await - .map_err(|e| { - println!("{:?}", e); - return false; - }) - { - for kv in values.values { - println!( - "{} -> {}", - std::str::from_utf8(kv.key.as_ref()).unwrap(), - std::str::from_utf8(kv.value.as_ref()).unwrap() - ); - } - return true; - } - } - "put" => { - if parts.len() < 4 { - println!("no enough command, e.g. 
put n1 k1 v1 k2 v2 k3 v3"); - return false; - } - for i in 1..parts.len() / 2 { - pairs.push(KvPair { - key: parts[i * 2].as_bytes().to_vec(), - value: parts[i * 2 + 1].as_bytes().to_vec(), - action: MutationAction::InsertKv.into(), - }); - } - } - "del" => { - for i in 2..parts.len() { - pairs.push(KvPair { - key: parts[i].as_bytes().to_vec(), - value: vec![], - action: MutationAction::DeleteKv.into(), - }); - } - } - _ => todo!(), - } - let mutation = Mutation { - ns: ns.as_bytes().to_vec(), - kv_pairs: pairs.to_owned(), - nonce: current_seconds(), - gas_price: Some(Units { - utype: UnitType::Tai.into(), - amount: 100, - }), - gas: 100, - chain_id: ChainId::DevNet.into(), - chain_role: ChainRole::StorageShardChain.into(), - }; - - if let Ok(_) = sdk.submit_mutation(&mutation).await { - println!("submit mutation to mempool done!"); - return true; - } else { - println!("fail to submit mutation to mempool"); - return false; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use db3_crypto::db3_signer::Db3MultiSchemeSigner; - use db3_crypto::{key_derive, signature_scheme::SignatureScheme}; - use db3_proto::db3_node_proto::storage_node_client::StorageNodeClient; - use db3_session::session_manager::DEFAULT_SESSION_QUERY_LIMIT; - use std::sync::Arc; - use std::{thread, time}; - use tonic::transport::Endpoint; - - fn get_a_static_keypair() -> DB3KeyPair { - let seed: [u8; 32] = [0; 32]; - let (_, keypair) = - key_derive::derive_key_pair_from_path(&seed, None, &SignatureScheme::Secp256k1) - .unwrap(); - keypair - } - - #[tokio::test] - async fn cmd_smoke_test() { - let ep = "http://127.0.0.1:26659"; - let rpc_endpoint = Endpoint::new(ep.to_string()).unwrap(); - let channel = rpc_endpoint.connect_lazy(); - let client = Arc::new(StorageNodeClient::new(channel)); - let mclient = client.clone(); - - let kp = get_a_static_keypair(); - let signer = Db3MultiSchemeSigner::new(kp); - let msdk = MutationSDK::new(mclient, signer); - let kp = get_a_static_keypair(); - let signer = Db3MultiSchemeSigner::new(kp); - let mut sdk = StoreSDK::new(client, signer); - let mut session: Option = None; - - // Put kv store - assert!( - process_cmd( - &msdk, - &mut sdk, - "put cmd_smoke_test k1 v1 k2 v2 k3 v3", - &mut session - ) - .await - ); - thread::sleep(time::Duration::from_millis(2000)); - - // Get kv store - assert!(process_cmd(&msdk, &mut sdk, "get cmd_smoke_test k1 k2 k3", &mut session).await); - - // Refresh session - let session_token1 = session.as_ref().unwrap().session_token.clone(); - assert!(!session_token1.is_empty()); - for _ in 0..(DEFAULT_SESSION_QUERY_LIMIT + 10) { - assert!( - process_cmd(&msdk, &mut sdk, "get cmd_smoke_test k1 k2 k3", &mut session).await - ); - } - let session_token2 = session.as_ref().unwrap().session_token.clone(); - assert_ne!(session_token2, session_token1); - - // Del kv store - assert!(process_cmd(&msdk, &mut sdk, "del cmd_smoke_test k1", &mut session).await); - thread::sleep(time::Duration::from_millis(2000)); - } - #[tokio::test] - async fn open_session_test() { - let ep = "http://127.0.0.1:26659"; - let rpc_endpoint = Endpoint::new(ep.to_string()).unwrap(); - let channel = rpc_endpoint.connect_lazy(); - let client = Arc::new(StorageNodeClient::new(channel)); - - let kp = get_a_static_keypair(); - let signer = Db3MultiSchemeSigner::new(kp); - let mut sdk = StoreSDK::new(client, signer); - let mut session: Option = None; - assert!(open_session(&mut sdk, &mut session).await); - assert!(!session.as_ref().unwrap().session_token.is_empty()); - } -} +pub mod command; +pub mod 
console; +pub mod keystore; +pub mod shell; diff --git a/src/cmd/src/shell.rs b/src/cmd/src/shell.rs new file mode 100644 index 00000000..df098d29 --- /dev/null +++ b/src/cmd/src/shell.rs @@ -0,0 +1,381 @@ +// +// shell.rs +// Copyright (C) 2023 db3.network Author imotai +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +use anyhow::anyhow; +use std::borrow::Cow; +use std::borrow::Cow::Owned; +use std::cmp::Ordering; +use std::collections::BTreeMap; +use std::env; +use std::fmt::Display; +use std::io::Write; +use std::sync::{Arc, RwLock}; + +use async_trait::async_trait; +use clap::*; +use colored::Colorize; +use rustyline::completion::{Completer, Pair}; +use rustyline::error::ReadlineError; +use rustyline::highlight::Highlighter; +use rustyline::hint::Hinter; +use rustyline::validate::Validator; +use rustyline::{Config, Context, Editor}; +use rustyline_derive::Helper; +use unescape::unescape; + +/// A interactive command line shell with history and completion support +pub struct Shell { + prompt: P, + state: S, + handler: H, + command: CommandStructure, +} + +impl> Shell { + pub fn new(prompt: P, state: S, handler: H, mut command: CommandStructure) -> Self { + // Add help to auto complete + let help = CommandStructure { + name: "help".to_string(), + completions: command.completions.clone(), + children: vec![], + }; + command.children.push(help); + command.completions.extend(["help".to_string()]); + Self { + prompt, + state, + handler, + command, + } + } + + pub async fn run_async( + &mut self, + out: &mut (dyn Write + Send), + err: &mut (dyn Write + Send), + ) -> Result<(), anyhow::Error> { + let config = Config::builder() + .auto_add_history(true) + .history_ignore_space(true) + .history_ignore_dups(true) + .build(); + + let mut rl = Editor::with_config(config); + + let completion_cache = Arc::new(RwLock::new(BTreeMap::new())); + + rl.set_helper(Some(ShellHelper { + command: self.command.clone(), + completion_cache: completion_cache.clone(), + })); + + loop { + // Read a line + let readline = rl.readline(&self.prompt.to_string()); + let line = match readline { + Ok(rl_line) => rl_line, + Err(ReadlineError::Interrupted | ReadlineError::Eof) => break, + Err(err) => return Err(err.into()), + }; + + let line = substitute_env_variables(line); + + // Runs the line + match split_and_unescape(line.trim()) { + Ok(line) => { + if let Some(s) = line.first() { + // These are shell only commands. 
+ match s.as_str() { + "quit" | "exit" => { + writeln!(out, "Bye!")?; + break; + } + "clear" => { + // Clear screen and move cursor to top left + write!(out, "\x1B[2J\x1B[1;1H")?; + continue; + } + "history" => { + for (pos, history) in rl.history().iter().enumerate() { + println!(" {} {}", pos + 1, history); + } + continue; + } + _ => {} + } + } else { + // do nothing if line is empty + continue; + } + + if self + .handler + .handle_async(line, &mut self.state, completion_cache.clone()) + .await + { + break; + }; + } + Err(e) => writeln!(err, "{}", e.to_string().red())?, + } + } + Ok(()) + } +} + +fn split_and_unescape(line: &str) -> Result, anyhow::Error> { + let mut commands = Vec::new(); + let split: Vec = shell_words::split(line)?; + + for word in split { + let command = + unescape(&word).ok_or_else(|| anyhow!("Error: Unhandled escape sequence {word}"))?; + commands.push(command); + } + Ok(commands) +} + +fn substitute_env_variables(s: String) -> String { + if !s.contains('$') { + return s; + } + let mut env = env::vars().collect::>(); + // Sort variable name by the length in descending order, to prevent wrong substitution by variable with partial same name. + env.sort_by(|(k1, _), (k2, _)| Ord::cmp(&k2.len(), &k1.len())); + + for (key, value) in env { + let var = format!("${key}"); + if s.contains(&var) { + let result = s.replace(var.as_str(), value.as_str()); + return if result.contains('$') { + substitute_env_variables(result) + } else { + result + }; + } + } + s +} + +pub fn install_shell_plugins(clap: Command) -> Command { + clap.subcommand( + Command::new("exit") + .alias("quit") + .about("Exit the interactive shell"), + ) + .subcommand(Command::new("clear").about("Clear screen")) + .subcommand(Command::new("history").about("Print history")) +} + +#[derive(Helper)] +struct ShellHelper { + pub command: CommandStructure, + pub completion_cache: CompletionCache, +} + +impl Hinter for ShellHelper { + type Hint = String; +} + +impl Highlighter for ShellHelper { + fn highlight_prompt<'b, 's: 'b, 'p: 'b>( + &'s self, + prompt: &'p str, + _default: bool, + ) -> Cow<'b, str> { + Owned(prompt.bold().green().to_string()) + } +} + +impl Validator for ShellHelper {} + +impl Completer for ShellHelper { + type Candidate = Pair; + fn complete( + &self, + line: &str, + _pos: usize, + _ctx: &Context<'_>, + ) -> Result<(usize, Vec), rustyline::error::ReadlineError> { + let line = format!("{line}_"); + // split line + let mut tokens = line.split_whitespace(); + let mut last_token = tokens.next_back().unwrap().to_string(); + last_token.pop(); + + let mut command = &self.command; + let mut previous_tokens = Vec::new(); + for tok in tokens { + let next_cmd = command.get_child(tok); + if let Some(next_command) = next_cmd { + command = next_command; + } + previous_tokens.push(tok.to_string()); + } + + let completions = command.completions.clone(); + let cache_key = CacheKey { + command: Some(command.name.clone()), + flag: previous_tokens.last().cloned().unwrap_or_default(), + }; + let mut completion_from_cache = self + .completion_cache + .read() + .map(|cache| cache.get(&cache_key).cloned().unwrap_or_default()) + .unwrap_or_default(); + + completion_from_cache.extend(completions); + + let candidates = completion_from_cache + .into_iter() + .filter(|string| string.starts_with(&last_token) && !previous_tokens.contains(string)) + .collect::>(); + + Ok(( + line.len() - last_token.len() - 1, + candidates + .iter() + .map(|cmd| Pair { + display: cmd.to_string(), + replacement: cmd.to_string(), + }) + 
.collect(), + )) + } +} + +#[derive(Clone)] +pub struct CommandStructure { + pub name: String, + pub completions: Vec, + pub children: Vec, +} + +impl CommandStructure { + /// Create CommandStructure using clap::Command, currently only support 1 level of subcommands + pub fn from_clap(app: &Command) -> Self { + let subcommands = app + .get_subcommands() + .map(|it| { + let name = it.get_name(); + CommandStructure { + name: name.to_string(), + completions: it + .get_opts() + .map(|it| match it.get_long() { + Some(long) => format!("--{}", long), + None => format!("--{}", name), + }) + .collect::>(), + children: vec![], + } + }) + .collect::>(); + + Self::from_children("", subcommands) + } + + fn from_children(name: &str, children: Vec) -> Self { + let completions = children + .iter() + .map(|child| child.name.to_string()) + .collect(); + Self { + name: name.to_string(), + completions, + children, + } + } + + fn get_child(&self, name: &str) -> Option<&CommandStructure> { + self.children + .iter() + .find(|&subcommand| subcommand.name == name) + } +} + +#[async_trait] +pub trait AsyncHandler { + async fn handle_async( + &self, + args: Vec, + state: &mut T, + completion_cache: CompletionCache, + ) -> bool; +} + +pub type CompletionCache = Arc>>>; + +#[derive(PartialEq)] +/// A special key for `CompletionCache` which will perform wildcard key matching. +/// Command field is optional and it will be treated as wildcard if `None` +pub struct CacheKey { + command: Option, + flag: String, +} +impl CacheKey { + pub fn new(command: &str, flag: &str) -> Self { + Self { + command: Some(command.to_string()), + flag: flag.to_string(), + } + } + pub fn flag(flag: &str) -> Self { + Self { + command: None, + flag: flag.to_string(), + } + } +} +impl Eq for CacheKey {} + +impl PartialOrd for CacheKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +/// This custom ordering for `CacheKey` enable wildcard matching, +/// the command field for `CacheKey` is optional and can be used as a wildcard when equal `None` +/// # Examples +/// ``` +/// use std::cmp::Ordering; +/// use std::collections::BTreeMap; +/// use sui::shell::CacheKey; +/// +/// assert_eq!(Ordering::Equal, CacheKey::flag("--flag").cmp(&CacheKey::new("any command", "--flag"))); +/// +/// let mut data = BTreeMap::new(); +/// data.insert(CacheKey::flag("--flag"), "Some Data"); +/// +/// assert_eq!(Some(&"Some Data"), data.get(&CacheKey::new("This can be anything", "--flag"))); +/// assert_eq!(Some(&"Some Data"), data.get(&CacheKey::flag("--flag"))); +/// ``` +impl Ord for CacheKey { + fn cmp(&self, other: &Self) -> Ordering { + let cmd_eq = if self.command.is_none() || other.command.is_none() { + Ordering::Equal + } else { + self.command.cmp(&other.command) + }; + + if cmd_eq != Ordering::Equal { + cmd_eq + } else { + self.flag.cmp(&other.flag) + } + } +} diff --git a/src/crypto/Cargo.toml b/src/crypto/Cargo.toml index 4758bae3..d06cbfa1 100644 --- a/src/crypto/Cargo.toml +++ b/src/crypto/Cargo.toml @@ -20,6 +20,7 @@ prost-types = "0.11" rand = "0.8.5" bytes = "1" hex = "0.4.3" +base64ct = { version = "1.5.3", features = ["alloc"] } schemars ="0.8.10" serde = { version = "1.0.144", features = ["derive"] } serde-name = "0.2.1" @@ -37,5 +38,3 @@ bip32 = "0.4.0" slip10_ed25519 = "0.1.3" byteorder = "1.4.3" rust_secp256k1 = { version = "0.24.0", package = "secp256k1", features = ["bitcoin_hashes"] } - - diff --git a/src/crypto/src/db3_signature.rs b/src/crypto/src/db3_signature.rs index 5b36a787..b37470c6 100644 --- 
a/src/crypto/src/db3_signature.rs +++ b/src/crypto/src/db3_signature.rs @@ -235,7 +235,6 @@ pub trait DB3Signature: Sized + signature::Signature { fn signature_bytes(&self) -> &[u8]; fn public_key_bytes(&self) -> &[u8]; fn scheme(&self) -> SignatureScheme; - fn verify(&self, value: &[u8]) -> Result; } diff --git a/src/crypto/src/db3_signer.rs b/src/crypto/src/db3_signer.rs index 819c6fda..1cd3fd2c 100644 --- a/src/crypto/src/db3_signer.rs +++ b/src/crypto/src/db3_signer.rs @@ -15,6 +15,7 @@ // limitations under the License. // +use crate::db3_address::DB3Address; use crate::db3_keypair::DB3KeyPair; use crate::db3_signature::Signature; use db3_error::{DB3Error, Result}; @@ -37,6 +38,11 @@ impl Db3MultiSchemeSigner { .map_err(|e| DB3Error::SignMessageError(format!("{e}")))?; Ok(signature) } + + pub fn get_address(&self) -> Result { + let pk = self.kp.public(); + Ok(DB3Address::from(&pk)) + } } #[cfg(test)] diff --git a/src/crypto/src/id.rs b/src/crypto/src/id.rs index 1df0985f..06f6e1ad 100644 --- a/src/crypto/src/id.rs +++ b/src/crypto/src/id.rs @@ -16,6 +16,7 @@ // use crate::db3_address::{DB3Address, DB3_ADDRESS_LENGTH}; +use base64ct::Encoding as _; use byteorder::{BigEndian, WriteBytesExt}; use db3_error::DB3Error; use fastcrypto::hash::{HashFunction, Sha3_256}; @@ -31,17 +32,28 @@ impl AccountId { pub fn new(addr: DB3Address) -> Self { Self { addr } } + #[inline] + pub fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self.addr.as_ref())) + } } +pub const TX_ID_LENGTH: usize = 32; #[derive(Eq, Default, PartialEq, Ord, PartialOrd, Copy, Clone)] pub struct TxId { - data: [u8; 32], + data: [u8; TX_ID_LENGTH], } impl TxId { #[inline] pub fn zero() -> Self { - Self { data: [0; 32] } + Self { + data: [0; TX_ID_LENGTH], + } + } + + pub fn to_base64(&self) -> String { + base64ct::Base64::encode_string(self.as_ref()) } } @@ -52,6 +64,12 @@ impl From<&[u8]> for TxId { } } +impl From<[u8; TX_ID_LENGTH]> for TxId { + fn from(data: [u8; TX_ID_LENGTH]) -> Self { + Self { data } + } +} + impl AsRef<[u8]> for TxId { fn as_ref(&self) -> &[u8] { &self.data[..] diff --git a/src/node/src/abci_impl.rs b/src/node/src/abci_impl.rs index 55331a1c..2f51677f 100644 --- a/src/node/src/abci_impl.rs +++ b/src/node/src/abci_impl.rs @@ -135,19 +135,25 @@ impl Application for AbciImpl { match payload_type { Some(PayloadType::DatabasePayload) => { match DatabaseMutation::decode(request.payload.as_ref()) { - Ok(_) => { - return ResponseCheckTx { - code: 0, - data: Bytes::new(), - log: "".to_string(), - info: "".to_string(), - gas_wanted: 1, - gas_used: 0, - events: vec![], - codespace: "".to_string(), - ..Default::default() - }; - } + Ok(dm) => match &dm.meta { + Some(_) => { + return ResponseCheckTx { + code: 0, + data: Bytes::new(), + log: "".to_string(), + info: "".to_string(), + gas_wanted: 1, + gas_used: 0, + events: vec![], + codespace: "".to_string(), + ..Default::default() + }; + } + None => { + //TODO add event + warn!("no meta for database mutation"); + } + }, Err(_) => { //TODO add event ? 
warn!("invalid database byte data"); @@ -423,7 +429,12 @@ impl Application for AbciImpl { } let pending_databases_len = pending_databases.len(); for item in pending_databases { - match s.apply_database(&item.0, 1, &item.2, &item.1) { + let nonce: u64 = match &item.1.meta { + Some(m) => m.nonce, + //TODO will not go to here + None => 1, + }; + match s.apply_database(&item.0, nonce, &item.2, &item.1) { Ok(_) => {} Err(_) => { todo!() diff --git a/src/node/src/command.rs b/src/node/src/command.rs new file mode 100644 index 00000000..dcfc4deb --- /dev/null +++ b/src/node/src/command.rs @@ -0,0 +1,340 @@ +// +// command.rs +// Copyright (C) 2023 db3.network Author imotai +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +use crate::abci_impl::{AbciImpl, NodeState}; +use crate::auth_storage::AuthStorage; +use crate::context::Context; +use crate::json_rpc_impl; +use crate::node_storage::NodeStorage; +use crate::storage_node_impl::StorageNodeImpl; +use actix_cors::Cors; +use actix_web::{rt, web, App, HttpServer}; +use clap::Parser; +use db3_cmd::command::{DB3ClientCommand, DB3ClientContext}; +use db3_crypto::db3_signer::Db3MultiSchemeSigner; +use db3_proto::db3_node_proto::storage_node_client::StorageNodeClient; +use db3_proto::db3_node_proto::storage_node_server::StorageNodeServer; +use db3_proto::db3_node_proto::OpenSessionResponse; +use db3_sdk::mutation_sdk::MutationSDK; +use db3_sdk::store_sdk::StoreSDK; +use http::Uri; +use merkdb::Merk; +use std::boxed::Box; +use std::io::{stderr, stdout}; +use std::pin::Pin; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::sync::Mutex; +use std::thread; +use std::thread::JoinHandle; +use std::time::Duration; +use tendermint_abci::ServerBuilder; +use tendermint_rpc::HttpClient; +use tonic::codegen::http::Method; +use tonic::transport::{ClientTlsConfig, Endpoint, Server}; +use tower_http::cors::{Any, CorsLayer}; +use tracing::{info, warn}; +use tracing_subscriber::filter::LevelFilter; + +const ABOUT: &str = " +██████╗ ██████╗ ██████╗ +██╔══██╗██╔══██╗╚════██╗ +██║ ██║██████╔╝ █████╔╝ +██║ ██║██╔══██╗ ╚═══██╗ +██████╔╝██████╔╝██████╔╝ +╚═════╝ ╚═════╝ ╚═════╝ +@db3.network🚀🚀🚀"; + +#[derive(Debug, Parser)] +#[clap(name = "db3")] +#[clap(about = ABOUT, long_about = None)] +pub enum DB3Command { + /// Start db3 network + #[clap(name = "start")] + Start { + /// Bind the gprc server to this . + #[clap(long, default_value = "127.0.0.1")] + public_host: String, + /// The port of grpc api + #[clap(long, default_value = "26659")] + public_grpc_port: u16, + #[clap(long, default_value = "26670")] + public_json_rpc_port: u16, + /// Bind the abci server to this port. + #[clap(long, default_value = "26658")] + abci_port: u16, + /// The porf of tendemint + #[clap(long, default_value = "26657")] + tendermint_port: u16, + /// The default server read buffer size, in bytes, for each incoming client + /// connection. 
+ #[clap(short, long, default_value = "1048576")] + read_buf_size: usize, + /// Increase output logging verbosity to DEBUG level. + #[clap(short, long)] + verbose: bool, + /// Suppress all output logging (overrides --verbose). + #[clap(short, long)] + quiet: bool, + #[clap(short, long, default_value = "./db")] + db_path: String, + #[clap(long, default_value = "16")] + db_tree_level_in_memory: u8, + /// disable grpc-web + #[clap(long, default_value = "false")] + disable_grpc_web: bool, + }, + + /// Start db3 interactive console + #[clap(name = "console")] + Console { + /// the url of db3 grpc api + #[clap(long = "url", global = true, default_value = "http://127.0.0.1:26659")] + public_grpc_url: String, + }, + + /// Run db3 client + #[clap(name = "client")] + Client { + /// the url of db3 grpc api + #[clap(long = "url", global = true, default_value = "http://127.0.0.1:26659")] + public_grpc_url: String, + /// the subcommand + #[clap(subcommand)] + cmd: Option, + }, +} + +impl DB3Command { + fn build_context(public_grpc_url: &str) -> DB3ClientContext { + let uri = public_grpc_url.parse::().unwrap(); + let endpoint = match uri.scheme_str() == Some("https") { + true => { + let rpc_endpoint = Endpoint::new(public_grpc_url.to_string()) + .unwrap() + .tls_config(ClientTlsConfig::new()) + .unwrap(); + rpc_endpoint + } + false => { + let rpc_endpoint = Endpoint::new(public_grpc_url.to_string()).unwrap(); + rpc_endpoint + } + }; + let channel = endpoint.connect_lazy(); + let node = Arc::new(StorageNodeClient::new(channel)); + if !db3_cmd::keystore::KeyStore::has_key() { + db3_cmd::keystore::KeyStore::recover_keypair().unwrap(); + } + let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); + let signer = Db3MultiSchemeSigner::new(kp); + let sdk = MutationSDK::new(node, signer); + DB3ClientContext { + mutation_sdk: Some(sdk), + } + } + + pub async fn execute(self) { + match self { + DB3Command::Console { public_grpc_url } => { + let ctx = Self::build_context(public_grpc_url.as_ref()); + db3_cmd::console::start_console(ctx, &mut stdout(), &mut stderr()) + .await + .unwrap(); + } + DB3Command::Client { + cmd, + public_grpc_url, + } => { + let mut ctx = Self::build_context(public_grpc_url.as_ref()); + if let Some(c) = cmd { + c.execute(&mut ctx).await; + } + } + DB3Command::Start { + public_host, + public_grpc_port, + public_json_rpc_port, + abci_port, + tendermint_port, + read_buf_size, + verbose, + quiet, + db_path, + db_tree_level_in_memory, + disable_grpc_web, + } => { + let log_level = if quiet { + LevelFilter::OFF + } else if verbose { + LevelFilter::DEBUG + } else { + LevelFilter::INFO + }; + tracing_subscriber::fmt().with_max_level(log_level).init(); + info!("{ABOUT}"); + let opts = Merk::default_db_opts(); + let merk = Merk::open_opt(&db_path, opts, db_tree_level_in_memory).unwrap(); + let node_store = Arc::new(Mutex::new(Box::pin(NodeStorage::new( + AuthStorage::new(merk), + )))); + match node_store.lock() { + Ok(mut store) => { + if store.get_auth_store().init().is_err() { + warn!("Fail to init auth storage!"); + return; + } + } + _ => todo!(), + } + let (_node_state, abci_handler) = + Self::start_abci_service(abci_port, read_buf_size, node_store.clone()); + let tm_addr = format!("http://127.0.0.1:{tendermint_port}"); + info!("db3 json rpc server will connect to tendermint {tm_addr}"); + let client = HttpClient::new(tm_addr.as_str()).unwrap(); + let context = Context { + node_store: node_store.clone(), + client, + }; + let json_rpc_handler = Self::start_json_rpc_service( + &public_host, + 
public_json_rpc_port, + context.clone(), + ); + Self::start_grpc_service(&public_host, public_grpc_port, disable_grpc_web, context) + .await; + let running = Arc::new(AtomicBool::new(true)); + let r = running.clone(); + ctrlc::set_handler(move || { + r.store(false, Ordering::SeqCst); + }) + .expect("Error setting Ctrl-C handler"); + loop { + if running.load(Ordering::SeqCst) { + let ten_millis = Duration::from_millis(10); + thread::sleep(ten_millis); + } else { + info!("stop db3..."); + abci_handler.join().unwrap(); + json_rpc_handler.join().unwrap(); + break; + } + } + } + _ => {} + } + } + + /// Start GRPC Service + async fn start_grpc_service( + public_host: &str, + public_grpc_port: u16, + disable_grpc_web: bool, + context: Context, + ) { + let addr = format!("{public_host}:{public_grpc_port}"); + let kp = crate::node_key::get_key_pair(None).unwrap(); + let signer = Db3MultiSchemeSigner::new(kp); + let storage_node = StorageNodeImpl::new(context, signer); + info!("start db3 storage node on public addr {}", addr); + if disable_grpc_web { + Server::builder() + .add_service(StorageNodeServer::new(storage_node)) + .serve(addr.parse().unwrap()) + .await + .unwrap(); + } else { + let cors_layer = CorsLayer::new() + .allow_methods([Method::GET, Method::POST, Method::OPTIONS]) + .allow_headers(Any) + .allow_origin(Any); + Server::builder() + .accept_http1(true) + .layer(cors_layer) + .layer(tonic_web::GrpcWebLayer::new()) + .add_service(StorageNodeServer::new(storage_node)) + .serve(addr.parse().unwrap()) + .await + .unwrap(); + } + } + + /// + /// Start JSON RPC Service + /// + fn start_json_rpc_service( + public_host: &str, + public_json_rpc_port: u16, + context: Context, + ) -> JoinHandle<()> { + let local_public_host = public_host.to_string(); + let addr = format!("{local_public_host}:{public_json_rpc_port}"); + info!("start json rpc server with addr {}", addr.as_str()); + let handler = thread::spawn(move || { + rt::System::new() + .block_on(async { + HttpServer::new(move || { + let cors = Cors::default() + .allow_any_origin() + .allow_any_method() + .allow_any_header() + .max_age(3600); + App::new() + .app_data(web::Data::new(context.clone())) + .wrap(cors) + .service( + web::resource("/").route(web::post().to(json_rpc_impl::rpc_router)), + ) + }) + .disable_signals() + .bind((local_public_host, public_json_rpc_port)) + .unwrap() + .run() + .await + }) + .unwrap(); + }); + handler + } + + /// + /// Start ABCI service + /// + fn start_abci_service( + abci_port: u16, + read_buf_size: usize, + store: Arc>>>, + ) -> (Arc, JoinHandle<()>) { + let addr = format!("{}:{}", "127.0.0.1", abci_port); + let abci_impl = AbciImpl::new(store); + let node_state = abci_impl.get_node_state().clone(); + let handler = thread::spawn(move || { + let server = ServerBuilder::new(read_buf_size).bind(addr, abci_impl); + match server { + Ok(s) => { + if let Err(e) = s.listen() { + warn!("fail to listen addr for error {}", e); + } + } + Err(e) => { + warn!("fail to bind addr for error {}", e); + } + } + }); + (node_state, handler) + } +} diff --git a/src/node/src/lib.rs b/src/node/src/lib.rs index 7cb9fa8e..41bc770e 100644 --- a/src/node/src/lib.rs +++ b/src/node/src/lib.rs @@ -17,6 +17,7 @@ pub mod abci_impl; pub mod auth_storage; +pub mod command; pub mod context; mod hash_util; mod json_rpc; diff --git a/src/node/src/main.rs b/src/node/src/main.rs index 9c57c6a9..ae8c2755 100644 --- a/src/node/src/main.rs +++ b/src/node/src/main.rs @@ -17,341 +17,11 @@ // // // -use shadow_rs::shadow; -shadow!(build); -use 
actix_cors::Cors; -use actix_web::{rt, web, App, HttpServer}; -use clap::{Parser, Subcommand}; -use db3_crypto::db3_signer::Db3MultiSchemeSigner; -use db3_node::abci_impl::{AbciImpl, NodeState}; -use db3_node::auth_storage::AuthStorage; -use db3_node::context::Context; -use db3_node::json_rpc_impl; -use db3_node::node_storage::NodeStorage; -use db3_node::storage_node_impl::StorageNodeImpl; -use db3_proto::db3_node_proto::storage_node_client::StorageNodeClient; -use db3_proto::db3_node_proto::storage_node_server::StorageNodeServer; -use db3_proto::db3_node_proto::OpenSessionResponse; -use db3_sdk::mutation_sdk::MutationSDK; -use db3_sdk::store_sdk::StoreSDK; -use http::Uri; -use merkdb::Merk; -use std::io::stdout; -use std::io::Write; -use std::io::{self, BufRead}; -use std::pin::Pin; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::sync::Mutex; -use std::thread; -use std::thread::JoinHandle; -use std::time::Duration; -use tendermint_abci::ServerBuilder; -use tendermint_rpc::HttpClient; -use tonic::codegen::http::Method; -use tonic::transport::{ClientTlsConfig, Endpoint, Server}; -use tower_http::cors::{Any, CorsLayer}; -use tracing::{info, warn}; -use tracing_subscriber::filter::LevelFilter; -const ABOUT: &str = " -██████╗ ██████╗ ██████╗ -██╔══██╗██╔══██╗╚════██╗ -██║ ██║██████╔╝ █████╔╝ -██║ ██║██╔══██╗ ╚═══██╗ -██████╔╝██████╔╝██████╔╝ -╚═════╝ ╚═════╝ ╚═════╝ -@db3.network🚀🚀🚀"; - -#[derive(Debug, Parser)] -#[clap(name = "db3")] -#[clap(about = ABOUT, long_about = None)] -struct Cli { - #[clap(subcommand)] - command: Commands, -} - -#[derive(Debug, Subcommand)] -enum Commands { - /// Start a interactive shell - #[clap()] - Shell { - /// the url of db3 grpc api - #[clap(long, default_value = "http://127.0.0.1:26659")] - public_grpc_url: String, - }, - - /// Start DB3 node server - #[clap()] - Node { - /// Bind the gprc server to this . - #[clap(long, default_value = "127.0.0.1")] - public_host: String, - /// The port of grpc api - #[clap(long, default_value = "26659")] - public_grpc_port: u16, - #[clap(long, default_value = "26670")] - public_json_rpc_port: u16, - /// Bind the abci server to this port. - #[clap(long, default_value = "26658")] - abci_port: u16, - /// The porf of tendemint - #[clap(long, default_value = "26657")] - tm_port: u16, - /// The default server read buffer size, in bytes, for each incoming client - /// connection. - #[clap(short, long, default_value = "1048576")] - read_buf_size: usize, - /// Increase output logging verbosity to DEBUG level. - #[clap(short, long)] - verbose: bool, - /// Suppress all output logging (overrides --verbose). 
- #[clap(short, long)] - quiet: bool, - #[clap(short, long, default_value = "./db")] - db_path: String, - #[clap(long, default_value = "16")] - db_tree_level_in_memory: u8, - /// disable grpc-web - #[clap(long, default_value = "false")] - disable_grpc_web: bool, - }, - - /// Get the version of DB3 - #[clap()] - Version {}, -} - -/// -/// Start ABCI Service for tendermint and only local process can connect to this service -/// -fn start_abci_service( - abci_port: u16, - read_buf_size: usize, - store: Arc>>>, -) -> (Arc, JoinHandle<()>) { - let addr = format!("{}:{}", "127.0.0.1", abci_port); - let abci_impl = AbciImpl::new(store); - let node_state = abci_impl.get_node_state().clone(); - let handler = thread::spawn(move || { - let server = ServerBuilder::new(read_buf_size).bind(addr, abci_impl); - match server { - Ok(s) => { - if let Err(e) = s.listen() { - warn!("fail to listen addr for error {}", e); - } - } - Err(e) => { - warn!("fail to bind addr for error {}", e); - } - } - }); - (node_state, handler) -} - -/// Start GRPC Service -async fn start_grpc_service( - public_host: &str, - public_grpc_port: u16, - disable_grpc_web: bool, - context: Context, -) { - let addr = format!("{}:{}", public_host, public_grpc_port); - let kp = db3_node::node_key::get_key_pair(None).unwrap(); - let signer = Db3MultiSchemeSigner::new(kp); - let storage_node = StorageNodeImpl::new(context, signer); - info!("start db3 storage node on public addr {}", addr); - if disable_grpc_web { - Server::builder() - .add_service(StorageNodeServer::new(storage_node)) - .serve(addr.parse().unwrap()) - .await - .unwrap(); - } else { - let cors_layer = CorsLayer::new() - .allow_methods([Method::GET, Method::POST, Method::OPTIONS]) - .allow_headers(Any) - .allow_origin(Any); - Server::builder() - .accept_http1(true) - .layer(cors_layer) - .layer(tonic_web::GrpcWebLayer::new()) - .add_service(StorageNodeServer::new(storage_node)) - .serve(addr.parse().unwrap()) - .await - .unwrap(); - } -} - -/// -/// Start JSON RPC Service -/// -fn start_json_rpc_service( - public_host: &str, - public_json_rpc_port: u16, - context: Context, -) -> JoinHandle<()> { - let local_public_host = public_host.to_string(); - let addr = format!("{}:{}", local_public_host, public_json_rpc_port); - info!("start json rpc server with addr {}", addr.as_str()); - let handler = thread::spawn(move || { - rt::System::new() - .block_on(async { - HttpServer::new(move || { - let cors = Cors::default() - .allow_any_origin() - .allow_any_method() - .allow_any_header() - .max_age(3600); - App::new() - .app_data(web::Data::new(context.clone())) - .wrap(cors) - .service( - web::resource("/").route(web::post().to(json_rpc_impl::rpc_router)), - ) - }) - .disable_signals() - .bind((local_public_host, public_json_rpc_port)) - .unwrap() - .run() - .await - }) - .unwrap(); - }); - handler -} - -async fn start_node(cmd: Commands) { - if let Commands::Node { - public_host, - public_grpc_port, - public_json_rpc_port, - abci_port, - tm_port, - read_buf_size, - verbose, - quiet, - db_path, - db_tree_level_in_memory, - disable_grpc_web, - } = cmd - { - let log_level = if quiet { - LevelFilter::OFF - } else if verbose { - LevelFilter::DEBUG - } else { - LevelFilter::INFO - }; - tracing_subscriber::fmt().with_max_level(log_level).init(); - info!("{}", ABOUT); - let opts = Merk::default_db_opts(); - let merk = Merk::open_opt(&db_path, opts, db_tree_level_in_memory).unwrap(); - let node_store = Arc::new(Mutex::new(Box::pin(NodeStorage::new(AuthStorage::new( - merk, - ))))); - match 
node_store.lock() { - Ok(mut store) => { - if store.get_auth_store().init().is_err() { - warn!("Fail to init auth storage!"); - return; - } - } - _ => todo!(), - } - //TODO recover storage - let (_node_state, abci_handler) = - start_abci_service(abci_port, read_buf_size, node_store.clone()); - let tm_addr = format!("http://127.0.0.1:{}", tm_port); - info!("db3 json rpc server will connect to tendermint {}", tm_addr); - let client = HttpClient::new(tm_addr.as_str()).unwrap(); - let context = Context { - node_store: node_store.clone(), - client, - }; - let json_rpc_handler = - start_json_rpc_service(&public_host, public_json_rpc_port, context.clone()); - start_grpc_service(&public_host, public_grpc_port, disable_grpc_web, context).await; - let running = Arc::new(AtomicBool::new(true)); - let r = running.clone(); - ctrlc::set_handler(move || { - r.store(false, Ordering::SeqCst); - }) - .expect("Error setting Ctrl-C handler"); - loop { - if running.load(Ordering::SeqCst) { - let ten_millis = Duration::from_millis(10); - thread::sleep(ten_millis); - } else { - info!("stop db3..."); - abci_handler.join().unwrap(); - json_rpc_handler.join().unwrap(); - break; - } - } - } -} - -async fn start_shell(cmd: Commands) { - if let Commands::Shell { public_grpc_url } = cmd { - println!("{}", ABOUT); - // broadcast client - let uri = public_grpc_url.parse::().unwrap(); - let endpoint = match uri.scheme_str() == Some("https") { - true => { - let rpc_endpoint = Endpoint::new(public_grpc_url) - .unwrap() - .tls_config(ClientTlsConfig::new()) - .unwrap(); - rpc_endpoint - } - false => { - let rpc_endpoint = Endpoint::new(public_grpc_url).unwrap(); - rpc_endpoint - } - }; - let channel = endpoint.connect_lazy(); - let client = Arc::new(StorageNodeClient::new(channel)); - let kp = db3_cmd::get_key_pair(true).unwrap(); - let signer = Db3MultiSchemeSigner::new(kp); - let sdk = MutationSDK::new(client.clone(), signer); - let kp = db3_cmd::get_key_pair(false).unwrap(); - let signer = Db3MultiSchemeSigner::new(kp); - let mut store_sdk = StoreSDK::new(client, signer); - print!(">"); - stdout().flush().unwrap(); - let mut session: Option = None; - let stdin = io::stdin(); - for line in stdin.lock().lines() { - match line { - Err(_) => { - return; - } - Ok(s) => { - db3_cmd::process_cmd(&sdk, &mut store_sdk, s.as_str(), &mut session).await; - print!(">"); - stdout().flush().unwrap(); - } - } - } - } -} +use clap::Parser; +use db3_node::command::DB3Command; #[tokio::main] async fn main() { - let args = Cli::parse(); - match args.command { - Commands::Shell { .. } => start_shell(args.command).await, - Commands::Node { .. } => start_node(args.command).await, - Commands::Version { .. 
} => { - if shadow_rs::tag().len() > 0 { - println!("version:{}", shadow_rs::tag()); - } else { - println!( - "warning: a development version being used in branch {}", - shadow_rs::branch() - ); - } - println!("commit:{}", build::SHORT_COMMIT); - } - } + let command = DB3Command::parse(); + command.execute().await } diff --git a/src/proto/proto/db3_base.proto b/src/proto/proto/db3_base.proto index d3ac36dd..8364724d 100644 --- a/src/proto/proto/db3_base.proto +++ b/src/proto/proto/db3_base.proto @@ -42,22 +42,6 @@ enum ChainId { DevNet = 20; } - -message Erc20Token { - // for example USDT - string symbal = 1; - // for example cent, usdt - repeated string units = 2; - // for example 1, 10 which means 1 usdt = 10 cent - repeated uint64 scalar = 3; -} - -message Price { - uint64 amount = 1; - string unit = 2; - Erc20Token token = 3; -} - message BroadcastMeta { uint64 nonce = 1; // the chain id of db3 diff --git a/src/sdk/Cargo.toml b/src/sdk/Cargo.toml index dca21567..e5cdd867 100644 --- a/src/sdk/Cargo.toml +++ b/src/sdk/Cargo.toml @@ -20,8 +20,6 @@ tonic = { version = "0.8.3", features = ["tls-roots"]} tonic-web = "0.5.0" prost = "0.11" prost-types = "0.11" -ethereum-types = { version = "0.14.0", default-features = false } -subtle-encoding = { version = "0.5", default-features = false, features = ["bech32-preview", "base64"] } chrono = "0.4.22" enum-primitive-derive = "^0.1" num-traits = "^0.1" @@ -42,4 +40,4 @@ name = "sdk_benchmark" harness = false [[bench]] name = "submit_mutation_benchmark" -harness = false \ No newline at end of file +harness = false diff --git a/src/sdk/src/mutation_sdk.rs b/src/sdk/src/mutation_sdk.rs index f98584bd..cf3cf71d 100644 --- a/src/sdk/src/mutation_sdk.rs +++ b/src/sdk/src/mutation_sdk.rs @@ -16,13 +16,15 @@ // use bytes::BytesMut; -use db3_crypto::db3_signer::Db3MultiSchemeSigner; +use db3_crypto::{ + db3_signer::Db3MultiSchemeSigner, + id::{DbId, TxId, TX_ID_LENGTH}, +}; use db3_error::{DB3Error, Result}; -use db3_proto::db3_mutation_proto::{Mutation, PayloadType, WriteRequest}; +use db3_proto::db3_mutation_proto::{DatabaseMutation, Mutation, PayloadType, WriteRequest}; use db3_proto::db3_node_proto::{storage_node_client::StorageNodeClient, BroadcastRequest}; use prost::Message; use std::sync::Arc; -use subtle_encoding::base64; pub struct MutationSDK { signer: Db3MultiSchemeSigner, @@ -37,12 +39,62 @@ impl MutationSDK { Self { client, signer } } - pub async fn submit_mutation(&self, mutation: &Mutation) -> Result { + pub async fn create_database( + &self, + database_mutation: &DatabaseMutation, + ) -> Result<(DbId, TxId)> { + let nonce: u64 = match &database_mutation.meta { + Some(m) => Ok(m.nonce), + None => Err(DB3Error::SubmitMutationError( + "meta in mutation is none".to_string(), + )), + }?; + let mut mbuf = BytesMut::with_capacity(1024 * 4); + database_mutation + .encode(&mut mbuf) + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))?; + let mbuf = mbuf.freeze(); + let signature = self.signer.sign(mbuf.as_ref())?; + let request = WriteRequest { + signature: signature.as_ref().to_vec().to_owned(), + payload: mbuf.as_ref().to_vec().to_owned(), + payload_type: PayloadType::DatabasePayload.into(), + }; + // + //TODO generate the address from local currently + // + let mut buf = BytesMut::with_capacity(1024 * 4); + request + .encode(&mut buf) + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))?; + let buf = buf.freeze(); + let r = BroadcastRequest { + body: buf.as_ref().to_vec(), + }; + + let request = tonic::Request::new(r); + let 
mut client = self.client.as_ref().clone(); + let response = client + .broadcast(request) + .await + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))? + .into_inner(); + let hash: [u8; TX_ID_LENGTH] = response + .hash + .try_into() + .map_err(|_| DB3Error::InvalidAddress)?; + let tx_id = TxId::from(hash); + let sender = self.signer.get_address()?; + let db_id = DbId::try_from((&sender, nonce))?; + Ok((db_id, tx_id)) + } + + pub async fn submit_mutation(&self, mutation: &Mutation) -> Result { //TODO update gas and nonce let mut mbuf = BytesMut::with_capacity(1024 * 4); mutation .encode(&mut mbuf) - .map_err(|e| DB3Error::SubmitMutationError(format!("{}", e)))?; + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))?; let mbuf = mbuf.freeze(); let signature = self.signer.sign(mbuf.as_ref())?; let request = WriteRequest { @@ -55,7 +107,7 @@ impl MutationSDK { let mut buf = BytesMut::with_capacity(1024 * 4); request .encode(&mut buf) - .map_err(|e| DB3Error::SubmitMutationError(format!("{}", e)))?; + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))?; let buf = buf.freeze(); let r = BroadcastRequest { body: buf.as_ref().to_vec(), @@ -65,10 +117,14 @@ impl MutationSDK { let response = client .broadcast(request) .await - .map_err(|e| DB3Error::SubmitMutationError(format!("{}", e)))? + .map_err(|e| DB3Error::SubmitMutationError(format!("{e}")))? .into_inner(); - let base64_byte = base64::encode(response.hash); - Ok(String::from_utf8_lossy(base64_byte.as_ref()).to_string()) + let hash: [u8; TX_ID_LENGTH] = response + .hash + .try_into() + .map_err(|_| DB3Error::InvalidAddress)?; + let tx_id = TxId::from(hash); + Ok(tx_id) } } diff --git a/src/sdk/src/store_sdk.rs b/src/sdk/src/store_sdk.rs index c297fe4b..fe7c3aab 100644 --- a/src/sdk/src/store_sdk.rs +++ b/src/sdk/src/store_sdk.rs @@ -32,7 +32,6 @@ use db3_session::session_manager::{SessionPool, SessionStatus}; use num_traits::cast::FromPrimitive; use prost::Message; use std::sync::Arc; -use subtle_encoding::base64; use tonic::Status; use uuid::Uuid; @@ -94,7 +93,7 @@ impl StoreSDK { pub async fn close_session( &mut self, token: &String, - ) -> std::result::Result<(QuerySessionInfo, QuerySessionInfo, String), Status> { + ) -> std::result::Result<(QuerySessionInfo, QuerySessionInfo), Status> { match self.session_pool.get_session(token) { Some(sess) => { let query_session_info = sess.get_session_info(); @@ -126,12 +125,7 @@ impl StoreSDK { Ok(response) => match self.session_pool.remove_session(token) { Ok(_) => { let response = response.into_inner(); - let base64_byte = base64::encode(response.hash); - Ok(( - response.query_session_info.unwrap(), - query_session_info, - String::from_utf8_lossy(base64_byte.as_ref()).to_string(), - )) + Ok((response.query_session_info.unwrap(), query_session_info)) } Err(e) => Err(Status::internal(format!("{}", e))), }, diff --git a/tools/start_localnet.sh b/tools/start_localnet.sh index 54198dc9..41afec6e 100644 --- a/tools/start_localnet.sh +++ b/tools/start_localnet.sh @@ -33,7 +33,7 @@ then rm -rf db fi ./tendermint init -../target/${BUILD_MODE}/db3 node >db3.log 2>&1 & +../target/${BUILD_MODE}/db3 start >db3.log 2>&1 & sleep 1 ./tendermint unsafe_reset_all && ./tendermint start sleep 1 From 26b2b8d5e91c456e20c27ef942229b2157d7ba7e Mon Sep 17 00:00:00 2001 From: imotai Date: Wed, 1 Feb 2023 18:07:09 +0800 Subject: [PATCH 6/9] feat: case passed --- .github/workflows/ci.yml | 3 - docs/{ => old}/account_model.md | 0 docs/{ => old}/background.md | 0 docs/{ => old}/bills.md | 0 docs/{ => 
old}/dvm.md | 0 docs/{ => old}/json_rpc.md | 0 docs/{ => old}/mutation.md | 0 docs/{ => old}/query.md | 0 src/cmd/Cargo.toml | 2 + src/cmd/src/command.rs | 157 +- src/cmd/src/console.rs | 9 +- src/cmd/src/lib.rs | 2 + src/cmd/src/shell.rs | 17 +- src/crypto/src/id.rs | 28 + src/node/src/auth_storage.rs | 19 +- src/node/src/command.rs | 10 +- src/node/src/storage_node_impl.rs | 25 +- src/node/tests/node_test.rs | 8 +- src/proto/proto/README.md | 2 - src/proto/proto/compile.sh | 27 - src/proto/proto/db3_node.proto | 5 +- src/proto/proto/firestore/bundle.proto | 121 - src/proto/proto/firestore_bundle_proto.ts | 93 - src/proto/proto/firestore_proto_api.ts | 1289 -------- src/proto/proto/google/api/annotations.proto | 31 - src/proto/proto/google/api/client.proto | 99 - .../proto/google/api/field_behavior.proto | 90 - src/proto/proto/google/api/http.proto | 375 --- .../google/firestore/admin/v1/database.proto | 129 - .../google/firestore/admin/v1/field.proto | 136 - .../firestore/admin/v1/firestore_admin.proto | 457 --- .../firestore_admin_grpc_service_config.json | 61 - .../firestore/admin/v1/firestore_gapic.yaml | 5 - .../firestore/admin/v1/firestore_v1.yaml | 75 - .../google/firestore/admin/v1/index.proto | 156 - .../google/firestore/admin/v1/location.proto | 31 - .../google/firestore/admin/v1/operation.proto | 223 -- .../admin/v1beta1/firestore_admin.proto | 370 --- .../firestore/admin/v1beta1/index.proto | 101 - .../firestore/admin/v1beta1/location.proto | 33 - .../firestore/admin/v1beta2/field.proto | 92 - .../admin/v1beta2/firestore_admin.proto | 278 -- .../firestore/admin/v1beta2/index.proto | 150 - .../firestore/admin/v1beta2/operation.proto | 202 -- .../firestore/v1/aggregation_result.proto | 42 - .../proto/google/firestore/v1/common.proto | 83 - .../proto/google/firestore/v1/document.proto | 150 - .../proto/google/firestore/v1/firestore.proto | 980 ------ .../proto/google/firestore/v1/query.proto | 355 --- .../proto/google/firestore/v1/write.proto | 264 -- src/proto/proto/google/protobuf/any.proto | 155 - .../proto/google/protobuf/descriptor.proto | 882 ----- src/proto/proto/google/protobuf/empty.proto | 52 - src/proto/proto/google/protobuf/struct.proto | 96 - .../proto/google/protobuf/timestamp.proto | 137 - .../proto/google/protobuf/wrappers.proto | 123 - src/proto/proto/google/rpc/status.proto | 47 - src/proto/proto/google/type/latlng.proto | 37 - src/proto/proto/protos.json | 2825 ----------------- src/proto/proto/update.sh | 80 - src/sdk/src/mutation_sdk.rs | 3 +- src/sdk/src/store_sdk.rs | 60 +- src/session/src/session_manager.rs | 11 + src/storage/src/db_key.rs | 2 +- src/storage/src/db_store.rs | 2 +- tools/start_localnet.sh | 2 +- 66 files changed, 285 insertions(+), 11014 deletions(-) rename docs/{ => old}/account_model.md (100%) rename docs/{ => old}/background.md (100%) rename docs/{ => old}/bills.md (100%) rename docs/{ => old}/dvm.md (100%) rename docs/{ => old}/json_rpc.md (100%) rename docs/{ => old}/mutation.md (100%) rename docs/{ => old}/query.md (100%) delete mode 100644 src/proto/proto/README.md delete mode 100755 src/proto/proto/compile.sh delete mode 100644 src/proto/proto/firestore/bundle.proto delete mode 100644 src/proto/proto/firestore_bundle_proto.ts delete mode 100644 src/proto/proto/firestore_proto_api.ts delete mode 100644 src/proto/proto/google/api/annotations.proto delete mode 100644 src/proto/proto/google/api/client.proto delete mode 100644 src/proto/proto/google/api/field_behavior.proto delete mode 100644 src/proto/proto/google/api/http.proto delete 
mode 100644 src/proto/proto/google/firestore/admin/v1/database.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1/field.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_admin.proto delete mode 100755 src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json delete mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml delete mode 100644 src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml delete mode 100644 src/proto/proto/google/firestore/admin/v1/index.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1/location.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1/operation.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta1/index.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta1/location.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta2/field.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta2/index.proto delete mode 100644 src/proto/proto/google/firestore/admin/v1beta2/operation.proto delete mode 100644 src/proto/proto/google/firestore/v1/aggregation_result.proto delete mode 100644 src/proto/proto/google/firestore/v1/common.proto delete mode 100644 src/proto/proto/google/firestore/v1/document.proto delete mode 100644 src/proto/proto/google/firestore/v1/firestore.proto delete mode 100644 src/proto/proto/google/firestore/v1/query.proto delete mode 100644 src/proto/proto/google/firestore/v1/write.proto delete mode 100644 src/proto/proto/google/protobuf/any.proto delete mode 100644 src/proto/proto/google/protobuf/descriptor.proto delete mode 100644 src/proto/proto/google/protobuf/empty.proto delete mode 100644 src/proto/proto/google/protobuf/struct.proto delete mode 100644 src/proto/proto/google/protobuf/timestamp.proto delete mode 100644 src/proto/proto/google/protobuf/wrappers.proto delete mode 100644 src/proto/proto/google/rpc/status.proto delete mode 100644 src/proto/proto/google/type/latlng.proto delete mode 100644 src/proto/proto/protos.json delete mode 100755 src/proto/proto/update.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 817b6149..3c0e3169 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -99,7 +99,4 @@ jobs: cargo build --release cd tools sh start_localnet.sh release >tm.log 2>&1 & - - name: Benchmark rust sdk - run: | - cargo bench --package db3-sdk diff --git a/docs/account_model.md b/docs/old/account_model.md similarity index 100% rename from docs/account_model.md rename to docs/old/account_model.md diff --git a/docs/background.md b/docs/old/background.md similarity index 100% rename from docs/background.md rename to docs/old/background.md diff --git a/docs/bills.md b/docs/old/bills.md similarity index 100% rename from docs/bills.md rename to docs/old/bills.md diff --git a/docs/dvm.md b/docs/old/dvm.md similarity index 100% rename from docs/dvm.md rename to docs/old/dvm.md diff --git a/docs/json_rpc.md b/docs/old/json_rpc.md similarity index 100% rename from docs/json_rpc.md rename to docs/old/json_rpc.md diff --git a/docs/mutation.md b/docs/old/mutation.md similarity index 100% rename from docs/mutation.md rename to docs/old/mutation.md diff --git a/docs/query.md b/docs/old/query.md similarity index 100% rename from docs/query.md rename to 
docs/old/query.md diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 4e2c6fd4..cbcc8eed 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -33,6 +33,8 @@ shell-words = "1.1.0" clap = { version = "4.0.20", features = ["derive"] } async-trait = "0.1.64" anyhow = "1.0.68" +serde = { version = "1.0.144", features = ["derive"] } +serde_json = "1.0.88" [dev-dependencies] db3-session={ path = "../session"} db3-crypto={ path = "../crypto"} diff --git a/src/cmd/src/command.rs b/src/cmd/src/command.rs index 441bac33..68abfa9a 100644 --- a/src/cmd/src/command.rs +++ b/src/cmd/src/command.rs @@ -18,14 +18,17 @@ use clap::*; use crate::keystore::KeyStore; +use db3_crypto::id::{AccountId, DbId, TxId}; use db3_proto::db3_base_proto::{BroadcastMeta, ChainId, ChainRole}; -use db3_proto::db3_mutation_proto::{DatabaseAction, DatabaseMutation}; -use db3_sdk::mutation_sdk::MutationSDK; +use db3_proto::db3_database_proto::{Database, Index}; +use db3_proto::db3_mutation_proto::{CollectionMutation, DatabaseAction, DatabaseMutation}; +use db3_sdk::{mutation_sdk::MutationSDK, store_sdk::StoreSDK}; use prettytable::{format, Table}; use std::time::{SystemTime, UNIX_EPOCH}; pub struct DB3ClientContext { pub mutation_sdk: Option, + pub store_sdk: Option, } #[derive(Debug, Parser)] @@ -40,6 +43,13 @@ pub enum DB3ClientCommand { /// Create a database #[clap(name = "new-db")] NewDB {}, + /// Show the database with an address + #[clap(name = "show-db")] + ShowDB { + /// the address of database + #[clap(long)] + addr: String, + }, /// Create a new collection #[clap(name = "new-collection")] NewCollection { @@ -50,8 +60,14 @@ pub enum DB3ClientCommand { #[clap(long)] name: String, /// the json style config of index + #[clap(long = "index")] + index_list: Vec, + }, + #[clap(name = "show-collection")] + ShowCollection { + /// the address of database #[clap(long)] - config: String, + addr: String, }, } @@ -62,6 +78,55 @@ impl DB3ClientCommand { Err(_) => 0, } } + + fn show_collection(database: &Database) { + let mut table = Table::new(); + table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); + table.set_titles(row!["name", "index",]); + for collection in &database.collections { + let index_str: String = collection + .index_list + .iter() + .map(|i| serde_json::to_string(&i).unwrap()) + .intersperse("\n ".to_string()) + .collect(); + table.add_row(row![collection.name, index_str]); + } + table.printstd(); + } + + fn show_database(database: &Database) { + let mut table = Table::new(); + table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); + table.set_titles(row![ + "database address", + "sender address", + "releated transactions", + "collections" + ]); + let tx_list: String = database + .tx + .iter() + .map(|tx| TxId::try_from_bytes(tx).unwrap().to_base64()) + .intersperse("\n ".to_string()) + .collect(); + let collections: String = database + .collections + .iter() + .map(|c| c.name.to_string()) + .intersperse("\n ".to_string()) + .collect(); + let address_ref: &[u8] = database.address.as_ref(); + let sender_ref: &[u8] = database.sender.as_ref(); + table.add_row(row![ + DbId::try_from(address_ref).unwrap().to_hex(), + AccountId::try_from(sender_ref).unwrap().to_hex(), + tx_list, + collections + ]); + table.printstd(); + } + pub async fn execute(self, ctx: &mut DB3ClientContext) { match self { DB3ClientCommand::Init {} => { @@ -73,6 +138,89 @@ impl DB3ClientCommand { DB3ClientCommand::ShowKey {} => { if let Ok(ks) = KeyStore::recover_keypair() { ks.show_key(); + } else { + 
println!("no key was found, you can use init command to create a new one"); + } + } + DB3ClientCommand::NewCollection { + addr, + name, + index_list, + } => { + //TODO validate the index + let index_vec: Vec = index_list + .iter() + .map(|i| serde_json::from_str::(i.as_str()).unwrap()) + .collect(); + let collection = CollectionMutation { + index: index_vec.to_owned(), + collection_id: name.to_string(), + }; + //TODO check database id and collection name + let db_id = DbId::try_from(addr.as_str()).unwrap(); + let meta = BroadcastMeta { + //TODO get from network + nonce: Self::current_seconds(), + //TODO use config + chain_id: ChainId::DevNet.into(), + //TODO use config + chain_role: ChainRole::StorageShardChain.into(), + }; + let dm = DatabaseMutation { + meta: Some(meta), + collection_mutations: vec![collection], + db_address: db_id.as_ref().to_vec(), + action: DatabaseAction::AddCollection.into(), + }; + if let Ok((_, tx_id)) = ctx + .mutation_sdk + .as_ref() + .unwrap() + .submit_database_mutation(&dm) + .await + { + println!("send add collection done with tx\n{}", tx_id.to_base64()); + } else { + println!("fail to add collection"); + } + } + DB3ClientCommand::ShowCollection { addr } => { + match ctx + .store_sdk + .as_mut() + .unwrap() + .get_database(addr.as_ref()) + .await + { + Ok(Some(database)) => { + Self::show_collection(&database); + } + Ok(None) => { + println!("no collection with target address"); + } + Err(e) => { + println!("fail to show collections with error {e}"); + } + } + } + + DB3ClientCommand::ShowDB { addr } => { + match ctx + .store_sdk + .as_mut() + .unwrap() + .get_database(addr.as_ref()) + .await + { + Ok(Some(database)) => { + Self::show_database(&database); + } + Ok(None) => { + println!("no database with target address"); + } + Err(e) => { + println!("fail to show database with error {e}"); + } } } @@ -95,7 +243,7 @@ impl DB3ClientCommand { .mutation_sdk .as_ref() .unwrap() - .create_database(&dm) + .submit_database_mutation(&dm) .await { let mut table = Table::new(); @@ -107,7 +255,6 @@ impl DB3ClientCommand { println!("fail to create database"); } } - _ => {} } } } diff --git a/src/cmd/src/console.rs b/src/cmd/src/console.rs index 38d6d06b..b4b8e357 100644 --- a/src/cmd/src/console.rs +++ b/src/cmd/src/console.rs @@ -16,7 +16,6 @@ // use std::io::{stderr, Write}; -use std::ops::Deref; use async_trait::async_trait; use clap::Command; @@ -26,9 +25,7 @@ use clap::Parser; use colored::Colorize; use crate::command::{DB3ClientCommand, DB3ClientContext}; -use crate::shell::{ - install_shell_plugins, AsyncHandler, CacheKey, CommandStructure, CompletionCache, Shell, -}; +use crate::shell::{install_shell_plugins, AsyncHandler, CommandStructure, CompletionCache, Shell}; const DB3: &str = " ██████╗ ██████╗ ██████╗ ██╔══██╗██╔══██╗╚════██╗ @@ -50,7 +47,7 @@ pub async fn start_console( out: &mut (dyn Write + Send), err: &mut (dyn Write + Send), ) -> Result<(), anyhow::Error> { - writeln!(out, "{DB3}"); + writeln!(out, "{DB3}").unwrap(); let app: Command = DB3ClientCommand::command(); let mut shell = Shell::new( "db3>-$ ", @@ -91,7 +88,7 @@ fn get_command(args: Vec) -> Result { async fn handle_command( opts: Result, ctx: &mut DB3ClientContext, - completion_cache: CompletionCache, + _completion_cache: CompletionCache, ) -> Result { let opts = opts?; opts.command.execute(ctx).await; diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs index cb182192..52373fa7 100644 --- a/src/cmd/src/lib.rs +++ b/src/cmd/src/lib.rs @@ -14,6 +14,8 @@ // See the License for the specific 
language governing permissions and // limitations under the License. // + +#![feature(iter_intersperse)] #[macro_use] extern crate prettytable; pub mod command; diff --git a/src/cmd/src/shell.rs b/src/cmd/src/shell.rs index df098d29..9a8b873d 100644 --- a/src/cmd/src/shell.rs +++ b/src/cmd/src/shell.rs @@ -348,22 +348,7 @@ impl PartialOrd for CacheKey { Some(self.cmp(other)) } } -/// This custom ordering for `CacheKey` enable wildcard matching, -/// the command field for `CacheKey` is optional and can be used as a wildcard when equal `None` -/// # Examples -/// ``` -/// use std::cmp::Ordering; -/// use std::collections::BTreeMap; -/// use sui::shell::CacheKey; -/// -/// assert_eq!(Ordering::Equal, CacheKey::flag("--flag").cmp(&CacheKey::new("any command", "--flag"))); -/// -/// let mut data = BTreeMap::new(); -/// data.insert(CacheKey::flag("--flag"), "Some Data"); -/// -/// assert_eq!(Some(&"Some Data"), data.get(&CacheKey::new("This can be anything", "--flag"))); -/// assert_eq!(Some(&"Some Data"), data.get(&CacheKey::flag("--flag"))); -/// ``` + impl Ord for CacheKey { fn cmp(&self, other: &Self) -> Ordering { let cmd_eq = if self.command.is_none() || other.command.is_none() { diff --git a/src/crypto/src/id.rs b/src/crypto/src/id.rs index 06f6e1ad..c0acb0a1 100644 --- a/src/crypto/src/id.rs +++ b/src/crypto/src/id.rs @@ -38,6 +38,15 @@ impl AccountId { } } +impl TryFrom<&[u8]> for AccountId { + type Error = DB3Error; + fn try_from(data: &[u8]) -> std::result::Result { + Ok(Self { + addr: DB3Address::try_from(data)?, + }) + } +} + pub const TX_ID_LENGTH: usize = 32; #[derive(Eq, Default, PartialEq, Ord, PartialOrd, Copy, Clone)] pub struct TxId { @@ -55,6 +64,11 @@ impl TxId { pub fn to_base64(&self) -> String { base64ct::Base64::encode_string(self.as_ref()) } + + pub fn try_from_bytes(data: &[u8]) -> std::result::Result { + let arr: [u8; TX_ID_LENGTH] = data.try_into().map_err(|_| DB3Error::InvalidAddress)?; + Ok(Self { data: arr }) + } } impl From<&[u8]> for TxId { @@ -103,6 +117,11 @@ impl DbId { pub fn to_hex(&self) -> String { format!("0x{}", hex::encode(self.addr.as_ref())) } + + #[inline] + pub fn address(&self) -> &DB3Address { + &self.addr + } } impl AsRef<[u8]> for DbId { @@ -119,6 +138,15 @@ impl From<&[u8; DB3_ADDRESS_LENGTH]> for DbId { } } +impl TryFrom<&str> for DbId { + type Error = DB3Error; + fn try_from(addr: &str) -> std::result::Result { + Ok(Self { + addr: DB3Address::try_from(addr)?, + }) + } +} + impl TryFrom<&[u8]> for DbId { type Error = DB3Error; fn try_from(data: &[u8]) -> std::result::Result { diff --git a/src/node/src/auth_storage.rs b/src/node/src/auth_storage.rs index 62348558..bd6558b8 100644 --- a/src/node/src/auth_storage.rs +++ b/src/node/src/auth_storage.rs @@ -15,6 +15,7 @@ // limitations under the License. 
// +use db3_crypto::id::DbId; use db3_crypto::{db3_address::DB3Address, id::TxId}; use db3_error::Result; use db3_proto::db3_account_proto::Account; @@ -170,22 +171,8 @@ impl AuthStorage { AccountStore::get_account(self.db.as_ref(), addr) } - pub fn get_database(&self, _addr: &DB3Address) -> Result> { - let ops = DbStore::get_databases(self.db.as_ref())?; - let mut db_list: Vec = Vec::new(); - for op in ops { - match op { - ProofOp::Push(Node::KV(_, v)) => { - if let Ok(b) = Database::decode(v.as_ref()) { - db_list.push(b); - } else { - todo!(); - } - } - _ => {} - } - } - Ok(db_list) + pub fn get_database(&self, id: &DbId) -> Result> { + DbStore::get_database(self.db.as_ref(), id) } pub fn get_bills(&self, height: u64, start_id: u64, end_id: u64) -> Result> { diff --git a/src/node/src/command.rs b/src/node/src/command.rs index dcfc4deb..188bd55e 100644 --- a/src/node/src/command.rs +++ b/src/node/src/command.rs @@ -28,7 +28,6 @@ use db3_cmd::command::{DB3ClientCommand, DB3ClientContext}; use db3_crypto::db3_signer::Db3MultiSchemeSigner; use db3_proto::db3_node_proto::storage_node_client::StorageNodeClient; use db3_proto::db3_node_proto::storage_node_server::StorageNodeServer; -use db3_proto::db3_node_proto::OpenSessionResponse; use db3_sdk::mutation_sdk::MutationSDK; use db3_sdk::store_sdk::StoreSDK; use http::Uri; @@ -142,9 +141,13 @@ impl DB3Command { } let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); let signer = Db3MultiSchemeSigner::new(kp); - let sdk = MutationSDK::new(node, signer); + let mutation_sdk = MutationSDK::new(node.clone(), signer); + let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); + let signer = Db3MultiSchemeSigner::new(kp); + let store_sdk = StoreSDK::new(node, signer); DB3ClientContext { - mutation_sdk: Some(sdk), + mutation_sdk: Some(mutation_sdk), + store_sdk: Some(store_sdk), } } @@ -235,7 +238,6 @@ impl DB3Command { } } } - _ => {} } } diff --git a/src/node/src/storage_node_impl.rs b/src/node/src/storage_node_impl.rs index 736f45d5..b5573f8b 100644 --- a/src/node/src/storage_node_impl.rs +++ b/src/node/src/storage_node_impl.rs @@ -18,7 +18,7 @@ use super::context::Context; use db3_crypto::db3_address::DB3Address; use db3_crypto::db3_signer::Db3MultiSchemeSigner; -use db3_crypto::db3_verifier::DB3Verifier; +use db3_crypto::{db3_verifier::DB3Verifier, id::DbId}; use db3_proto::db3_account_proto::Account; use db3_proto::db3_base_proto::{ChainId, ChainRole}; use db3_proto::db3_mutation_proto::{PayloadType, WriteRequest}; @@ -66,6 +66,11 @@ impl StorageNode for StorageNodeImpl { let show_database_req = request.into_inner(); match self.context.node_store.lock() { Ok(mut node_store) => { + // get database id + let address_ref: &str = show_database_req.address.as_ref(); + let db_id = DbId::try_from(address_ref) + .map_err(|e| Status::internal(format!("invalid database address {e}")))?; + // validate the session id match node_store .get_session_store() .get_session_mut(&show_database_req.session_token) @@ -79,29 +84,17 @@ impl StorageNode for StorageNodeImpl { } None => return Err(Status::internal("Fail to create session")), } - let addr = node_store - .get_session_store() - .get_address(&show_database_req.session_token); - if addr.is_none() { - return Err(Status::internal(format!( - "not address found related to current token {}", - &show_database_req.session_token - ))); - } - let real_addr = addr.unwrap(); - let db_list = node_store + let db = node_store .get_auth_store() - .get_database(&real_addr) + .get_database(&db_id) .map_err(|e| 
Status::internal(format!("{:?}", e)))?; - node_store .get_session_store() .get_session_mut(&show_database_req.session_token) .unwrap() .increase_query(1); - Ok(Response::new(ShowDatabaseResponse { db_list })) + Ok(Response::new(ShowDatabaseResponse { db })) } - Err(e) => Err(Status::internal(format!("Fail to get lock {}", e))), } } diff --git a/src/node/tests/node_test.rs b/src/node/tests/node_test.rs index 9d907982..f48c5093 100644 --- a/src/node/tests/node_test.rs +++ b/src/node/tests/node_test.rs @@ -4,7 +4,7 @@ mod node_integration { use bytes::BytesMut; use db3_base::get_a_random_nonce; use db3_crypto::db3_signer::Db3MultiSchemeSigner; - use db3_proto::db3_base_proto::{ChainId, ChainRole, Erc20Token, Price, UnitType, Units}; + use db3_proto::db3_base_proto::{ChainId, ChainRole, UnitType, Units}; use db3_proto::db3_database_proto::Database; use db3_proto::db3_mutation_proto::{ DatabaseMutation, KvPair, Mutation, MutationAction, PayloadType, WriteRequest, @@ -25,7 +25,7 @@ mod node_integration { fn get_mutation_sdk() -> MutationSDK { let public_grpc_url = "http://127.0.0.1:26659"; // create storage node sdk - let kp = db3_cmd::get_key_pair(false).unwrap(); + let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); let signer = Db3MultiSchemeSigner::new(kp); let rpc_endpoint = Endpoint::new(public_grpc_url).unwrap(); let channel = rpc_endpoint.connect_lazy(); @@ -38,7 +38,7 @@ mod node_integration { fn get_store_sdk() -> StoreSDK { let public_grpc_url = "http://127.0.0.1:26659"; // create storage node sdk - let kp = db3_cmd::get_key_pair(false).unwrap(); + let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); let signer = Db3MultiSchemeSigner::new(kp); let rpc_endpoint = Endpoint::new(public_grpc_url).unwrap(); let channel = rpc_endpoint.connect_lazy(); @@ -99,7 +99,7 @@ mod node_integration { let nonce = get_a_random_nonce(); let json_rpc_url = "http://127.0.0.1:26670"; let client = awc::Client::default(); - let kp = db3_cmd::get_key_pair(false).unwrap(); + let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap(); let signer = Db3MultiSchemeSigner::new(kp); let kv = KvPair { key: format!("kkkkk_tt{}", 1).as_bytes().to_vec(), diff --git a/src/proto/proto/README.md b/src/proto/proto/README.md deleted file mode 100644 index 24919e59..00000000 --- a/src/proto/proto/README.md +++ /dev/null @@ -1,2 +0,0 @@ -These protos are copied from https://github.com/googleapis/googleapis and -https://github.com/google/protobuf. Run update.sh to update them. diff --git a/src/proto/proto/compile.sh b/src/proto/proto/compile.sh deleted file mode 100755 index 26c46d1a..00000000 --- a/src/proto/proto/compile.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -# Variables -PROTOS_DIR="." -PBJS="$(npm bin)/pbjs" - -"${PBJS}" --proto_path=. 
--target=json -o protos.json \ - -r firestore_v1 \ - "${PROTOS_DIR}/google/firestore/v1/*.proto" \ - "${PROTOS_DIR}/google/protobuf/*.proto" "${PROTOS_DIR}/google/type/*.proto" \ - "${PROTOS_DIR}/google/rpc/*.proto" "${PROTOS_DIR}/google/api/*.proto" diff --git a/src/proto/proto/db3_node.proto b/src/proto/proto/db3_node.proto index 92702be1..789bfd8d 100644 --- a/src/proto/proto/db3_node.proto +++ b/src/proto/proto/db3_node.proto @@ -131,11 +131,12 @@ message BroadcastResponse { message ShowDatabaseRequest { string session_token = 1; - repeated string names = 2; + // a hex string + string address = 2; } message ShowDatabaseResponse { - repeated db3_database_proto.Database db_list = 1; + db3_database_proto.Database db = 1; } service StorageNode { diff --git a/src/proto/proto/firestore/bundle.proto b/src/proto/proto/firestore/bundle.proto deleted file mode 100644 index ee7954e6..00000000 --- a/src/proto/proto/firestore/bundle.proto +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2020 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// This file defines the format of Firestore bundle file/stream. It is not a part of the -// Firestore API, only a specification used by Server and Client SDK to write and read -// bundles. - -syntax = "proto3"; - -package firestore; - -import "google/firestore/v1/document.proto"; -import "google/firestore/v1/query.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Firestore.Proto"; -option go_package = "google.golang.org/genproto/firestore/proto;firestore"; -option java_multiple_files = true; -option java_outer_classname = "BundleProto"; -option java_package = "com.google.firestore.proto"; -option objc_class_prefix = "FSTPB"; -option php_namespace = "Firestore\\Proto"; - -// Describes a query saved in the bundle. -message BundledQuery { - // The parent resource name. - string parent = 1; - - // The query to run. - oneof query_type { - // A structured query. - google.firestore.v1.StructuredQuery structured_query = 2; - } - - // If the query is a limit query, should the limit be applied to the beginning or - // the end of results. - enum LimitType { - FIRST = 0; - LAST = 1; - } - LimitType limit_type = 3; -} - -// A Query associated with a name, created as part of the bundle file, and can be read -// by client SDKs once the bundle containing them is loaded. -message NamedQuery { - // Name of the query, such that client can use the name to load this query - // from bundle, and resume from when the query results are materialized - // into this bundle. - string name = 1; - - // The query saved in the bundle. - BundledQuery bundled_query = 2; - - // The read time of the query, when it is used to build the bundle. This is useful to - // resume the query from the bundle, once it is loaded by client SDKs. - google.protobuf.Timestamp read_time = 3; -} - -// Metadata describing a Firestore document saved in the bundle. -message BundledDocumentMetadata { - // The document key of a bundled document. 
- string name = 1; - - // The snapshot version of the document data bundled. - google.protobuf.Timestamp read_time = 2; - - // Whether the document exists. - bool exists = 3; - - // The names of the queries in this bundle that this document matches to. - repeated string queries = 4; -} - -// Metadata describing the bundle file/stream. -message BundleMetadata { - // The ID of the bundle. - string id = 1; - - // Time at which the documents snapshot is taken for this bundle. - google.protobuf.Timestamp create_time = 2; - - // The schema version of the bundle. - uint32 version = 3; - - // The number of documents in the bundle. - uint32 total_documents = 4; - - // The size of the bundle in bytes, excluding this `BundleMetadata`. - uint64 total_bytes = 5; -} - -// A Firestore bundle is a length-prefixed stream of JSON representations of -// `BundleElement`. -// Only one `BundleMetadata` is expected, and it should be the first element. -// The named queries follow after `metadata`. If a document exists when the -// bundle is built, `document_metadata` is immediately followed by the -// `document`, otherwise `document_metadata` will present by itself. -message BundleElement { - oneof element_type { - BundleMetadata metadata = 1; - - NamedQuery named_query = 2; - - BundledDocumentMetadata document_metadata = 3; - - google.firestore.v1.Document document = 4; - } -} diff --git a/src/proto/proto/firestore_bundle_proto.ts b/src/proto/proto/firestore_bundle_proto.ts deleted file mode 100644 index d51da229..00000000 --- a/src/proto/proto/firestore_bundle_proto.ts +++ /dev/null @@ -1,93 +0,0 @@ -/** - * @license - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import { StructuredQuery, Timestamp, Document } from './firestore_proto_api'; - -/** Properties of a BundledQuery. */ -export interface BundledQuery { - /** BundledQuery parent */ - parent?: string | null; - - /** BundledQuery structuredQuery */ - structuredQuery?: StructuredQuery | null; - - /** BundledQuery limitType */ - limitType?: LimitType | null; -} - -/** LimitType enum. */ -export type LimitType = 'FIRST' | 'LAST'; - -/** Properties of a NamedQuery. */ -export interface NamedQuery { - /** NamedQuery name */ - name?: string | null; - - /** NamedQuery bundledQuery */ - bundledQuery?: BundledQuery | null; - - /** NamedQuery readTime */ - readTime?: Timestamp | null; -} - -/** Properties of a BundledDocumentMetadata. */ -export interface BundledDocumentMetadata { - /** BundledDocumentMetadata name */ - name?: string | null; - - /** BundledDocumentMetadata readTime */ - readTime?: Timestamp | null; - - /** BundledDocumentMetadata exists */ - exists?: boolean | null; - - /** The names of the queries in this bundle that this document matches to. */ - queries?: string[]; -} - -/** Properties of a BundleMetadata. 
*/ -export interface BundleMetadata { - /** BundleMetadata id */ - id?: string | null; - - /** BundleMetadata createTime */ - createTime?: Timestamp | null; - - /** BundleMetadata version */ - version?: number | null; - - /** BundleMetadata totalDocuments */ - totalDocuments?: number | null; - - /** BundleMetadata totalBytes */ - totalBytes?: number | null; -} - -/** Properties of a BundleElement. */ -export interface BundleElement { - /** BundleElement metadata */ - metadata?: BundleMetadata | null; - - /** BundleElement namedQuery */ - namedQuery?: NamedQuery | null; - - /** BundleElement documentMetadata */ - documentMetadata?: BundledDocumentMetadata | null; - - /** BundleElement document */ - document?: Document | null; -} diff --git a/src/proto/proto/firestore_proto_api.ts b/src/proto/proto/firestore_proto_api.ts deleted file mode 100644 index 46b00e0e..00000000 --- a/src/proto/proto/firestore_proto_api.ts +++ /dev/null @@ -1,1289 +0,0 @@ -/** - * @license - * Copyright 2019 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Rather than pull these in from other protos, we just alias them to any. -/* - eslint-disable - camelcase, @typescript-eslint/no-explicit-any, - @typescript-eslint/naming-convention -*/ -export declare type ApiClientHookFactory = any; -export declare type PromiseRequestService = any; -export interface ApiClientObjectMap { - [k: string]: T; -} -export declare type Timestamp = - | string - | { seconds?: string | number; nanos?: number }; - -export declare type CompositeFilterOp = 'OPERATOR_UNSPECIFIED' | 'AND' | 'OR'; -export interface ICompositeFilterOpEnum { - OPERATOR_UNSPECIFIED: CompositeFilterOp; - AND: CompositeFilterOp; - values(): CompositeFilterOp[]; -} -export declare const CompositeFilterOpEnum: ICompositeFilterOpEnum; -export declare type FieldFilterOp = - | 'OPERATOR_UNSPECIFIED' - | 'LESS_THAN' - | 'LESS_THAN_OR_EQUAL' - | 'GREATER_THAN' - | 'GREATER_THAN_OR_EQUAL' - | 'EQUAL' - | 'NOT_EQUAL' - | 'ARRAY_CONTAINS' - | 'IN' - | 'ARRAY_CONTAINS_ANY' - | 'NOT_IN'; -export interface IFieldFilterOpEnum { - OPERATOR_UNSPECIFIED: FieldFilterOp; - LESS_THAN: FieldFilterOp; - LESS_THAN_OR_EQUAL: FieldFilterOp; - GREATER_THAN: FieldFilterOp; - GREATER_THAN_OR_EQUAL: FieldFilterOp; - EQUAL: FieldFilterOp; - NOT_EQUAL: FieldFilterOp; - ARRAY_CONTAINS: FieldFilterOp; - IN: FieldFilterOp; - ARRAY_CONTAINS_ANY: FieldFilterOp; - NOT_IN: FieldFilterOp; - values(): FieldFilterOp[]; -} -export declare const FieldFilterOpEnum: IFieldFilterOpEnum; -export declare type FieldTransformSetToServerValue = - | 'SERVER_VALUE_UNSPECIFIED' - | 'REQUEST_TIME'; -export interface IFieldTransformSetToServerValueEnum { - SERVER_VALUE_UNSPECIFIED: FieldTransformSetToServerValue; - REQUEST_TIME: FieldTransformSetToServerValue; - values(): FieldTransformSetToServerValue[]; -} -export declare const FieldTransformSetToServerValueEnum: IFieldTransformSetToServerValueEnum; -export declare type IndexFieldMode = - | 'MODE_UNSPECIFIED' - | 'ASCENDING' - | 'DESCENDING'; 
-export interface IIndexFieldModeEnum { - MODE_UNSPECIFIED: IndexFieldMode; - ASCENDING: IndexFieldMode; - DESCENDING: IndexFieldMode; - values(): IndexFieldMode[]; -} -export declare const IndexFieldModeEnum: IIndexFieldModeEnum; -export declare type IndexState = - | 'STATE_UNSPECIFIED' - | 'CREATING' - | 'READY' - | 'ERROR'; -export interface IIndexStateEnum { - STATE_UNSPECIFIED: IndexState; - CREATING: IndexState; - READY: IndexState; - ERROR: IndexState; - values(): IndexState[]; -} -export declare const IndexStateEnum: IIndexStateEnum; -export declare type OrderDirection = - | 'DIRECTION_UNSPECIFIED' - | 'ASCENDING' - | 'DESCENDING'; -export interface IOrderDirectionEnum { - DIRECTION_UNSPECIFIED: OrderDirection; - ASCENDING: OrderDirection; - DESCENDING: OrderDirection; - values(): OrderDirection[]; -} -export declare const OrderDirectionEnum: IOrderDirectionEnum; -export declare type TargetChangeTargetChangeType = - | 'NO_CHANGE' - | 'ADD' - | 'REMOVE' - | 'CURRENT' - | 'RESET'; -export interface ITargetChangeTargetChangeTypeEnum { - NO_CHANGE: TargetChangeTargetChangeType; - ADD: TargetChangeTargetChangeType; - REMOVE: TargetChangeTargetChangeType; - CURRENT: TargetChangeTargetChangeType; - RESET: TargetChangeTargetChangeType; - values(): TargetChangeTargetChangeType[]; -} -export declare const TargetChangeTargetChangeTypeEnum: ITargetChangeTargetChangeTypeEnum; -export declare type UnaryFilterOp = - | 'OPERATOR_UNSPECIFIED' - | 'IS_NAN' - | 'IS_NULL' - | 'IS_NOT_NAN' - | 'IS_NOT_NULL'; -export interface IUnaryFilterOpEnum { - OPERATOR_UNSPECIFIED: UnaryFilterOp; - IS_NAN: UnaryFilterOp; - IS_NULL: UnaryFilterOp; - IS_NOT_NAN: UnaryFilterOp; - IS_NOT_NULL: UnaryFilterOp; - values(): UnaryFilterOp[]; -} -export declare const UnaryFilterOpEnum: IUnaryFilterOpEnum; -export declare type ValueNullValue = 'NULL_VALUE'; -export interface IValueNullValueEnum { - NULL_VALUE: ValueNullValue; - values(): ValueNullValue[]; -} -export declare const ValueNullValueEnum: IValueNullValueEnum; -export declare namespace firestoreV1ApiClientInterfaces { - interface ArrayValue { - values?: Value[]; - } - interface BatchGetDocumentsRequest { - database?: string; - documents?: string[]; - mask?: DocumentMask; - transaction?: string; - newTransaction?: TransactionOptions; - readTime?: string; - } - interface BatchGetDocumentsResponse { - found?: Document; - missing?: string; - transaction?: string; - readTime?: string; - } - interface BeginTransactionRequest { - options?: TransactionOptions; - } - interface BeginTransactionResponse { - transaction?: string; - } - interface CollectionSelector { - collectionId?: string; - allDescendants?: boolean; - } - interface CommitRequest { - database?: string; - writes?: Write[]; - transaction?: string; - } - interface CommitResponse { - writeResults?: WriteResult[]; - commitTime?: string; - } - interface CompositeFilter { - op?: CompositeFilterOp; - filters?: Filter[]; - } - interface Cursor { - values?: Value[]; - before?: boolean; - } - interface Document { - name?: string; - fields?: ApiClientObjectMap; - createTime?: Timestamp; - updateTime?: Timestamp; - } - interface DocumentChange { - document?: Document; - targetIds?: number[]; - removedTargetIds?: number[]; - } - interface DocumentDelete { - document?: string; - removedTargetIds?: number[]; - readTime?: Timestamp; - } - interface DocumentMask { - fieldPaths?: string[]; - } - interface DocumentRemove { - document?: string; - removedTargetIds?: number[]; - readTime?: string; - } - interface DocumentTransform 
{ - document?: string; - fieldTransforms?: FieldTransform[]; - } - interface DocumentsTarget { - documents?: string[]; - } - interface Empty {} - interface ExistenceFilter { - targetId?: number; - count?: number; - } - interface FieldFilter { - field?: FieldReference; - op?: FieldFilterOp; - value?: Value; - } - interface FieldReference { - fieldPath?: string; - } - interface FieldTransform { - fieldPath?: string; - setToServerValue?: FieldTransformSetToServerValue; - appendMissingElements?: ArrayValue; - removeAllFromArray?: ArrayValue; - increment?: Value; - } - interface Filter { - compositeFilter?: CompositeFilter; - fieldFilter?: FieldFilter; - unaryFilter?: UnaryFilter; - } - interface Index { - name?: string; - collectionId?: string; - fields?: IndexField[]; - state?: IndexState; - } - interface IndexField { - fieldPath?: string; - mode?: IndexFieldMode; - } - interface LatLng { - latitude?: number; - longitude?: number; - } - interface ListCollectionIdsRequest { - pageSize?: number; - pageToken?: string; - } - interface ListCollectionIdsResponse { - collectionIds?: string[]; - nextPageToken?: string; - } - interface ListDocumentsResponse { - documents?: Document[]; - nextPageToken?: string; - } - interface ListIndexesResponse { - indexes?: Index[]; - nextPageToken?: string; - } - interface ListenRequest { - addTarget?: Target; - removeTarget?: number; - labels?: ApiClientObjectMap; - } - interface ListenResponse { - targetChange?: TargetChange; - documentChange?: DocumentChange; - documentDelete?: DocumentDelete; - documentRemove?: DocumentRemove; - filter?: ExistenceFilter; - } - interface MapValue { - fields?: ApiClientObjectMap; - } - interface Operation { - name?: string; - metadata?: ApiClientObjectMap; - done?: boolean; - error?: Status; - response?: ApiClientObjectMap; - } - interface Order { - field?: FieldReference; - direction?: OrderDirection; - } - interface Precondition { - exists?: boolean; - updateTime?: Timestamp; - } - interface Projection { - fields?: FieldReference[]; - } - interface QueryTarget { - parent?: string; - structuredQuery?: StructuredQuery; - } - interface ReadOnly { - readTime?: string; - } - interface ReadWrite { - retryTransaction?: string; - } - interface RollbackRequest { - transaction?: string; - } - interface RunQueryRequest { - parent?: string; - structuredQuery?: StructuredQuery; - transaction?: string; - newTransaction?: TransactionOptions; - readTime?: string; - } - interface RunQueryResponse { - transaction?: string; - document?: Document; - readTime?: string; - skippedResults?: number; - } - interface RunAggregationQueryRequest { - parent?: string; - structuredAggregationQuery?: StructuredAggregationQuery; - transaction?: string; - newTransaction?: TransactionOptions; - readTime?: string; - } - interface RunAggregationQueryResponse { - result?: AggregationResult; - transaction?: string; - readTime?: string; - } - interface AggregationResult { - aggregateFields?: ApiClientObjectMap; - } - interface StructuredAggregationQuery { - structuredQuery?: StructuredQuery; - aggregations?: Aggregation[]; - } - interface Aggregation { - count?: Count; - alias?: string; - } - interface Count { - upTo?: number; - } - interface Status { - code?: number; - message?: string; - details?: Array>; - } - interface StructuredQuery { - select?: Projection; - from?: CollectionSelector[]; - where?: Filter; - orderBy?: Order[]; - startAt?: Cursor; - endAt?: Cursor; - offset?: number; - limit?: number | { value: number }; - } - interface Target { - query?: 
QueryTarget; - documents?: DocumentsTarget; - resumeToken?: string | Uint8Array; - readTime?: Timestamp; - targetId?: number; - once?: boolean; - } - interface TargetChange { - targetChangeType?: TargetChangeTargetChangeType; - targetIds?: number[]; - cause?: Status; - resumeToken?: string | Uint8Array; - readTime?: Timestamp; - } - interface TransactionOptions { - readOnly?: ReadOnly; - readWrite?: ReadWrite; - } - interface UnaryFilter { - op?: UnaryFilterOp; - field?: FieldReference; - } - interface Value { - nullValue?: ValueNullValue; - booleanValue?: boolean; - integerValue?: string | number; - doubleValue?: string | number; - timestampValue?: Timestamp; - stringValue?: string; - bytesValue?: string | Uint8Array; - referenceValue?: string; - geoPointValue?: LatLng; - arrayValue?: ArrayValue; - mapValue?: MapValue; - } - interface Write { - update?: Document; - delete?: string; - verify?: string; - transform?: DocumentTransform; - updateMask?: DocumentMask; - updateTransforms?: FieldTransform[]; - currentDocument?: Precondition; - } - interface WriteRequest { - streamId?: string; - writes?: Write[]; - streamToken?: string | Uint8Array; - labels?: ApiClientObjectMap; - } - interface WriteResponse { - streamId?: string; - streamToken?: string | Uint8Array; - writeResults?: WriteResult[]; - commitTime?: Timestamp; - } - interface WriteResult { - updateTime?: Timestamp; - transformResults?: Value[]; - } -} -export declare type ArrayValue = firestoreV1ApiClientInterfaces.ArrayValue; -export declare type BatchGetDocumentsRequest = - firestoreV1ApiClientInterfaces.BatchGetDocumentsRequest; -export declare type BatchGetDocumentsResponse = - firestoreV1ApiClientInterfaces.BatchGetDocumentsResponse; -export declare type BeginTransactionRequest = - firestoreV1ApiClientInterfaces.BeginTransactionRequest; -export declare type BeginTransactionResponse = - firestoreV1ApiClientInterfaces.BeginTransactionResponse; -export declare type CollectionSelector = - firestoreV1ApiClientInterfaces.CollectionSelector; -export declare type CommitRequest = - firestoreV1ApiClientInterfaces.CommitRequest; -export declare type CommitResponse = - firestoreV1ApiClientInterfaces.CommitResponse; -export declare type CompositeFilter = - firestoreV1ApiClientInterfaces.CompositeFilter; -export declare type Cursor = firestoreV1ApiClientInterfaces.Cursor; -export declare type Document = firestoreV1ApiClientInterfaces.Document; -export declare type DocumentChange = - firestoreV1ApiClientInterfaces.DocumentChange; -export declare type DocumentDelete = - firestoreV1ApiClientInterfaces.DocumentDelete; -export declare type DocumentMask = firestoreV1ApiClientInterfaces.DocumentMask; -export declare type DocumentRemove = - firestoreV1ApiClientInterfaces.DocumentRemove; -export declare type DocumentTransform = - firestoreV1ApiClientInterfaces.DocumentTransform; -export declare type DocumentsTarget = - firestoreV1ApiClientInterfaces.DocumentsTarget; -export declare type Empty = firestoreV1ApiClientInterfaces.Empty; -export declare type ExistenceFilter = - firestoreV1ApiClientInterfaces.ExistenceFilter; -export declare type FieldFilter = firestoreV1ApiClientInterfaces.FieldFilter; -export declare type FieldReference = - firestoreV1ApiClientInterfaces.FieldReference; -export declare type FieldTransform = - firestoreV1ApiClientInterfaces.FieldTransform; -export declare type Filter = firestoreV1ApiClientInterfaces.Filter; -export declare type Index = firestoreV1ApiClientInterfaces.Index; -export declare type IndexField = 
firestoreV1ApiClientInterfaces.IndexField; -export declare type LatLng = firestoreV1ApiClientInterfaces.LatLng; -export declare type ListCollectionIdsRequest = - firestoreV1ApiClientInterfaces.ListCollectionIdsRequest; -export declare type ListCollectionIdsResponse = - firestoreV1ApiClientInterfaces.ListCollectionIdsResponse; -export declare type ListDocumentsResponse = - firestoreV1ApiClientInterfaces.ListDocumentsResponse; -export declare type ListIndexesResponse = - firestoreV1ApiClientInterfaces.ListIndexesResponse; -export declare type ListenRequest = - firestoreV1ApiClientInterfaces.ListenRequest; -export declare type ListenResponse = - firestoreV1ApiClientInterfaces.ListenResponse; -export declare type MapValue = firestoreV1ApiClientInterfaces.MapValue; -export declare type Operation = firestoreV1ApiClientInterfaces.Operation; -export declare type Order = firestoreV1ApiClientInterfaces.Order; -export declare type Precondition = firestoreV1ApiClientInterfaces.Precondition; -export declare type Projection = firestoreV1ApiClientInterfaces.Projection; -export declare type QueryTarget = firestoreV1ApiClientInterfaces.QueryTarget; -export declare type ReadOnly = firestoreV1ApiClientInterfaces.ReadOnly; -export declare type ReadWrite = firestoreV1ApiClientInterfaces.ReadWrite; -export declare type RollbackRequest = - firestoreV1ApiClientInterfaces.RollbackRequest; -export declare type RunQueryRequest = - firestoreV1ApiClientInterfaces.RunQueryRequest; -export declare type RunQueryResponse = - firestoreV1ApiClientInterfaces.RunQueryResponse; -export declare type RunAggregationQueryRequest = - firestoreV1ApiClientInterfaces.RunAggregationQueryRequest; -export declare type RunAggregationQueryResponse = - firestoreV1ApiClientInterfaces.RunAggregationQueryResponse; -export declare type Status = firestoreV1ApiClientInterfaces.Status; -export declare type StructuredQuery = - firestoreV1ApiClientInterfaces.StructuredQuery; -export declare type Target = firestoreV1ApiClientInterfaces.Target; -export declare type TargetChange = firestoreV1ApiClientInterfaces.TargetChange; -export declare type TransactionOptions = - firestoreV1ApiClientInterfaces.TransactionOptions; -export declare type UnaryFilter = firestoreV1ApiClientInterfaces.UnaryFilter; -export declare type Value = firestoreV1ApiClientInterfaces.Value; -export declare type Write = firestoreV1ApiClientInterfaces.Write; -export declare type WriteRequest = firestoreV1ApiClientInterfaces.WriteRequest; -export declare type WriteResponse = - firestoreV1ApiClientInterfaces.WriteResponse; -export declare type WriteResult = firestoreV1ApiClientInterfaces.WriteResult; -export declare type ProjectsDatabasesDocumentsApiClient$Xgafv = '1' | '2'; -export interface IProjectsDatabasesDocumentsApiClient$XgafvEnum { - 1: ProjectsDatabasesDocumentsApiClient$Xgafv; - 2: ProjectsDatabasesDocumentsApiClient$Xgafv; - values(): ProjectsDatabasesDocumentsApiClient$Xgafv[]; -} -export declare const ProjectsDatabasesDocumentsApiClient$XgafvEnum: IProjectsDatabasesDocumentsApiClient$XgafvEnum; -export declare type ProjectsDatabasesDocumentsApiClientAlt = - | 'json' - | 'media' - | 'proto'; -export interface IProjectsDatabasesDocumentsApiClientAltEnum { - JSON: ProjectsDatabasesDocumentsApiClientAlt; - MEDIA: ProjectsDatabasesDocumentsApiClientAlt; - PROTO: ProjectsDatabasesDocumentsApiClientAlt; - values(): ProjectsDatabasesDocumentsApiClientAlt[]; -} -export declare const ProjectsDatabasesDocumentsApiClientAltEnum: IProjectsDatabasesDocumentsApiClientAltEnum; -export 
interface ProjectsDatabasesDocumentsBatchGetNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsBeginTransactionNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsCommitNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsCreateDocumentNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; - documentId?: string; - maskFieldPaths?: string[]; -} -export interface ProjectsDatabasesDocumentsDeleteNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; - currentDocumentExists?: boolean; - currentDocumentUpdateTime?: string; -} -export interface ProjectsDatabasesDocumentsGetNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; - maskFieldPaths?: string[]; - transaction?: string; - readTime?: string; -} -export interface ProjectsDatabasesDocumentsListCollectionIdsNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsListNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; - 
pageSize?: number; - pageToken?: string; - orderBy?: string; - maskFieldPaths?: string[]; - transaction?: string; - readTime?: string; - showMissing?: boolean; -} -export interface ProjectsDatabasesDocumentsListenNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsPatchNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; - updateMaskFieldPaths?: string[]; - maskFieldPaths?: string[]; - currentDocumentExists?: boolean; - currentDocumentUpdateTime?: string; -} -export interface ProjectsDatabasesDocumentsRollbackNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsRunQueryNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export interface ProjectsDatabasesDocumentsWriteNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesDocumentsApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesDocumentsApiClient$Xgafv; -} -export abstract class ProjectsDatabasesDocumentsApiClient { - private constructor() {} - abstract batchGet( - database: string, - $requestBody: BatchGetDocumentsRequest, - __namedParams__?: ProjectsDatabasesDocumentsBatchGetNamedParameters & object - ): Promise; - abstract beginTransaction( - database: string, - $requestBody: BeginTransactionRequest, - __namedParams__?: ProjectsDatabasesDocumentsBeginTransactionNamedParameters & - object - ): Promise; - abstract commit( - database: string, - $requestBody: CommitRequest, - __namedParams__?: ProjectsDatabasesDocumentsCommitNamedParameters & object - ): Promise; - abstract createDocument( - parent: string, - collectionId: string, - $requestBody: Document, - __namedParams__?: ProjectsDatabasesDocumentsCreateDocumentNamedParameters & - object - ): Promise; - abstract delete( - name: string, - __namedParams__?: ProjectsDatabasesDocumentsDeleteNamedParameters & object - ): Promise; - abstract get( - name: string, - __namedParams__?: ProjectsDatabasesDocumentsGetNamedParameters & object - ): Promise; - abstract list( - parent: string, - collectionId: string, - __namedParams__?: 
ProjectsDatabasesDocumentsListNamedParameters & object - ): Promise; - abstract listCollectionIds( - parent: string, - $requestBody: ListCollectionIdsRequest, - __namedParams__?: ProjectsDatabasesDocumentsListCollectionIdsNamedParameters & - object - ): Promise; - abstract listen( - database: string, - $requestBody: ListenRequest, - __namedParams__?: ProjectsDatabasesDocumentsListenNamedParameters & object - ): Promise; - abstract patch( - name: string, - $requestBody: Document, - __namedParams__?: ProjectsDatabasesDocumentsPatchNamedParameters & object - ): Promise; - abstract rollback( - database: string, - $requestBody: RollbackRequest, - __namedParams__?: ProjectsDatabasesDocumentsRollbackNamedParameters & object - ): Promise; - abstract runQuery( - parent: string, - $requestBody: RunQueryRequest, - __namedParams__?: ProjectsDatabasesDocumentsRunQueryNamedParameters & object - ): Promise; - abstract write( - database: string, - $requestBody: WriteRequest, - __namedParams__?: ProjectsDatabasesDocumentsWriteNamedParameters & object - ): Promise; -} -export declare class ProjectsDatabasesDocumentsApiClientImpl - implements ProjectsDatabasesDocumentsApiClient -{ - private gapiVersion; - private $apiClient; - constructor( - gapiVersion: string, - gapiRequestService: PromiseRequestService, - apiClientHookFactory?: ApiClientHookFactory | null - ); - batchGet( - database: string, - $requestBody: BatchGetDocumentsRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsBatchGetNamedParameters & object - ): Promise; - beginTransaction( - database: string, - $requestBody: BeginTransactionRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsBeginTransactionNamedParameters & object - ): Promise; - commit( - database: string, - $requestBody: CommitRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsCommitNamedParameters & object - ): Promise; - createDocument( - parent: string, - collectionId: string, - $requestBody: Document, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - documentId, - fields, - key, - maskFieldPaths, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsCreateDocumentNamedParameters & object - ): Promise; - delete( - name: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - currentDocumentExists, - currentDocumentUpdateTime, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsDeleteNamedParameters & object - ): Promise; - get( - name: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - maskFieldPaths, - oauth_token, - pp, - prettyPrint, - quotaUser, - readTime, - transaction, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsGetNamedParameters & object - ): Promise; - list( - parent: string, - collectionId: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - maskFieldPaths, - oauth_token, - orderBy, - pageSize, - pageToken, - pp, - prettyPrint, - quotaUser, - readTime, - 
showMissing, - transaction, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsListNamedParameters & object - ): Promise; - listCollectionIds( - parent: string, - $requestBody: ListCollectionIdsRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsListCollectionIdsNamedParameters & object - ): Promise; - listen( - database: string, - $requestBody: ListenRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsListenNamedParameters & object - ): Promise; - patch( - name: string, - $requestBody: Document, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - currentDocumentExists, - currentDocumentUpdateTime, - fields, - key, - maskFieldPaths, - oauth_token, - pp, - prettyPrint, - quotaUser, - updateMaskFieldPaths, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsPatchNamedParameters & object - ): Promise; - rollback( - database: string, - $requestBody: RollbackRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsRollbackNamedParameters & object - ): Promise; - runQuery( - parent: string, - $requestBody: RunQueryRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsRunQueryNamedParameters & object - ): Promise; - write( - database: string, - $requestBody: WriteRequest, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesDocumentsWriteNamedParameters & object - ): Promise; -} -export declare type ProjectsDatabasesIndexesApiClient$Xgafv = '1' | '2'; -export interface IProjectsDatabasesIndexesApiClient$XgafvEnum { - 1: ProjectsDatabasesIndexesApiClient$Xgafv; - 2: ProjectsDatabasesIndexesApiClient$Xgafv; - values(): ProjectsDatabasesIndexesApiClient$Xgafv[]; -} -export declare const ProjectsDatabasesIndexesApiClient$XgafvEnum: IProjectsDatabasesIndexesApiClient$XgafvEnum; -export declare type ProjectsDatabasesIndexesApiClientAlt = - | 'json' - | 'media' - | 'proto'; -export interface IProjectsDatabasesIndexesApiClientAltEnum { - JSON: ProjectsDatabasesIndexesApiClientAlt; - MEDIA: ProjectsDatabasesIndexesApiClientAlt; - PROTO: ProjectsDatabasesIndexesApiClientAlt; - values(): ProjectsDatabasesIndexesApiClientAlt[]; -} -export declare const ProjectsDatabasesIndexesApiClientAltEnum: IProjectsDatabasesIndexesApiClientAltEnum; -export interface ProjectsDatabasesIndexesCreateNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesIndexesApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; -} -export interface ProjectsDatabasesIndexesDeleteNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesIndexesApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: 
string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; -} -export interface ProjectsDatabasesIndexesGetNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesIndexesApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; -} -export interface ProjectsDatabasesIndexesListNamedParameters { - access_token?: string; - alt?: ProjectsDatabasesIndexesApiClientAlt; - bearer_token?: string; - callback?: string; - fields?: string; - key?: string; - oauth_token?: string; - pp?: boolean; - prettyPrint?: boolean; - quotaUser?: string; - upload_protocol?: string; - uploadType?: string; - $Xgafv?: ProjectsDatabasesIndexesApiClient$Xgafv; - filter?: string; - pageSize?: number; - pageToken?: string; -} -export abstract class ProjectsDatabasesIndexesApiClient { - private constructor() {} - abstract create( - parent: string, - $requestBody: Index, - __namedParams__?: ProjectsDatabasesIndexesCreateNamedParameters & object - ): Promise; - abstract delete( - name: string, - __namedParams__?: ProjectsDatabasesIndexesDeleteNamedParameters & object - ): Promise; - abstract get( - name: string, - __namedParams__?: ProjectsDatabasesIndexesGetNamedParameters & object - ): Promise; - abstract list( - parent: string, - __namedParams__?: ProjectsDatabasesIndexesListNamedParameters & object - ): Promise; -} -export declare class ProjectsDatabasesIndexesApiClientImpl - implements ProjectsDatabasesIndexesApiClient -{ - private gapiVersion; - private $apiClient; - constructor( - gapiVersion: string, - gapiRequestService: PromiseRequestService, - apiClientHookFactory?: ApiClientHookFactory | null - ); - create( - parent: string, - $requestBody: Index, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesIndexesCreateNamedParameters & object - ): Promise; - delete( - name: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesIndexesDeleteNamedParameters & object - ): Promise; - get( - name: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - key, - oauth_token, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesIndexesGetNamedParameters & object - ): Promise; - list( - parent: string, - { - $Xgafv, - access_token, - alt, - bearer_token, - callback, - fields, - filter, - key, - oauth_token, - pageSize, - pageToken, - pp, - prettyPrint, - quotaUser, - uploadType, - upload_protocol - }?: ProjectsDatabasesIndexesListNamedParameters & object - ): Promise; -} diff --git a/src/proto/proto/google/api/annotations.proto b/src/proto/proto/google/api/annotations.proto deleted file mode 100644 index efdab3db..00000000 --- a/src/proto/proto/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/src/proto/proto/google/api/client.proto b/src/proto/proto/google/api/client.proto deleted file mode 100644 index 3b3fd0c4..00000000 --- a/src/proto/proto/google/api/client.proto +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ClientProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // A definition of a client library method signature. - // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. - // - // The fields on the method signature are provided as a comma-separated - // string. - // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // * Adding this annotation to an unannotated method is backwards - // compatible. - // * Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is - // a breaking change. 
- // * Re-ordering existing method signature annotations is a breaking - // change. - repeated string method_signature = 1051; -} - -extend google.protobuf.ServiceOptions { - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - string default_host = 1049; - - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } - string oauth_scopes = 1050; -} diff --git a/src/proto/proto/google/api/field_behavior.proto b/src/proto/proto/google/api/field_behavior.proto deleted file mode 100644 index c4abe3b6..00000000 --- a/src/proto/proto/google/api/field_behavior.proto +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "FieldBehaviorProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. - // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - repeated google.api.FieldBehavior field_behavior = 1052; -} - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -enum FieldBehavior { - // Conventional default for enums. Do not use this. - FIELD_BEHAVIOR_UNSPECIFIED = 0; - - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - OPTIONAL = 1; - - // Denotes a field as required. 
- // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - REQUIRED = 2; - - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - OUTPUT_ONLY = 3; - - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - INPUT_ONLY = 4; - - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - IMMUTABLE = 5; - - // Denotes that a (repeated) field is an unordered list. - // This indicates that the service may provide the elements of the list - // in any arbitrary order, rather than the order the user originally - // provided. Additionally, the list's order may or may not be stable. - UNORDERED_LIST = 6; - - // Denotes that this field returns a non-empty default value if not set. - // This indicates that if the user provides the empty value in a request, - // a non-empty value will be returned. The user will not be aware of what - // non-empty value to expect. - NON_EMPTY_DEFAULT = 7; -} diff --git a/src/proto/proto/google/api/http.proto b/src/proto/proto/google/api/http.proto deleted file mode 100644 index 113fa936..00000000 --- a/src/proto/proto/google/api/http.proto +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. 
Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. 
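To make the query-parameter mapping concrete, a hypothetical TypeScript sketch of the GetMessage example follows; the interface and helper names are invented for illustration and are not part of any generated client.

    // Hypothetical sketch of the GetMessageRequest -> HTTP GET mapping shown above.
    // Fields bound by the path template go into the URL path; the remaining
    // primitive fields become query parameters (nested fields use dotted names).
    interface GetMessageRequest {
      messageId: string;            // mapped to the URL path
      revision?: number;            // mapped to ?revision=
      sub?: { subfield?: string };  // mapped to ?sub.subfield=
    }

    function toHttpGet(req: GetMessageRequest): string {
      const params = new URLSearchParams();
      if (req.revision !== undefined) params.set('revision', String(req.revision));
      if (req.sub?.subfield !== undefined) params.set('sub.subfield', req.sub.subfield);
      const query = params.toString();
      return `/v1/messages/${encodeURIComponent(req.messageId)}` + (query ? `?${query}` : '');
    }

    // toHttpGet({ messageId: '123456', revision: 2, sub: { subfield: 'foo' } })
    //   === '/v1/messages/123456?revision=2&sub.subfield=foo'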
Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. 
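The body mapping can be sketched the same way; the types and helper below are again illustrative only, mirroring the `body: "message"` binding of the UpdateMessage example.

    // Hypothetical sketch of the UpdateMessage mapping with `body: "message"`:
    // message_id travels in the URL path and the `message` field alone becomes the
    // JSON request body. With `body: "*"`, every field not bound by the path would
    // go into the body instead and no query parameters would be allowed.
    interface UpdateMessageRequest {
      messageId: string;           // mapped to the URL path
      message: { text?: string };  // mapped to the HTTP body
    }

    function toHttpPatch(req: UpdateMessageRequest): { method: string; path: string; body: string } {
      return {
        method: 'PATCH',
        path: `/v1/messages/${encodeURIComponent(req.messageId)}`,
        body: JSON.stringify(req.message), // proto3 JSON encoding of the Message
      };
    }

    // toHttpPatch({ messageId: '123456', message: { text: 'Hi!' } })
    //   => PATCH /v1/messages/123456 with body {"text":"Hi!"}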
-// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. 
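A rough TypeScript approximation of the client-side variable expansion just described, assuming UTF-8 percent-encoding; it is a sketch of the stated rules, not the canonical implementation.

    // Rough sketch of client-side path variable expansion described above.
    // Single-segment variables ({var} / {var=*}) keep only [-_.~0-9a-zA-Z];
    // multi-segment variables ({var=**} / {var=foo/*}) additionally keep "/".
    function expandPathVariable(value: string, multiSegment: boolean): string {
      const keep = multiSegment ? /[-_.~0-9a-zA-Z\/]/ : /[-_.~0-9a-zA-Z]/;
      let out = '';
      for (const ch of value) {
        out += keep.test(ch)
          ? ch
          : Array.from(new TextEncoder().encode(ch),
              b => '%' + b.toString(16).toUpperCase().padStart(2, '0')).join('');
      }
      return out;
    }

    // expandPathVariable('rooms/a b', false) === 'rooms%2Fa%20b'   // {var}
    // expandPathVariable('rooms/a b', true)  === 'rooms/a%20b'     // {var=**}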
-// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -message HttpRule { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - string get = 2; - - // Maps to HTTP PUT. Used for replacing a resource. - string put = 3; - - // Maps to HTTP POST. Used for creating a resource or performing an action. - string post = 4; - - // Maps to HTTP DELETE. Used for deleting a resource. - string delete = 5; - - // Maps to HTTP PATCH. Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - string response_body = 12; - - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/src/proto/proto/google/firestore/admin/v1/database.proto b/src/proto/proto/google/firestore/admin/v1/database.proto deleted file mode 100644 index 3f242c3e..00000000 --- a/src/proto/proto/google/firestore/admin/v1/database.proto +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "DatabaseProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; - -// A Cloud Firestore Database. -// Currently only one database is allowed per cloud project; this database -// must have a `database_id` of '(default)'. -message Database { - option (google.api.resource) = { - type: "firestore.googleapis.com/Database" - pattern: "projects/{project}/databases/{database}" - style: DECLARATIVE_FRIENDLY - }; - - // The type of the database. - // See https://cloud.google.com/datastore/docs/firestore-or-datastore for - // information about how to choose. - // - // Mode changes are only allowed if the database is empty. - enum DatabaseType { - // The default value. This value is used if the database type is omitted. - DATABASE_TYPE_UNSPECIFIED = 0; - - // Firestore Native Mode - FIRESTORE_NATIVE = 1; - - // Firestore in Datastore Mode. - DATASTORE_MODE = 2; - } - - // The type of concurrency control mode for transactions. - enum ConcurrencyMode { - // Not used. - CONCURRENCY_MODE_UNSPECIFIED = 0; - - // Use optimistic concurrency control by default. This mode is available - // for Cloud Firestore databases. - OPTIMISTIC = 1; - - // Use pessimistic concurrency control by default. This mode is available - // for Cloud Firestore databases. - // - // This is the default setting for Cloud Firestore. - PESSIMISTIC = 2; - - // Use optimistic concurrency control with entity groups by default. - // - // This is the only available mode for Cloud Datastore. - // - // This mode is also available for Cloud Firestore with Datastore Mode but - // is not recommended. - OPTIMISTIC_WITH_ENTITY_GROUPS = 3; - } - - // The type of App Engine integration mode. - enum AppEngineIntegrationMode { - // Not used. - APP_ENGINE_INTEGRATION_MODE_UNSPECIFIED = 0; - - // If an App Engine application exists in the same region as this database, - // App Engine configuration will impact this database. This includes - // disabling of the application & database, as well as disabling writes to - // the database. - ENABLED = 1; - - // Appengine has no affect on the ability of this database to serve - // requests. - DISABLED = 2; - } - - // The resource name of the Database. - // Format: `projects/{project}/databases/{database}` - string name = 1; - - // The location of the database. Available databases are listed at - // https://cloud.google.com/firestore/docs/locations. - string location_id = 9; - - // The type of the database. - // See https://cloud.google.com/datastore/docs/firestore-or-datastore for - // information about how to choose. 
- DatabaseType type = 10; - - // The concurrency control mode to use for this database. - ConcurrencyMode concurrency_mode = 15; - - // The App Engine integration mode to use for this database. - AppEngineIntegrationMode app_engine_integration_mode = 19; - - // Output only. The key_prefix for this database. This key_prefix is used, in combination - // with the project id ("~") to construct the - // application id that is returned from the Cloud Datastore APIs in Google App - // Engine first generation runtimes. - // - // This value may be empty in which case the appid to use for URL-encoded keys - // is the project_id (eg: foo instead of v~foo). - string key_prefix = 20 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // This checksum is computed by the server based on the value of other - // fields, and may be sent on update and delete requests to ensure the - // client has an up-to-date value before proceeding. - string etag = 99; -} diff --git a/src/proto/proto/google/firestore/admin/v1/field.proto b/src/proto/proto/google/firestore/admin/v1/field.proto deleted file mode 100644 index 0bbb11d8..00000000 --- a/src/proto/proto/google/firestore/admin/v1/field.proto +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/firestore/admin/v1/index.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FieldProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; - -// Represents a single field in the database. -// -// Fields are grouped by their "Collection Group", which represent all -// collections in the database with the same id. -message Field { - option (google.api.resource) = { - type: "firestore.googleapis.com/Field" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}" - }; - - // The index configuration for this field. - message IndexConfig { - // The indexes supported for this field. - repeated Index indexes = 1; - - // Output only. When true, the `Field`'s index configuration is set from the - // configuration specified by the `ancestor_field`. - // When false, the `Field`'s index configuration is defined explicitly. - bool uses_ancestor_config = 2; - - // Output only. Specifies the resource name of the `Field` from which this field's - // index configuration is set (when `uses_ancestor_config` is true), - // or from which it *would* be set if this field had no index configuration - // (when `uses_ancestor_config` is false). 
- string ancestor_field = 3; - - // Output only - // When true, the `Field`'s index configuration is in the process of being - // reverted. Once complete, the index config will transition to the same - // state as the field specified by `ancestor_field`, at which point - // `uses_ancestor_config` will be `true` and `reverting` will be `false`. - bool reverting = 4; - } - - // The TTL (time-to-live) configuration for documents that have this `Field` - // set. - // Storing a timestamp value into a TTL-enabled field will be treated as - // the document's absolute expiration time. Using any other data type or - // leaving the field absent will disable the TTL for the individual document. - message TtlConfig { - // The state of applying the TTL configuration to all documents. - enum State { - // The state is unspecified or unknown. - STATE_UNSPECIFIED = 0; - - // The TTL is being applied. There is an active long-running operation to - // track the change. Newly written documents will have TTLs applied as - // requested. Requested TTLs on existing documents are still being - // processed. When TTLs on all existing documents have been processed, the - // state will move to 'ACTIVE'. - CREATING = 1; - - // The TTL is active for all documents. - ACTIVE = 2; - - // The TTL configuration could not be enabled for all existing documents. - // Newly written documents will continue to have their TTL applied. - // The LRO returned when last attempting to enable TTL for this `Field` - // has failed, and may have more details. - NEEDS_REPAIR = 3; - } - - // Output only. The state of the TTL configuration. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - } - - // Required. A field name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - // - // A field path may be a simple field name, e.g. `address` or a path to fields - // within map_value , e.g. `address.city`, - // or a special field path. The only valid special field is `*`, which - // represents any field. - // - // Field paths may be quoted using ` (backtick). The only character that needs - // to be escaped within a quoted field path is the backtick character itself, - // escaped using a backslash. Special characters in field paths that - // must be quoted include: `*`, `.`, - // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. - // - // Examples: - // (Note: Comments here are written in markdown syntax, so there is an - // additional layer of backticks to represent a code block) - // `\`address.city\`` represents a field named `address.city`, not the map key - // `city` in the field `address`. - // `\`*\`` represents a field named `*`, not any field. - // - // A special `Field` contains the default indexing settings for all fields. - // This field's resource name is: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` - // Indexes defined on this `Field` will be applied to all fields which do not - // have their own `Field` index configuration. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // The index configuration for this field. If unset, field indexing will - // revert to the configuration defined by the `ancestor_field`. To - // explicitly remove all indexes for this field, specify an index config - // with an empty list of indexes. - IndexConfig index_config = 2; - - // The TTL configuration for this `Field`. 
- // Setting or unsetting this will enable or disable the TTL for - // documents that have this `Field`. - TtlConfig ttl_config = 3; -} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto deleted file mode 100644 index c493673a..00000000 --- a/src/proto/proto/google/firestore/admin/v1/firestore_admin.proto +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/firestore/admin/v1/database.proto"; -import "google/firestore/admin/v1/field.proto"; -import "google/firestore/admin/v1/index.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreAdminProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; -option (google.api.resource_definition) = { - type: "firestore.googleapis.com/Location" - pattern: "projects/{project}/locations/{location}" -}; -option (google.api.resource_definition) = { - type: "firestore.googleapis.com/CollectionGroup" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}" -}; - -// The Cloud Firestore Admin API. -// -// This API provides several administrative services for Cloud Firestore. -// -// Project, Database, Namespace, Collection, Collection Group, and Document are -// used as defined in the Google Cloud Firestore API. -// -// Operation: An Operation represents work being performed in the background. -// -// The index service manages Cloud Firestore indexes. -// -// Index creation is performed asynchronously. -// An Operation resource is created for each such asynchronous operation. -// The state of the operation (including any errors encountered) -// may be queried via the Operation resource. -// -// The Operations collection provides a record of actions performed for the -// specified Project (including any Operations in progress). Operations are not -// created directly but through calls on other collections or resources. -// -// An Operation that is done may be deleted so that it is no longer listed as -// part of the Operation collection. Operations are garbage collected after -// 30 days. By default, ListOperations will only return in progress and failed -// operations. To list completed operation, issue a ListOperations request with -// the filter `done: true`. 
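As a minimal sketch of how a caller might track such an Operation over REST, assuming the GetOperation HTTP binding listed in the service config later in this patch, a fetch-style client, and an illustrative fixed polling delay:

    // Minimal sketch of polling an admin operation, assuming the binding
    // GET /v1/{name=projects/*/databases/*/operations/*} and the usual
    // google.longrunning.Operation JSON shape.
    interface Operation {
      name: string;
      done?: boolean;
      error?: { code: number; message: string };
      response?: unknown;
    }

    async function waitForOperation(baseUrl: string, operationName: string): Promise<Operation> {
      // operationName, e.g. "projects/p/databases/(default)/operations/abc123"
      for (;;) {
        const res = await fetch(`${baseUrl}/v1/${operationName}`);
        const op = (await res.json()) as Operation;
        if (op.done) {
          if (op.error) throw new Error(`operation failed: ${op.error.message}`);
          return op;
        }
        await new Promise(r => setTimeout(r, 2_000)); // simple fixed delay for the sketch
      }
    }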
-// -// Operations are created by service `FirestoreAdmin`, but are accessed via -// service `google.longrunning.Operations`. -service FirestoreAdmin { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] - // which may be used to track the status of the creation. The metadata for - // the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata]. - rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - body: "index" - }; - option (google.api.method_signature) = "parent,index"; - option (google.longrunning.operation_info) = { - response_type: "Index" - metadata_type: "IndexOperationMetadata" - }; - } - - // Lists composite indexes. - rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - }; - option (google.api.method_signature) = "parent"; - } - - // Gets a composite index. - rpc GetIndex(GetIndexRequest) returns (Index) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Deletes a composite index. - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Gets the metadata and configuration for a Field. - rpc GetField(GetFieldRequest) returns (Field) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Updates a field configuration. Currently, field updates apply only to - // single field index configuration. However, calls to - // [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField] should provide a field mask to avoid - // changing any configuration that the caller isn't aware of. The field mask - // should be specified as: `{ paths: "index_config" }`. - // - // This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to - // track the status of the field update. The metadata for - // the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata]. - // - // To configure the default field settings for the database, use - // the special `Field` with resource name: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. - rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}" - body: "field" - }; - option (google.api.method_signature) = "field"; - option (google.longrunning.operation_info) = { - response_type: "Field" - metadata_type: "FieldOperationMetadata" - }; - } - - // Lists the field configuration and metadata for this database. 
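A hedged sketch of what an UpdateField call restricted to `index_config` could look like over REST, following the transcoding rules earlier in this patch; the query-parameter spelling of the field mask is an assumption of the sketch.

    // Hedged sketch: PATCH /v1/{field.name=...}/fields/* with `field` as the body.
    // The unbound update_mask is assumed to travel as an updateMask query parameter;
    // exact serialization may differ.
    async function disableSingleFieldIndexes(baseUrl: string, fieldName: string): Promise<void> {
      // fieldName, e.g. "projects/p/databases/(default)/collectionGroups/posts/fields/body"
      await fetch(`${baseUrl}/v1/${fieldName}?updateMask=indexConfig`, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          name: fieldName,
          indexConfig: { indexes: [] }, // empty list removes all indexes for this field
        }),
      });
    }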
- // - // Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to - // `indexConfig.usesAncestorConfig:false` . - rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields" - }; - option (google.api.method_signature) = "parent"; - } - - // Exports a copy of all or a subset of documents from Google Cloud Firestore - // to another storage system, such as Google Cloud Storage. Recent updates to - // documents may not be reflected in the export. The export occurs in the - // background and its progress can be monitored and managed via the - // Operation resource that is created. The output of an export may only be - // used once the associated operation is done. If an export operation is - // cancelled before completion it may leave partial data behind in Google - // Cloud Storage. - // - // For more details on export behavior and output format, refer to: - // https://cloud.google.com/firestore/docs/manage-data/export-import - rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{name=projects/*/databases/*}:exportDocuments" - body: "*" - }; - option (google.api.method_signature) = "name"; - option (google.longrunning.operation_info) = { - response_type: "ExportDocumentsResponse" - metadata_type: "ExportDocumentsMetadata" - }; - } - - // Imports documents into Google Cloud Firestore. Existing documents with the - // same name are overwritten. The import occurs in the background and its - // progress can be monitored and managed via the Operation resource that is - // created. If an ImportDocuments operation is cancelled, it is possible - // that a subset of the data has already been imported to Cloud Firestore. - rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{name=projects/*/databases/*}:importDocuments" - body: "*" - }; - option (google.api.method_signature) = "name"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "ImportDocumentsMetadata" - }; - } - - // Gets information about a database. - rpc GetDatabase(GetDatabaseRequest) returns (Database) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*}" - }; - option (google.api.method_signature) = "name"; - } - - // List all the databases in the project. - rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/databases" - }; - option (google.api.method_signature) = "parent"; - } - - // Updates a database. - rpc UpdateDatabase(UpdateDatabaseRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{database.name=projects/*/databases/*}" - body: "database" - }; - option (google.api.method_signature) = "database,update_mask"; - option (google.longrunning.operation_info) = { - response_type: "Database" - metadata_type: "UpdateDatabaseMetadata" - }; - } -} - -// A request to list the Firestore Databases in all locations for a project. -message ListDatabasesRequest { - // Required. 
A parent name of the form - // `projects/{project_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "firestore.googleapis.com/Database" - } - ]; -} - -// The list of databases for a project. -message ListDatabasesResponse { - // The databases in the project. - repeated Database databases = 1; -} - -// The request for [FirestoreAdmin.GetDatabase][google.firestore.admin.v1.FirestoreAdmin.GetDatabase]. -message GetDatabaseRequest { - // Required. A name of the form - // `projects/{project_id}/databases/{database_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Database" - } - ]; -} - -// The request for [FirestoreAdmin.UpdateDatabase][google.firestore.admin.v1.FirestoreAdmin.UpdateDatabase]. -message UpdateDatabaseRequest { - // Required. The database to update. - Database database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The list of fields to be updated. - google.protobuf.FieldMask update_mask = 2; -} - -// Metadata related to the update database operation. -message UpdateDatabaseMetadata { - -} - -// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. -message CreateIndexRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // Required. The composite index to create. - Index index = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. -message ListIndexesRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // The filter to apply to list results. - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. -message ListIndexesResponse { - // The requested indexes. - repeated Index indexes = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex]. -message GetIndexRequest { - // Required. A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Index" - } - ]; -} - -// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex]. -message DeleteIndexRequest { - // Required. 
A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Index" - } - ]; -} - -// The request for [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. -message UpdateFieldRequest { - // Required. The field to be updated. - Field field = 1 [(google.api.field_behavior) = REQUIRED]; - - // A mask, relative to the field. If specified, only configuration specified - // by this field_mask will be updated in the field. - google.protobuf.FieldMask update_mask = 2; -} - -// The request for [FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField]. -message GetFieldRequest { - // Required. A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Field" - } - ]; -} - -// The request for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. -message ListFieldsRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // The filter to apply to list results. Currently, - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with a filter that includes - // `indexConfig.usesAncestorConfig:false` . - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. -message ListFieldsResponse { - // The requested fields. - repeated Field fields = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsRequest { - // Required. Database to export. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Database" - } - ]; - - // Which collection ids to export. Unspecified means all collections. - repeated string collection_ids = 2; - - // The output URI. Currently only supports Google Cloud Storage URIs of the - // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name - // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional - // Google Cloud Storage namespace path. When - // choosing a name, be sure to consider Google Cloud Storage naming - // guidelines: https://cloud.google.com/storage/docs/naming. 
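A hedged sketch of starting an export through the ExportDocuments binding shown above; the bucket name and collection ids are placeholders, and no error handling is shown.

    // Hedged sketch of POST /v1/{name=projects/*/databases/*}:exportDocuments (body "*").
    async function exportAllDocuments(baseUrl: string, database: string): Promise<unknown> {
      // database, e.g. "projects/my-project/databases/(default)"
      const res = await fetch(`${baseUrl}/v1/${database}:exportDocuments`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          collectionIds: [],                         // empty / unspecified means all collections
          outputUriPrefix: 'gs://my-bucket/exports', // placeholder bucket
        }),
      });
      return res.json(); // a google.longrunning.Operation describing the export
    }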
- // If the URI is a bucket (without a namespace path), a prefix will be - // generated based on the start time. - string output_uri_prefix = 3; -} - -// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsRequest { - // Required. Database to import into. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Database" - } - ]; - - // Which collection ids to import. Unspecified means all collections included - // in the import. - repeated string collection_ids = 2; - - // Location of the exported files. - // This must match the output_uri_prefix of an ExportDocumentsResponse from - // an export that has completed successfully. - // See: - // [google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix]. - string input_uri_prefix = 3; -} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json b/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json deleted file mode 100755 index 1460a8b7..00000000 --- a/src/proto/proto/google/firestore/admin/v1/firestore_admin_grpc_service_config.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "methodConfig": [ - { - "name": [ - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "ListIndexes" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "GetIndex" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "DeleteIndex" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "GetField" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "ListFields" - } - ], - "timeout": "60s", - "retryPolicy": { - "maxAttempts": 5, - "initialBackoff": "0.100s", - "maxBackoff": "60s", - "backoffMultiplier": 1.3, - "retryableStatusCodes": [ - "UNAVAILABLE", - "INTERNAL", - "DEADLINE_EXCEEDED" - ] - } - }, - { - "name": [ - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "CreateIndex" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "ImportDocuments" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "ExportDocuments" - }, - { - "service": "google.firestore.admin.v1.FirestoreAdmin", - "method": "UpdateField" - } - ], - "timeout": "60s" - } - ] -} diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml b/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml deleted file mode 100644 index f13f3c2b..00000000 --- a/src/proto/proto/google/firestore/admin/v1/firestore_gapic.yaml +++ /dev/null @@ -1,5 +0,0 @@ -type: com.google.api.codegen.ConfigProto -config_schema_version: 2.0.0 -language_settings: - java: - package_name: com.google.cloud.firestore.v1 diff --git a/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml b/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml deleted file mode 100644 index 3b45b2f6..00000000 --- a/src/proto/proto/google/firestore/admin/v1/firestore_v1.yaml +++ /dev/null @@ -1,75 +0,0 @@ -type: google.api.Service -config_version: 3 -name: firestore.googleapis.com -title: Cloud Firestore API - -apis: -- name: google.cloud.location.Locations -- name: google.firestore.admin.v1.FirestoreAdmin -- name: google.longrunning.Operations - -types: -- name: 
google.firestore.admin.v1.ExportDocumentsMetadata -- name: google.firestore.admin.v1.ExportDocumentsResponse -- name: google.firestore.admin.v1.FieldOperationMetadata -- name: google.firestore.admin.v1.ImportDocumentsMetadata -- name: google.firestore.admin.v1.IndexOperationMetadata -- name: google.firestore.admin.v1.LocationMetadata -- name: google.firestore.admin.v1.UpdateDatabaseMetadata - -documentation: - summary: |- - Accesses the NoSQL document database built for automatic scaling, high - performance, and ease of application development. - rules: - - selector: google.cloud.location.Locations.GetLocation - description: Gets information about a location. - - - selector: google.cloud.location.Locations.ListLocations - description: Lists information about the supported locations for this service. - -backend: - rules: - - selector: google.cloud.location.Locations.GetLocation - deadline: 295.0 - - selector: google.cloud.location.Locations.ListLocations - deadline: 295.0 - - selector: 'google.firestore.admin.v1.FirestoreAdmin.*' - deadline: 295.0 - - selector: 'google.longrunning.Operations.*' - deadline: 295.0 - -http: - rules: - - selector: google.longrunning.Operations.CancelOperation - post: '/v1/{name=projects/*/databases/*/operations/*}:cancel' - body: '*' - - selector: google.longrunning.Operations.DeleteOperation - delete: '/v1/{name=projects/*/databases/*/operations/*}' - - selector: google.longrunning.Operations.GetOperation - get: '/v1/{name=projects/*/databases/*/operations/*}' - - selector: google.longrunning.Operations.ListOperations - get: '/v1/{name=projects/*/databases/*}/operations' - -authentication: - rules: - - selector: google.cloud.location.Locations.GetLocation - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/datastore - - selector: google.cloud.location.Locations.ListLocations - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/datastore - - selector: 'google.firestore.admin.v1.FirestoreAdmin.*' - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/datastore - - selector: 'google.longrunning.Operations.*' - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/datastore diff --git a/src/proto/proto/google/firestore/admin/v1/index.proto b/src/proto/proto/google/firestore/admin/v1/index.proto deleted file mode 100644 index 066d4109..00000000 --- a/src/proto/proto/google/firestore/admin/v1/index.proto +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
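Following the CancelOperation rule in the service config above, a cancel call could be sketched as below (illustrative only; authentication headers are omitted).

    // Hedged sketch of POST /v1/{name=projects/*/databases/*/operations/*}:cancel (body "*").
    async function cancelOperation(baseUrl: string, operationName: string): Promise<void> {
      await fetch(`${baseUrl}/v1/${operationName}:cancel`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: '{}', // CancelOperationRequest carries no fields beyond the name bound in the path
      });
    }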
- -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/resource.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "IndexProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; - -// Cloud Firestore indexes enable simple and complex queries against -// documents in a database. -message Index { - option (google.api.resource) = { - type: "firestore.googleapis.com/Index" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}" - }; - - // Query Scope defines the scope at which a query is run. This is specified on - // a StructuredQuery's `from` field. - enum QueryScope { - // The query scope is unspecified. Not a valid option. - QUERY_SCOPE_UNSPECIFIED = 0; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified - // at query time, and that has the collection id specified by the index. - COLLECTION = 1; - - // Indexes with a collection group query scope specified allow queries - // against all collections that has the collection id specified by the - // index. - COLLECTION_GROUP = 2; - } - - // A field in an index. - // The field_path describes which field is indexed, the value_mode describes - // how the field value is indexed. - message IndexField { - // The supported orderings. - enum Order { - // The ordering is unspecified. Not a valid option. - ORDER_UNSPECIFIED = 0; - - // The field is ordered by ascending field value. - ASCENDING = 1; - - // The field is ordered by descending field value. - DESCENDING = 2; - } - - // The supported array value configurations. - enum ArrayConfig { - // The index does not support additional array queries. - ARRAY_CONFIG_UNSPECIFIED = 0; - - // The index supports array containment queries. - CONTAINS = 1; - } - - // Can be __name__. - // For single field indexes, this must match the name of the field or may - // be omitted. - string field_path = 1; - - // How the field value is indexed. - oneof value_mode { - // Indicates that this field supports ordering by the specified order or - // comparing using =, !=, <, <=, >, >=. - Order order = 2; - - // Indicates that this field supports operations on `array_value`s. - ArrayConfig array_config = 3; - } - } - - // The state of an index. During index creation, an index will be in the - // `CREATING` state. If the index is created successfully, it will transition - // to the `READY` state. If the index creation encounters a problem, the index - // will transition to the `NEEDS_REPAIR` state. - enum State { - // The state is unspecified. - STATE_UNSPECIFIED = 0; - - // The index is being created. - // There is an active long-running operation for the index. - // The index is updated when writing a document. - // Some index data may exist. - CREATING = 1; - - // The index is ready to be used. - // The index is updated when writing a document. - // The index is fully populated from all stored documents it applies to. - READY = 2; - - // The index was being created, but something went wrong. - // There is no active long-running operation for the index, - // and the most recently finished long-running operation failed. 
- // The index is not updated when writing a document. - // Some index data may exist. - // Use the google.longrunning.Operations API to determine why the operation - // that last attempted to create this index failed, then re-create the - // index. - NEEDS_REPAIR = 3; - } - - // Output only. A server defined name for this index. - // The form of this name for composite indexes will be: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` - // For single field indexes, this field will be empty. - string name = 1; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified at - // query time, and that has the same collection id. - // - // Indexes with a collection group query scope specified allow queries against - // all collections descended from a specific document, specified at query - // time, and that have the same collection id as this index. - QueryScope query_scope = 2; - - // The fields supported by this index. - // - // For composite indexes, this is always 2 or more fields. - // The last field entry is always for the field path `__name__`. If, on - // creation, `__name__` was not specified as the last field, it will be added - // automatically with the same direction as that of the last field defined. If - // the final field in a composite index is not directional, the `__name__` - // will be ordered ASCENDING (unless explicitly specified). - // - // For single field indexes, this will always be exactly one entry with a - // field path equal to the field path of the associated field. - repeated IndexField fields = 3; - - // Output only. The serving state of the index. - State state = 4; -} diff --git a/src/proto/proto/google/firestore/admin/v1/location.proto b/src/proto/proto/google/firestore/admin/v1/location.proto deleted file mode 100644 index 8f7519c4..00000000 --- a/src/proto/proto/google/firestore/admin/v1/location.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.admin.v1; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "LocationProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; - -// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. 
-message LocationMetadata { - -} diff --git a/src/proto/proto/google/firestore/admin/v1/operation.proto b/src/proto/proto/google/firestore/admin/v1/operation.proto deleted file mode 100644 index 654a6ad6..00000000 --- a/src/proto/proto/google/firestore/admin/v1/operation.proto +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/firestore/admin/v1/index.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "OperationProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option ruby_package = "Google::Cloud::Firestore::Admin::V1"; - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. -message IndexOperationMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string index = 3; - - // The state of the operation. - OperationState state = 4; - - // The progress, in documents, of this operation. - Progress progress_documents = 5; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 6; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. -message FieldOperationMetadata { - // Information about an index configuration change. - message IndexConfigDelta { - // Specifies how the index is changing. - enum ChangeType { - // The type of change is not specified or known. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The single field index is being added. - ADD = 1; - - // The single field index is being removed. - REMOVE = 2; - } - - // Specifies how the index is changing. - ChangeType change_type = 1; - - // The index being changed. - Index index = 2; - } - - // Information about an TTL configuration change. - message TtlConfigDelta { - // Specifies how the TTL config is changing. - enum ChangeType { - // The type of change is not specified or known. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The TTL config is being added. - ADD = 1; - - // The TTL config is being removed. - REMOVE = 2; - } - - // Specifies how the TTL configuration is changing. - ChangeType change_type = 1; - } - - // The time this operation started. 
- google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The field resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - string field = 3; - - // A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this - // operation. - repeated IndexConfigDelta index_config_deltas = 4; - - // The state of the operation. - OperationState state = 5; - - // The progress, in documents, of this operation. - Progress progress_documents = 6; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 7; - - // Describes the deltas of TTL configuration. - TtlConfigDelta ttl_config_delta = 8; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being imported. - repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Describes the state of the operation. -enum OperationState { - // Unspecified. - OPERATION_STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. - PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. 
- SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. - CANCELLED = 7; -} - -// Describes the progress of the operation. -// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1.Progress] -// is used. -message Progress { - // The amount of work estimated. - int64 estimated_work = 1; - - // The amount of work completed. - int64 completed_work = 2; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto deleted file mode 100644 index 5d7908b9..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta1/firestore_admin.proto +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/api/annotations.proto"; -import "google/firestore/admin/v1beta1/index.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/client.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreAdminProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - -// The Cloud Firestore Admin API. -// -// This API provides several administrative services for Cloud Firestore. -// -// # Concepts -// -// Project, Database, Namespace, Collection, and Document are used as defined in -// the Google Cloud Firestore API. -// -// Operation: An Operation represents work being performed in the background. -// -// -// # Services -// -// ## Index -// -// The index service manages Cloud Firestore indexes. -// -// Index creation is performed asynchronously. -// An Operation resource is created for each such asynchronous operation. -// The state of the operation (including any errors encountered) -// may be queried via the Operation resource. -// -// ## Metadata -// -// Provides metadata and statistical information about data in Cloud Firestore. -// The data provided as part of this API may be stale. -// -// ## Operation -// -// The Operations collection provides a record of actions performed for the -// specified Project (including any Operations in progress). Operations are not -// created directly but through calls on other collections or resources. -// -// An Operation that is not yet done may be cancelled. The request to cancel is -// asynchronous and the Operation may continue to run for some time after the -// request to cancel is made. 
-// -// An Operation that is done may be deleted so that it is no longer listed as -// part of the Operation collection. -// -// Operations are created by service `FirestoreAdmin`, but are accessed via -// service `google.longrunning.Operations`. -service FirestoreAdmin { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Creates the specified index. - // A newly created index's initial state is `CREATING`. On completion of the - // returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - // If the index already exists, the call will return an `ALREADY_EXISTS` - // status. - // - // During creation, the process could result in an error, in which case the - // index will move to the `ERROR` state. The process can be recovered by - // fixing the data that caused the error, removing the index with - // [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - // [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - // - // Indexes with a single field cannot be created. - rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/databases/*}/indexes" - body: "index" - }; - } - - // Lists the indexes that match the specified filters. - rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { - option (google.api.http) = { - get: "/v1beta1/{parent=projects/*/databases/*}/indexes" - }; - } - - // Gets an index. - rpc GetIndex(GetIndexRequest) returns (Index) { - option (google.api.http) = { - get: "/v1beta1/{name=projects/*/databases/*/indexes/*}" - }; - } - - // Deletes an index. - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1beta1/{name=projects/*/databases/*/indexes/*}" - }; - } - - // Exports a copy of all or a subset of documents from Google Cloud Firestore - // to another storage system, such as Google Cloud Storage. Recent updates to - // documents may not be reflected in the export. The export occurs in the - // background and its progress can be monitored and managed via the - // Operation resource that is created. The output of an export may only be - // used once the associated operation is done. If an export operation is - // cancelled before completion it may leave partial data behind in Google - // Cloud Storage. - rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/databases/*}:exportDocuments" - body: "*" - }; - } - - // Imports documents into Google Cloud Firestore. Existing documents with the - // same name are overwritten. The import occurs in the background and its - // progress can be monitored and managed via the Operation resource that is - // created. If an ImportDocuments operation is cancelled, it is possible - // that a subset of the data has already been imported to Cloud Firestore. - rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/databases/*}:importDocuments" - body: "*" - }; - } -} - -// Metadata for index operations. This metadata populates -// the metadata field of [google.longrunning.Operation][google.longrunning.Operation]. 
-message IndexOperationMetadata { - // The type of index operation. - enum OperationType { - // Unspecified. Never set by server. - OPERATION_TYPE_UNSPECIFIED = 0; - - // The operation is creating the index. Initiated by a `CreateIndex` call. - CREATING_INDEX = 1; - } - - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string index = 3; - - // The type of index operation. - OperationType operation_type = 4; - - // True if the [google.longrunning.Operation] was cancelled. If the - // cancellation is in progress, cancelled will be true but - // [google.longrunning.Operation.done][google.longrunning.Operation.done] will be false. - bool cancelled = 5; - - // Progress of the existing operation, measured in number of documents. - Progress document_progress = 6; -} - -// Measures the progress of a particular metric. -message Progress { - // An estimate of how much work has been completed. Note that this may be - // greater than `work_estimated`. - int64 work_completed = 1; - - // An estimate of how much work needs to be performed. Zero if the - // work estimate is unavailable. May change as work progresses. - int64 work_estimated = 2; -} - -// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. -message CreateIndexRequest { - // The name of the database this index will apply to. For example: - // `projects/{project_id}/databases/{database_id}` - string parent = 1; - - // The index to create. The name and state fields are output only and will be - // ignored. Certain single field indexes cannot be created or deleted. - Index index = 2; -} - -// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex]. -message GetIndexRequest { - // The name of the index. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string name = 1; -} - -// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. -message ListIndexesRequest { - // The database name. For example: - // `projects/{project_id}/databases/{database_id}` - string parent = 1; - - string filter = 2; - - // The standard List page size. - int32 page_size = 3; - - // The standard List page token. - string page_token = 4; -} - -// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex]. -message DeleteIndexRequest { - // The index name. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string name = 1; -} - -// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. -message ListIndexesResponse { - // The indexes. - repeated Index indexes = 1; - - // The standard List next-page token. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsRequest { - // Database to export. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to export. Unspecified means all collections. 
- repeated string collection_ids = 3; - - // The output URI. Currently only supports Google Cloud Storage URIs of the - // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name - // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional - // Google Cloud Storage namespace path. When - // choosing a name, be sure to consider Google Cloud Storage naming - // guidelines: https://cloud.google.com/storage/docs/naming. - // If the URI is a bucket (without a namespace path), a prefix will be - // generated based on the start time. - string output_uri_prefix = 4; -} - -// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsRequest { - // Database to import into. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to import. Unspecified means all collections included - // in the import. - repeated string collection_ids = 3; - - // Location of the exported files. - // This must match the output_uri_prefix of an ExportDocumentsResponse from - // an export that has completed successfully. - // See: - // [google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix]. - string input_uri_prefix = 4; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Metadata for ExportDocuments operations. -message ExportDocumentsMetadata { - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // An estimate of the number of documents processed. - Progress progress_documents = 4; - - // An estimate of the number of bytes processed. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for ImportDocuments operations. -message ImportDocumentsMetadata { - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // An estimate of the number of documents processed. - Progress progress_documents = 4; - - // An estimate of the number of bytes processed. - Progress progress_bytes = 5; - - // Which collection ids are being imported. - repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// The various possible states for an ongoing Operation. -enum OperationState { - // Unspecified. - STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. 
- PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. - SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. - CANCELLED = 7; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/index.proto b/src/proto/proto/google/firestore/admin/v1beta1/index.proto deleted file mode 100644 index 0ca95985..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta1/index.proto +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "IndexProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - -// A field of an index. -message IndexField { - // The mode determines how a field is indexed. - enum Mode { - // The mode is unspecified. - MODE_UNSPECIFIED = 0; - - // The field's values are indexed so as to support sequencing in - // ascending order and also query by <, >, <=, >=, and =. - ASCENDING = 2; - - // The field's values are indexed so as to support sequencing in - // descending order and also query by <, >, <=, >=, and =. - DESCENDING = 3; - - // The field's array values are indexed so as to support membership using - // ARRAY_CONTAINS queries. - ARRAY_CONTAINS = 4; - } - - // The path of the field. Must match the field path specification described - // by [google.firestore.v1beta1.Document.fields][fields]. - // Special field path `__name__` may be used by itself or at the end of a - // path. `__type__` may be used only at the end of path. - string field_path = 1; - - // The field's mode. - Mode mode = 2; -} - -// An index definition. -message Index { - // The state of an index. During index creation, an index will be in the - // `CREATING` state. If the index is created successfully, it will transition - // to the `READY` state. If the index is not able to be created, it will - // transition to the `ERROR` state. - enum State { - // The state is unspecified. - STATE_UNSPECIFIED = 0; - - // The index is being created. - // There is an active long-running operation for the index. - // The index is updated when writing a document. - // Some index data may exist. - CREATING = 3; - - // The index is ready to be used. - // The index is updated when writing a document. - // The index is fully populated from all stored documents it applies to. 
- READY = 2; - - // The index was being created, but something went wrong. - // There is no active long-running operation for the index, - // and the most recently finished long-running operation failed. - // The index is not updated when writing a document. - // Some index data may exist. - ERROR = 5; - } - - // The resource name of the index. - // Output only. - string name = 1; - - // The collection ID to which this index applies. Required. - string collection_id = 2; - - // The fields to index. - repeated IndexField fields = 3; - - // The state of the index. - // Output only. - State state = 6; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta1/location.proto b/src/proto/proto/google/firestore/admin/v1beta1/location.proto deleted file mode 100644 index 2201b07d..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta1/location.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/type/latlng.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "LocationProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - -// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. -message LocationMetadata { - -} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/field.proto b/src/proto/proto/google/firestore/admin/v1beta2/field.proto deleted file mode 100644 index cec35519..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta2/field.proto +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/firestore/admin/v1beta2/index.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "FieldProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - -// Represents a single field in the database. -// -// Fields are grouped by their "Collection Group", which represent all -// collections in the database with the same id. -message Field { - // The index configuration for this field. - message IndexConfig { - // The indexes supported for this field. - repeated Index indexes = 1; - - // Output only. When true, the `Field`'s index configuration is set from the - // configuration specified by the `ancestor_field`. - // When false, the `Field`'s index configuration is defined explicitly. - bool uses_ancestor_config = 2; - - // Output only. Specifies the resource name of the `Field` from which this field's - // index configuration is set (when `uses_ancestor_config` is true), - // or from which it *would* be set if this field had no index configuration - // (when `uses_ancestor_config` is false). - string ancestor_field = 3; - - // Output only - // When true, the `Field`'s index configuration is in the process of being - // reverted. Once complete, the index config will transition to the same - // state as the field specified by `ancestor_field`, at which point - // `uses_ancestor_config` will be `true` and `reverting` will be `false`. - bool reverting = 4; - } - - // A field name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - // - // A field path may be a simple field name, e.g. `address` or a path to fields - // within map_value , e.g. `address.city`, - // or a special field path. The only valid special field is `*`, which - // represents any field. - // - // Field paths may be quoted using ` (backtick). The only character that needs - // to be escaped within a quoted field path is the backtick character itself, - // escaped using a backslash. Special characters in field paths that - // must be quoted include: `*`, `.`, - // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. - // - // Examples: - // (Note: Comments here are written in markdown syntax, so there is an - // additional layer of backticks to represent a code block) - // `\`address.city\`` represents a field named `address.city`, not the map key - // `city` in the field `address`. - // `\`*\`` represents a field named `*`, not any field. - // - // A special `Field` contains the default indexing settings for all fields. - // This field's resource name is: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` - // Indexes defined on this `Field` will be applied to all fields which do not - // have their own `Field` index configuration. - string name = 1; - - // The index configuration for this field. If unset, field indexing will - // revert to the configuration defined by the `ancestor_field`. To - // explicitly remove all indexes for this field, specify an index config - // with an empty list of indexes. 
- IndexConfig index_config = 2; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto b/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto deleted file mode 100644 index 56ca764b..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta2/firestore_admin.proto +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/api/annotations.proto"; -import "google/firestore/admin/v1beta2/field.proto"; -import "google/firestore/admin/v1beta2/index.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/api/client.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreAdminProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - -// Operations are created by service `FirestoreAdmin`, but are accessed via -// service `google.longrunning.Operations`. -service FirestoreAdmin { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] - // which may be used to track the status of the creation. The metadata for - // the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1beta2.IndexOperationMetadata]. - rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - body: "index" - }; - } - - // Lists composite indexes. - rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { - option (google.api.http) = { - get: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - }; - } - - // Gets a composite index. - rpc GetIndex(GetIndexRequest) returns (Index) { - option (google.api.http) = { - get: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - } - - // Deletes a composite index. - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - } - - // Gets the metadata and configuration for a Field. - rpc GetField(GetFieldRequest) returns (Field) { - option (google.api.http) = { - get: "/v1beta2/{name=projects/*/databases/*/collectionGroups/*/fields/*}" - }; - } - - // Updates a field configuration. Currently, field updates apply only to - // single field index configuration. 
However, calls to - // [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField] should provide a field mask to avoid - // changing any configuration that the caller isn't aware of. The field mask - // should be specified as: `{ paths: "index_config" }`. - // - // This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to - // track the status of the field update. The metadata for - // the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1beta2.FieldOperationMetadata]. - // - // To configure the default field settings for the database, use - // the special `Field` with resource name: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. - rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1beta2/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}" - body: "field" - }; - } - - // Lists the field configuration and metadata for this database. - // - // Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] with the filter set to - // `indexConfig.usesAncestorConfig:false`. - rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) { - option (google.api.http) = { - get: "/v1beta2/{parent=projects/*/databases/*/collectionGroups/*}/fields" - }; - } - - // Exports a copy of all or a subset of documents from Google Cloud Firestore - // to another storage system, such as Google Cloud Storage. Recent updates to - // documents may not be reflected in the export. The export occurs in the - // background and its progress can be monitored and managed via the - // Operation resource that is created. The output of an export may only be - // used once the associated operation is done. If an export operation is - // cancelled before completion it may leave partial data behind in Google - // Cloud Storage. - rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta2/{name=projects/*/databases/*}:exportDocuments" - body: "*" - }; - } - - // Imports documents into Google Cloud Firestore. Existing documents with the - // same name are overwritten. The import occurs in the background and its - // progress can be monitored and managed via the Operation resource that is - // created. If an ImportDocuments operation is cancelled, it is possible - // that a subset of the data has already been imported to Cloud Firestore. - rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta2/{name=projects/*/databases/*}:importDocuments" - body: "*" - }; - } -} - -// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex]. -message CreateIndexRequest { - // A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1; - - // The composite index to create. - Index index = 2; -} - -// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes]. 
-message ListIndexesRequest { - // A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1; - - // The filter to apply to list results. - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta2.FirestoreAdmin.ListIndexes]. -message ListIndexesResponse { - // The requested indexes. - repeated Index indexes = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta2.FirestoreAdmin.GetIndex]. -message GetIndexRequest { - // A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1; -} - -// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta2.FirestoreAdmin.DeleteIndex]. -message DeleteIndexRequest { - // A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1; -} - -// The request for [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField]. -message UpdateFieldRequest { - // The field to be updated. - Field field = 1; - - // A mask, relative to the field. If specified, only configuration specified - // by this field_mask will be updated in the field. - google.protobuf.FieldMask update_mask = 2; -} - -// The request for [FirestoreAdmin.GetField][google.firestore.admin.v1beta2.FirestoreAdmin.GetField]. -message GetFieldRequest { - // A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}` - string name = 1; -} - -// The request for [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields]. -message ListFieldsRequest { - // A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1; - - // The filter to apply to list results. Currently, - // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields] with the filter set to - // `indexConfig.usesAncestorConfig:false`. - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListFields][google.firestore.admin.v1beta2.FirestoreAdmin.ListFields]. -message ListFieldsResponse { - // The requested fields. - repeated Field fields = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments]. 
-message ExportDocumentsRequest { - // Database to export. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to export. Unspecified means all collections. - repeated string collection_ids = 2; - - // The output URI. Currently only supports Google Cloud Storage URIs of the - // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name - // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional - // Google Cloud Storage namespace path. When - // choosing a name, be sure to consider Google Cloud Storage naming - // guidelines: https://cloud.google.com/storage/docs/naming. - // If the URI is a bucket (without a namespace path), a prefix will be - // generated based on the start time. - string output_uri_prefix = 3; -} - -// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsRequest { - // Database to import into. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to import. Unspecified means all collections included - // in the import. - repeated string collection_ids = 2; - - // Location of the exported files. - // This must match the output_uri_prefix of an ExportDocumentsResponse from - // an export that has completed successfully. - // See: - // [google.firestore.admin.v1beta2.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta2.ExportDocumentsResponse.output_uri_prefix]. - string input_uri_prefix = 3; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/index.proto b/src/proto/proto/google/firestore/admin/v1beta2/index.proto deleted file mode 100644 index c5dc6b98..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta2/index.proto +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "IndexProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - -// Cloud Firestore indexes enable simple and complex queries against -// documents in a database. -message Index { - // A field in an index. - // The field_path describes which field is indexed, the value_mode describes - // how the field value is indexed. - message IndexField { - // The supported orderings. - enum Order { - // The ordering is unspecified. Not a valid option. - ORDER_UNSPECIFIED = 0; - - // The field is ordered by ascending field value. - ASCENDING = 1; - - // The field is ordered by descending field value. - DESCENDING = 2; - } - - // The supported array value configurations. 
- enum ArrayConfig { - // The index does not support additional array queries. - ARRAY_CONFIG_UNSPECIFIED = 0; - - // The index supports array containment queries. - CONTAINS = 1; - } - - // Can be __name__. - // For single field indexes, this must match the name of the field or may - // be omitted. - string field_path = 1; - - // How the field value is indexed. - oneof value_mode { - // Indicates that this field supports ordering by the specified order or - // comparing using =, <, <=, >, >=. - Order order = 2; - - // Indicates that this field supports operations on `array_value`s. - ArrayConfig array_config = 3; - } - } - - // Query Scope defines the scope at which a query is run. This is specified on - // a StructuredQuery's `from` field. - enum QueryScope { - // The query scope is unspecified. Not a valid option. - QUERY_SCOPE_UNSPECIFIED = 0; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified - // at query time, and that has the collection id specified by the index. - COLLECTION = 1; - - // Indexes with a collection group query scope specified allow queries - // against all collections that has the collection id specified by the - // index. - COLLECTION_GROUP = 2; - } - - // The state of an index. During index creation, an index will be in the - // `CREATING` state. If the index is created successfully, it will transition - // to the `READY` state. If the index creation encounters a problem, the index - // will transition to the `NEEDS_REPAIR` state. - enum State { - // The state is unspecified. - STATE_UNSPECIFIED = 0; - - // The index is being created. - // There is an active long-running operation for the index. - // The index is updated when writing a document. - // Some index data may exist. - CREATING = 1; - - // The index is ready to be used. - // The index is updated when writing a document. - // The index is fully populated from all stored documents it applies to. - READY = 2; - - // The index was being created, but something went wrong. - // There is no active long-running operation for the index, - // and the most recently finished long-running operation failed. - // The index is not updated when writing a document. - // Some index data may exist. - // Use the google.longrunning.Operations API to determine why the operation - // that last attempted to create this index failed, then re-create the - // index. - NEEDS_REPAIR = 3; - } - - // Output only. A server defined name for this index. - // The form of this name for composite indexes will be: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` - // For single field indexes, this field will be empty. - string name = 1; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified at - // query time, and that has the same collection id. - // - // Indexes with a collection group query scope specified allow queries against - // all collections descended from a specific document, specified at query - // time, and that have the same collection id as this index. - QueryScope query_scope = 2; - - // The fields supported by this index. - // - // For composite indexes, this is always 2 or more fields. - // The last field entry is always for the field path `__name__`. 
If, on - // creation, `__name__` was not specified as the last field, it will be added - // automatically with the same direction as that of the last field defined. If - // the final field in a composite index is not directional, the `__name__` - // will be ordered ASCENDING (unless explicitly specified). - // - // For single field indexes, this will always be exactly one entry with a - // field path equal to the field path of the associated field. - repeated IndexField fields = 3; - - // Output only. The serving state of the index. - State state = 4; -} diff --git a/src/proto/proto/google/firestore/admin/v1beta2/operation.proto b/src/proto/proto/google/firestore/admin/v1beta2/operation.proto deleted file mode 100644 index d9a1f84e..00000000 --- a/src/proto/proto/google/firestore/admin/v1beta2/operation.proto +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/firestore/admin/v1beta2/index.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "OperationProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex]. -message IndexOperationMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string index = 3; - - // The state of the operation. - OperationState state = 4; - - // The progress, in documents, of this operation. - Progress progress_documents = 5; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 6; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField]. -message FieldOperationMetadata { - // Information about an index configuration change. - message IndexConfigDelta { - // Specifies how the index is changing. - enum ChangeType { - // The type of change is not specified or known. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The single field index is being added. - ADD = 1; - - // The single field index is being removed. - REMOVE = 2; - } - - // Specifies how the index is changing. - ChangeType change_type = 1; - - // The index being changed. 
- Index index = 2; - } - - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The field resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - string field = 3; - - // A list of [IndexConfigDelta][google.firestore.admin.v1beta2.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this - // operation. - repeated IndexConfigDelta index_config_deltas = 4; - - // The state of the operation. - OperationState state = 5; - - // The progress, in documents, of this operation. - Progress document_progress = 6; - - // The progress, in bytes, of this operation. - Progress bytes_progress = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being imported. - repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Describes the state of the operation. -enum OperationState { - // Unspecified. - OPERATION_STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. - PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. 
- SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. - CANCELLED = 7; -} - -// Describes the progress of the operation. -// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1beta2.Progress] -// is used. -message Progress { - // The amount of work estimated. - int64 estimated_work = 1; - - // The amount of work completed. - int64 completed_work = 2; -} diff --git a/src/proto/proto/google/firestore/v1/aggregation_result.proto b/src/proto/proto/google/firestore/v1/aggregation_result.proto deleted file mode 100644 index 538e3fef..00000000 --- a/src/proto/proto/google/firestore/v1/aggregation_result.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.v1; - -import "google/firestore/v1/document.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "AggregationResultProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// The result of a single bucket from a Firestore aggregation query. -// -// The keys of `aggregate_fields` are the same for all results in an aggregation -// query, unlike document queries which can have different fields present for -// each result. -message AggregationResult { - // The result of the aggregation functions, ex: `COUNT(*) AS total_docs`. - // - // The key is the [alias][google.firestore.v1.StructuredAggregationQuery.Aggregation.alias] - // assigned to the aggregation function on input and the size of this map - // equals the number of aggregation functions in the query. - map aggregate_fields = 2; -} diff --git a/src/proto/proto/google/firestore/v1/common.proto b/src/proto/proto/google/firestore/v1/common.proto deleted file mode 100644 index 3bc978ca..00000000 --- a/src/proto/proto/google/firestore/v1/common.proto +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.firestore.v1; - -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// A set of field paths on a document. -// Used to restrict a get or update operation on a document to a subset of its -// fields. -// This is different from standard field masks, as this is always scoped to a -// [Document][google.firestore.v1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1.Value]. -message DocumentMask { - // The list of field paths in the mask. See [Document.fields][google.firestore.v1.Document.fields] for a field - // path syntax reference. - repeated string field_paths = 1; -} - -// A precondition on a document, used for conditional operations. -message Precondition { - // The type of precondition. - oneof condition_type { - // When set to `true`, the target document must exist. - // When set to `false`, the target document must not exist. - bool exists = 1; - - // When set, the target document must exist and have been last updated at - // that time. - google.protobuf.Timestamp update_time = 2; - } -} - -// Options for creating a new transaction. -message TransactionOptions { - // Options for a transaction that can be used to read and write documents. - message ReadWrite { - // An optional transaction to retry. - bytes retry_transaction = 1; - } - - // Options for a transaction that can only be used to read documents. - message ReadOnly { - // The consistency mode for this transaction. If not set, defaults to strong - // consistency. - oneof consistency_selector { - // Reads documents at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 2; - } - } - - // The mode of the transaction. - oneof mode { - // The transaction can only be used for read operations. - ReadOnly read_only = 2; - - // The transaction can be used for both read and write operations. - ReadWrite read_write = 3; - } -} diff --git a/src/proto/proto/google/firestore/v1/document.proto b/src/proto/proto/google/firestore/v1/document.proto deleted file mode 100644 index 5238a943..00000000 --- a/src/proto/proto/google/firestore/v1/document.proto +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
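The `common.proto` messages shown above (DocumentMask, Precondition, TransactionOptions) are the building blocks reused by the request messages later in this patch. Purely as an illustration, and not part of the vendored protos, here is a minimal TypeScript sketch of a conditional-update payload fragment, assuming the standard proto3 JSON mapping (lowerCamelCase field names); the interface and variable names are hypothetical.

```
// Illustrative only: local shapes mirroring DocumentMask and Precondition
// from google/firestore/v1/common.proto, using the proto3 JSON mapping.
interface DocumentMask {
  fieldPaths: string[];
}

interface Precondition {
  // Exactly one of these should be set (the proto `condition_type` oneof).
  exists?: boolean;
  updateTime?: string; // google.protobuf.Timestamp as an RFC 3339 string
}

// Hypothetical fragment of an UpdateDocumentRequest: only touch two fields,
// and fail the write unless the target document already exists.
const updateMask: DocumentMask = { fieldPaths: ["title", "tags"] };
const currentDocument: Precondition = { exists: true };

console.log(JSON.stringify({ updateMask, currentDocument }, null, 2));
```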
- -syntax = "proto3"; - -package google.firestore.v1; - -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/type/latlng.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "DocumentProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// A Firestore document. -// -// Must not exceed 1 MiB - 4 bytes. -message Document { - // The resource name of the document, for example - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1; - - // The document's fields. - // - // The map keys represent field names. - // - // A simple field name contains only characters `a` to `z`, `A` to `Z`, - // `0` to `9`, or `_`, and must not start with `0` to `9`. For example, - // `foo_bar_17`. - // - // Field names matching the regular expression `__.*__` are reserved. Reserved - // field names are forbidden except in certain documented contexts. The map - // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be - // empty. - // - // Field paths may be used in other contexts to refer to structured fields - // defined here. For `map_value`, the field path is represented by the simple - // or quoted field names of the containing fields, delimited by `.`. For - // example, the structured field - // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be - // represented by the field path `foo.x&y`. - // - // Within a field path, a quoted field name starts and ends with `` ` `` and - // may contain any character. Some characters, including `` ` ``, must be - // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and - // `` `bak\`tik` `` represents `` bak`tik ``. - map fields = 2; - - // Output only. The time at which the document was created. - // - // This value increases monotonically when a document is deleted then - // recreated. It can also be compared to values from other documents and - // the `read_time` of a query. - google.protobuf.Timestamp create_time = 3; - - // Output only. The time at which the document was last changed. - // - // This value is initially set to the `create_time` then increases - // monotonically with each change to the document. It can also be - // compared to values from other documents and the `read_time` of a query. - google.protobuf.Timestamp update_time = 4; -} - -// A message that can hold any of the supported value types. -message Value { - // Must have a value set. - oneof value_type { - // A null value. - google.protobuf.NullValue null_value = 11; - - // A boolean value. - bool boolean_value = 1; - - // An integer value. - int64 integer_value = 2; - - // A double value. - double double_value = 3; - - // A timestamp value. - // - // Precise only to microseconds. When stored, any additional precision is - // rounded down. - google.protobuf.Timestamp timestamp_value = 10; - - // A string value. - // - // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. - // Only the first 1,500 bytes of the UTF-8 representation are considered by - // queries. - string string_value = 17; - - // A bytes value. - // - // Must not exceed 1 MiB - 89 bytes. 
- // Only the first 1,500 bytes are considered by queries. - bytes bytes_value = 18; - - // A reference to a document. For example: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string reference_value = 5; - - // A geo point value representing a point on the surface of Earth. - google.type.LatLng geo_point_value = 8; - - // An array value. - // - // Cannot directly contain another array value, though can contain an - // map which contains another array. - ArrayValue array_value = 9; - - // A map value. - MapValue map_value = 6; - } -} - -// An array value. -message ArrayValue { - // Values in the array. - repeated Value values = 1; -} - -// A map value. -message MapValue { - // The map's fields. - // - // The map keys represent field names. Field names matching the regular - // expression `__.*__` are reserved. Reserved field names are forbidden except - // in certain documented contexts. The map keys, represented as UTF-8, must - // not exceed 1,500 bytes and cannot be empty. - map fields = 1; -} diff --git a/src/proto/proto/google/firestore/v1/firestore.proto b/src/proto/proto/google/firestore/v1/firestore.proto deleted file mode 100644 index aefbe716..00000000 --- a/src/proto/proto/google/firestore/v1/firestore.proto +++ /dev/null @@ -1,980 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/firestore/v1/aggregation_result.proto"; -import "google/firestore/v1/common.proto"; -import "google/firestore/v1/document.proto"; -import "google/firestore/v1/query.proto"; -import "google/firestore/v1/write.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// Specification of the Firestore API. - -// The Cloud Firestore service. -// -// Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL -// document database that simplifies storing, syncing, and querying data for -// your mobile, web, and IoT apps at global scale. Its client libraries provide -// live synchronization and offline support, while its security features and -// integrations with Firebase and Google Cloud Platform (GCP) accelerate -// building truly serverless apps. 
-service Firestore { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Gets a single document. - rpc GetDocument(GetDocumentRequest) returns (Document) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*/documents/*/**}" - }; - } - - // Lists documents. - rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" - }; - } - - // Updates or inserts a document. - rpc UpdateDocument(UpdateDocumentRequest) returns (Document) { - option (google.api.http) = { - patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}" - body: "document" - }; - option (google.api.method_signature) = "document,update_mask"; - } - - // Deletes a document. - rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/databases/*/documents/*/**}" - }; - option (google.api.method_signature) = "name"; - } - - // Gets multiple documents. - // - // Documents returned by this method are not guaranteed to be returned in the - // same order that they were requested. - rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:batchGet" - body: "*" - }; - } - - // Starts a new transaction. - rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction" - body: "*" - }; - option (google.api.method_signature) = "database"; - } - - // Commits a transaction, while optionally updating documents. - rpc Commit(CommitRequest) returns (CommitResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:commit" - body: "*" - }; - option (google.api.method_signature) = "database,writes"; - } - - // Rolls back a transaction. - rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:rollback" - body: "*" - }; - option (google.api.method_signature) = "database,transaction"; - } - - // Runs a query. - rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/documents}:runQuery" - body: "*" - additional_bindings { - post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery" - body: "*" - } - }; - } - - // Runs an aggregation query. - // - // Rather than producing [Document][google.firestore.v1.Document] results like [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery], - // this API allows running an aggregation to produce a series of - // [AggregationResult][google.firestore.v1.AggregationResult] server-side. - // - // High-Level Example: - // - // ``` - // -- Return the number of documents in table given a filter. 
- // SELECT COUNT(*) FROM ( SELECT * FROM k where a = true ); - // ``` - rpc RunAggregationQuery(RunAggregationQueryRequest) returns (stream RunAggregationQueryResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery" - body: "*" - additional_bindings { - post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery" - body: "*" - } - }; - } - - // Partitions a query by returning partition cursors that can be used to run - // the query in parallel. The returned partition cursors are split points that - // can be used by RunQuery as starting/end points for the query results. - rpc PartitionQuery(PartitionQueryRequest) returns (PartitionQueryResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery" - body: "*" - additional_bindings { - post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery" - body: "*" - } - }; - } - - // Streams batches of document updates and deletes, in order. - rpc Write(stream WriteRequest) returns (stream WriteResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:write" - body: "*" - }; - } - - // Listens to changes. - rpc Listen(stream ListenRequest) returns (stream ListenResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:listen" - body: "*" - }; - } - - // Lists all the collection IDs underneath a document. - rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds" - body: "*" - additional_bindings { - post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds" - body: "*" - } - }; - option (google.api.method_signature) = "parent"; - } - - // Applies a batch of write operations. - // - // The BatchWrite method does not apply the write operations atomically - // and can apply them out of order. Method does not allow more than one write - // per document. Each write succeeds or fails independently. See the - // [BatchWriteResponse][google.firestore.v1.BatchWriteResponse] for the success status of each write. - // - // If you require an atomically applied set of writes, use - // [Commit][google.firestore.v1.Firestore.Commit] instead. - rpc BatchWrite(BatchWriteRequest) returns (BatchWriteResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/databases/*}/documents:batchWrite" - body: "*" - }; - } - - // Creates a new document. - rpc CreateDocument(CreateDocumentRequest) returns (Document) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}" - body: "document" - }; - } -} - -// The request for [Firestore.GetDocument][google.firestore.v1.Firestore.GetDocument]. -message GetDocumentRequest { - // Required. The resource name of the Document to get. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 2; - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads the document in a transaction. 
- bytes transaction = 3; - - // Reads the version of the document at the given time. - // This may not be older than 270 seconds. - google.protobuf.Timestamp read_time = 5; - } -} - -// The request for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments]. -message ListDocumentsRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms` - // or `messages`. - string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // The maximum number of documents to return. - int32 page_size = 3; - - // The `next_page_token` value returned from a previous List request, if any. - string page_token = 4; - - // The order to sort results by. For example: `priority desc, name`. - string order_by = 6; - - // The fields to return. If not set, returns all fields. - // - // If a document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 7; - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 8; - - // Reads documents as they were at the given time. - // This may not be older than 270 seconds. - google.protobuf.Timestamp read_time = 10; - } - - // If the list should show missing documents. A missing document is a - // document that does not exist but has sub-documents. These documents will - // be returned with a key but will not have fields, [Document.create_time][google.firestore.v1.Document.create_time], - // or [Document.update_time][google.firestore.v1.Document.update_time] set. - // - // Requests with `show_missing` may not specify `where` or - // `order_by`. - bool show_missing = 12; -} - -// The response for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments]. -message ListDocumentsResponse { - // The Documents found. - repeated Document documents = 1; - - // The next page token. - string next_page_token = 2; -} - -// The request for [Firestore.CreateDocument][google.firestore.v1.Firestore.CreateDocument]. -message CreateDocumentRequest { - // Required. The parent resource. For example: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`. - string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // The client-assigned document ID to use for this document. - // - // Optional. If not specified, an ID will be assigned by the service. - string document_id = 3; - - // Required. The document to create. `name` must not be set. - Document document = 4 [(google.api.field_behavior) = REQUIRED]; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. 
- DocumentMask mask = 5; -} - -// The request for [Firestore.UpdateDocument][google.firestore.v1.Firestore.UpdateDocument]. -message UpdateDocumentRequest { - // Required. The updated document. - // Creates the document if it does not already exist. - Document document = 1 [(google.api.field_behavior) = REQUIRED]; - - // The fields to update. - // None of the field paths in the mask may contain a reserved name. - // - // If the document exists on the server and has fields not referenced in the - // mask, they are left unchanged. - // Fields referenced in the mask, but not present in the input document, are - // deleted from the document on the server. - DocumentMask update_mask = 2; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 3; - - // An optional precondition on the document. - // The request will fail if this is set and not met by the target document. - Precondition current_document = 4; -} - -// The request for [Firestore.DeleteDocument][google.firestore.v1.Firestore.DeleteDocument]. -message DeleteDocumentRequest { - // Required. The resource name of the Document to delete. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // An optional precondition on the document. - // The request will fail if this is set and not met by the target document. - Precondition current_document = 2; -} - -// The request for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments]. -message BatchGetDocumentsRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The names of the documents to retrieve. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // The request will fail if any of the document is not a child resource of the - // given `database`. Duplicate names will be elided. - repeated string documents = 2; - - // The fields to return. If not set, returns all fields. - // - // If a document has a field that is not present in this mask, that field will - // not be returned in the response. - DocumentMask mask = 3; - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 4; - - // Starts a new transaction and reads the documents. - // Defaults to a read-only transaction. - // The new transaction ID will be returned as the first response in the - // stream. - TransactionOptions new_transaction = 5; - - // Reads documents as they were at the given time. - // This may not be older than 270 seconds. - google.protobuf.Timestamp read_time = 7; - } -} - -// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments]. -message BatchGetDocumentsResponse { - // A single result. - // This can be empty if the server is just returning a transaction. - oneof result { - // A document that was requested. - Document found = 1; - - // A document name that was requested but does not exist. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string missing = 2; - } - - // The transaction that was started as part of this request. 
- // Will only be set in the first response, and only if - // [BatchGetDocumentsRequest.new_transaction][google.firestore.v1.BatchGetDocumentsRequest.new_transaction] was set in the request. - bytes transaction = 3; - - // The time at which the document was read. - // This may be monotically increasing, in this case the previous documents in - // the result stream are guaranteed not to have changed between their - // read_time and this one. - google.protobuf.Timestamp read_time = 4; -} - -// The request for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction]. -message BeginTransactionRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The options for the transaction. - // Defaults to a read-write transaction. - TransactionOptions options = 2; -} - -// The response for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction]. -message BeginTransactionResponse { - // The transaction that was started. - bytes transaction = 1; -} - -// The request for [Firestore.Commit][google.firestore.v1.Firestore.Commit]. -message CommitRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The writes to apply. - // - // Always executed atomically and in order. - repeated Write writes = 2; - - // If set, applies all writes in this transaction, and commits it. - bytes transaction = 3; -} - -// The response for [Firestore.Commit][google.firestore.v1.Firestore.Commit]. -message CommitResponse { - // The result of applying the writes. - // - // This i-th write result corresponds to the i-th write in the - // request. - repeated WriteResult write_results = 1; - - // The time at which the commit occurred. Any read with an equal or greater - // `read_time` is guaranteed to see the effects of the commit. - google.protobuf.Timestamp commit_time = 2; -} - -// The request for [Firestore.Rollback][google.firestore.v1.Firestore.Rollback]. -message RollbackRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The transaction to roll back. - bytes transaction = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery]. -message RunQueryRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The query to run. - oneof query_type { - // A structured query. - StructuredQuery structured_query = 2; - } - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 5; - - // Starts a new transaction and reads the documents. - // Defaults to a read-only transaction. - // The new transaction ID will be returned as the first response in the - // stream. 
- TransactionOptions new_transaction = 6; - - // Reads documents as they were at the given time. - // This may not be older than 270 seconds. - google.protobuf.Timestamp read_time = 7; - } -} - -// The response for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery]. -message RunQueryResponse { - // The transaction that was started as part of this request. - // Can only be set in the first response, and only if - // [RunQueryRequest.new_transaction][google.firestore.v1.RunQueryRequest.new_transaction] was set in the request. - // If set, no other fields will be set in this response. - bytes transaction = 2; - - // A query result. - // Not set when reporting partial progress. - Document document = 1; - - // The time at which the document was read. This may be monotonically - // increasing; in this case, the previous documents in the result stream are - // guaranteed not to have changed between their `read_time` and this one. - // - // If the query returns no results, a response with `read_time` and no - // `document` will be sent, and this represents the time at which the query - // was run. - google.protobuf.Timestamp read_time = 3; - - // The number of results that have been skipped due to an offset between - // the last response and the current response. - int32 skipped_results = 4; -} - -// The request for [Firestore.RunAggregationQuery][google.firestore.v1.Firestore.RunAggregationQuery]. -message RunAggregationQueryRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The query to run. - oneof query_type { - // An aggregation query. - StructuredAggregationQuery structured_aggregation_query = 2; - } - - // The consistency mode for the query, defaults to strong consistency. - oneof consistency_selector { - // Run the aggregation within an already active transaction. - // - // The value here is the opaque transaction ID to execute the query in. - bytes transaction = 4; - - // Starts a new transaction as part of the query, defaulting to read-only. - // - // The new transaction ID will be returned as the first response in the - // stream. - TransactionOptions new_transaction = 5; - - // Executes the query at the given timestamp. - // - // Requires: - // - // * Cannot be more than 270 seconds in the past. - google.protobuf.Timestamp read_time = 6; - } -} - -// The response for [Firestore.RunAggregationQuery][google.firestore.v1.Firestore.RunAggregationQuery]. -message RunAggregationQueryResponse { - // A single aggregation result. - // - // Not present when reporting partial progress or when the query produced - // zero results. - AggregationResult result = 1; - - // The transaction that was started as part of this request. - // - // Only present on the first response when the request requested to start - // a new transaction. - bytes transaction = 2; - - // The time at which the aggregate value is valid for. - google.protobuf.Timestamp read_time = 3; -} - -// The request for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery]. -message PartitionQueryRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents`. 
- // Document resource names are not supported; only database resource names - // can be specified. - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The query to partition. - oneof query_type { - // A structured query. - // Query must specify collection with all descendants and be ordered by name - // ascending. Other filters, order bys, limits, offsets, and start/end - // cursors are not supported. - StructuredQuery structured_query = 2; - } - - // The desired maximum number of partition points. - // The partitions may be returned across multiple pages of results. - // The number must be positive. The actual number of partitions - // returned may be fewer. - // - // For example, this may be set to one fewer than the number of parallel - // queries to be run, or in running a data pipeline job, one fewer than the - // number of workers or compute instances available. - int64 partition_count = 3; - - // The `next_page_token` value returned from a previous call to - // PartitionQuery that may be used to get an additional set of results. - // There are no ordering guarantees between sets of results. Thus, using - // multiple sets of results will require merging the different result sets. - // - // For example, two subsequent calls using a page_token may return: - // - // * cursor B, cursor M, cursor Q - // * cursor A, cursor U, cursor W - // - // To obtain a complete result set ordered with respect to the results of the - // query supplied to PartitionQuery, the results sets should be merged: - // cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W - string page_token = 4; - - // The maximum number of partitions to return in this call, subject to - // `partition_count`. - // - // For example, if `partition_count` = 10 and `page_size` = 8, the first call - // to PartitionQuery will return up to 8 partitions and a `next_page_token` - // if more results exist. A second call to PartitionQuery will return up to - // 2 partitions, to complete the total of 10 specified in `partition_count`. - int32 page_size = 5; -} - -// The response for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery]. -message PartitionQueryResponse { - // Partition results. - // Each partition is a split point that can be used by RunQuery as a starting - // or end point for the query results. The RunQuery requests must be made with - // the same query supplied to this PartitionQuery request. The partition - // cursors will be ordered according to same ordering as the results of the - // query supplied to PartitionQuery. - // - // For example, if a PartitionQuery request returns partition cursors A and B, - // running the following three queries will return the entire result set of - // the original query: - // - // * query, end_at A - // * query, start_at A, end_at B - // * query, start_at B - // - // An empty result may indicate that the query has too few results to be - // partitioned. - repeated Cursor partitions = 1; - - // A page token that may be used to request an additional set of results, up - // to the number specified by `partition_count` in the PartitionQuery request. - // If blank, there are no more results. - string next_page_token = 2; -} - -// The request for [Firestore.Write][google.firestore.v1.Firestore.Write]. -// -// The first request creates a stream, or resumes an existing one from a token. -// -// When creating a new stream, the server replies with a response containing -// only an ID and a token, to use in the next request. 
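The PartitionQuery comments above spell out how partition cursors are merged and turned into parallel RunQuery calls (end_at A; start_at A, end_at B; start_at B). Below is a minimal sketch of that slicing logic in TypeScript, purely illustrative and using hypothetical local types rather than any generated client code.

```
// Illustrative only: turn an ordered list of PartitionQuery cursors into
// the [startAt, endAt] pairs described in the PartitionQueryResponse comment.
interface Cursor {
  values: unknown[]; // google.firestore.v1.Value entries, shape omitted here
  before?: boolean;
}

interface QuerySlice {
  startAt?: Cursor;
  endAt?: Cursor;
}

function toQuerySlices(partitions: Cursor[]): QuerySlice[] {
  const slices: QuerySlice[] = [];
  for (let i = 0; i <= partitions.length; i++) {
    slices.push({
      startAt: i > 0 ? partitions[i - 1] : undefined,
      endAt: i < partitions.length ? partitions[i] : undefined,
    });
  }
  return slices;
}

// With cursors [A, B] this yields {endAt: A}, {startAt: A, endAt: B},
// {startAt: B}, matching the three-query example in the comment above.
```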
-// -// When resuming a stream, the server first streams any responses later than the -// given token, then a response containing only an up-to-date token, to use in -// the next request. -message WriteRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - // This is only required in the first message. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The ID of the write stream to resume. - // This may only be set in the first message. When left empty, a new write - // stream will be created. - string stream_id = 2; - - // The writes to apply. - // - // Always executed atomically and in order. - // This must be empty on the first request. - // This may be empty on the last request. - // This must not be empty on all other requests. - repeated Write writes = 3; - - // A stream token that was previously sent by the server. - // - // The client should set this field to the token from the most recent - // [WriteResponse][google.firestore.v1.WriteResponse] it has received. This acknowledges that the client has - // received responses up to this token. After sending this token, earlier - // tokens may not be used anymore. - // - // The server may close the stream if there are too many unacknowledged - // responses. - // - // Leave this field unset when creating a new stream. To resume a stream at - // a specific point, set this field and the `stream_id` field. - // - // Leave this field unset when creating a new stream. - bytes stream_token = 4; - - // Labels associated with this write request. - map labels = 5; -} - -// The response for [Firestore.Write][google.firestore.v1.Firestore.Write]. -message WriteResponse { - // The ID of the stream. - // Only set on the first message, when a new stream was created. - string stream_id = 1; - - // A token that represents the position of this response in the stream. - // This can be used by a client to resume the stream at this point. - // - // This field is always set. - bytes stream_token = 2; - - // The result of applying the writes. - // - // This i-th write result corresponds to the i-th write in the - // request. - repeated WriteResult write_results = 3; - - // The time at which the commit occurred. Any read with an equal or greater - // `read_time` is guaranteed to see the effects of the write. - google.protobuf.Timestamp commit_time = 4; -} - -// A request for [Firestore.Listen][google.firestore.v1.Firestore.Listen] -message ListenRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The supported target changes. - oneof target_change { - // A target to add to this stream. - Target add_target = 2; - - // The ID of a target to remove from this stream. - int32 remove_target = 3; - } - - // Labels associated with this target change. - map labels = 4; -} - -// The response for [Firestore.Listen][google.firestore.v1.Firestore.Listen]. -message ListenResponse { - // The supported responses. - oneof response_type { - // Targets have changed. - TargetChange target_change = 2; - - // A [Document][google.firestore.v1.Document] has changed. - DocumentChange document_change = 3; - - // A [Document][google.firestore.v1.Document] has been deleted. - DocumentDelete document_delete = 4; - - // A [Document][google.firestore.v1.Document] has been removed from a target (because it is no longer - // relevant to that target). 
- DocumentRemove document_remove = 6; - - // A filter to apply to the set of documents previously returned for the - // given target. - // - // Returned when documents may have been removed from the given target, but - // the exact documents are unknown. - ExistenceFilter filter = 5; - } -} - -// A specification of a set of documents to listen to. -message Target { - // A target specified by a set of documents names. - message DocumentsTarget { - // The names of the documents to retrieve. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // The request will fail if any of the document is not a child resource of - // the given `database`. Duplicate names will be elided. - repeated string documents = 2; - } - - // A target specified by a query. - message QueryTarget { - // The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1; - - // The query to run. - oneof query_type { - // A structured query. - StructuredQuery structured_query = 2; - } - } - - // The type of target to listen to. - oneof target_type { - // A target specified by a query. - QueryTarget query = 2; - - // A target specified by a set of document names. - DocumentsTarget documents = 3; - } - - // When to start listening. - // - // If not specified, all matching Documents are returned before any - // subsequent changes. - oneof resume_type { - // A resume token from a prior [TargetChange][google.firestore.v1.TargetChange] for an identical target. - // - // Using a resume token with a different target is unsupported and may fail. - bytes resume_token = 4; - - // Start listening after a specific `read_time`. - // - // The client must know the state of matching documents at this time. - google.protobuf.Timestamp read_time = 11; - } - - // The target ID that identifies the target on the stream. Must be a positive - // number and non-zero. - int32 target_id = 5; - - // If the target should be removed once it is current and consistent. - bool once = 6; -} - -// Targets being watched have changed. -message TargetChange { - // The type of change. - enum TargetChangeType { - // No change has occurred. Used only to send an updated `resume_token`. - NO_CHANGE = 0; - - // The targets have been added. - ADD = 1; - - // The targets have been removed. - REMOVE = 2; - - // The targets reflect all changes committed before the targets were added - // to the stream. - // - // This will be sent after or with a `read_time` that is greater than or - // equal to the time at which the targets were added. - // - // Listeners can wait for this change if read-after-write semantics - // are desired. - CURRENT = 3; - - // The targets have been reset, and a new initial state for the targets - // will be returned in subsequent changes. - // - // After the initial state is complete, `CURRENT` will be returned even - // if the target was previously indicated to be `CURRENT`. - RESET = 4; - } - - // The type of change that occurred. - TargetChangeType target_change_type = 1; - - // The target IDs of targets that have changed. - // - // If empty, the change applies to all targets. - // - // The order of the target IDs is not defined. 
- repeated int32 target_ids = 2; - - // The error that resulted in this change, if applicable. - google.rpc.Status cause = 3; - - // A token that can be used to resume the stream for the given `target_ids`, - // or all targets if `target_ids` is empty. - // - // Not set on every target change. - bytes resume_token = 4; - - // The consistent `read_time` for the given `target_ids` (omitted when the - // target_ids are not at a consistent snapshot). - // - // The stream is guaranteed to send a `read_time` with `target_ids` empty - // whenever the entire stream reaches a new consistent snapshot. ADD, - // CURRENT, and RESET messages are guaranteed to (eventually) result in a - // new consistent snapshot (while NO_CHANGE and REMOVE messages are not). - // - // For a given stream, `read_time` is guaranteed to be monotonically - // increasing. - google.protobuf.Timestamp read_time = 6; -} - -// The request for [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. -message ListCollectionIdsRequest { - // Required. The parent document. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The maximum number of results to return. - int32 page_size = 2; - - // A page token. Must be a value from - // [ListCollectionIdsResponse][google.firestore.v1.ListCollectionIdsResponse]. - string page_token = 3; -} - -// The response from [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. -message ListCollectionIdsResponse { - // The collection ids. - repeated string collection_ids = 1; - - // A page token that may be used to continue the list. - string next_page_token = 2; -} - -// The request for [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite]. -message BatchWriteRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The writes to apply. - // - // Method does not apply writes atomically and does not guarantee ordering. - // Each write succeeds or fails independently. You cannot write to the same - // document more than once per request. - repeated Write writes = 2; - - // Labels associated with this batch write. - map labels = 3; -} - -// The response from [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite]. -message BatchWriteResponse { - // The result of applying the writes. - // - // This i-th write result corresponds to the i-th write in the - // request. - repeated WriteResult write_results = 1; - - // The status of applying the writes. - // - // This i-th write status corresponds to the i-th write in the - // request. - repeated google.rpc.Status status = 2; -} diff --git a/src/proto/proto/google/firestore/v1/query.proto b/src/proto/proto/google/firestore/v1/query.proto deleted file mode 100644 index e3d95534..00000000 --- a/src/proto/proto/google/firestore/v1/query.proto +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.firestore.v1; - -import "google/firestore/v1/document.proto"; -import "google/protobuf/wrappers.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "QueryProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// A Firestore query. -message StructuredQuery { - // A selection of a collection, such as `messages as m1`. - message CollectionSelector { - // The collection ID. - // When set, selects only collections with this ID. - string collection_id = 2; - - // When false, selects only collections that are immediate children of - // the `parent` specified in the containing `RunQueryRequest`. - // When true, selects all descendant collections. - bool all_descendants = 3; - } - - // A filter. - message Filter { - // The type of filter. - oneof filter_type { - // A composite filter. - CompositeFilter composite_filter = 1; - - // A filter on a document field. - FieldFilter field_filter = 2; - - // A filter that takes exactly one argument. - UnaryFilter unary_filter = 3; - } - } - - // A filter that merges multiple other filters using the given operator. - message CompositeFilter { - // A composite filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // Documents are required to satisfy all of the combined filters. - AND = 1; - - // Documents are required to satisfy at least one of the combined filters. - OR = 2; - } - - // The operator for combining multiple filters. - Operator op = 1; - - // The list of filters to combine. - // Must contain at least one filter. - repeated Filter filters = 2; - } - - // A filter on a specific field. - message FieldFilter { - // A field filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // The given `field` is less than the given `value`. - // - // Requires: - // - // * That `field` come first in `order_by`. - LESS_THAN = 1; - - // The given `field` is less than or equal to the given `value`. - // - // Requires: - // - // * That `field` come first in `order_by`. - LESS_THAN_OR_EQUAL = 2; - - // The given `field` is greater than the given `value`. - // - // Requires: - // - // * That `field` come first in `order_by`. - GREATER_THAN = 3; - - // The given `field` is greater than or equal to the given `value`. - // - // Requires: - // - // * That `field` come first in `order_by`. - GREATER_THAN_OR_EQUAL = 4; - - // The given `field` is equal to the given `value`. - EQUAL = 5; - - // The given `field` is not equal to the given `value`. - // - // Requires: - // - // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. - // * That `field` comes first in the `order_by`. 
- NOT_EQUAL = 6; - - // The given `field` is an array that contains the given `value`. - ARRAY_CONTAINS = 7; - - // The given `field` is equal to at least one value in the given array. - // - // Requires: - // - // * That `value` is a non-empty `ArrayValue` with at most 10 values. - // * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`. - IN = 8; - - // The given `field` is an array that contains any of the values in the - // given array. - // - // Requires: - // - // * That `value` is a non-empty `ArrayValue` with at most 10 values. - // * No other `IN` or `ARRAY_CONTAINS_ANY` or `NOT_IN`. - ARRAY_CONTAINS_ANY = 9; - - // The value of the `field` is not in the given array. - // - // Requires: - // - // * That `value` is a non-empty `ArrayValue` with at most 10 values. - // * No other `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`, `NOT_EQUAL`, - // `IS_NOT_NULL`, or `IS_NOT_NAN`. - // * That `field` comes first in the `order_by`. - NOT_IN = 10; - } - - // The field to filter by. - FieldReference field = 1; - - // The operator to filter by. - Operator op = 2; - - // The value to compare to. - Value value = 3; - } - - // A filter with a single operand. - message UnaryFilter { - // A unary operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // The given `field` is equal to `NaN`. - IS_NAN = 2; - - // The given `field` is equal to `NULL`. - IS_NULL = 3; - - // The given `field` is not equal to `NaN`. - // - // Requires: - // - // * No other `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. - // * That `field` comes first in the `order_by`. - IS_NOT_NAN = 4; - - // The given `field` is not equal to `NULL`. - // - // Requires: - // - // * A single `NOT_EQUAL`, `NOT_IN`, `IS_NOT_NULL`, or `IS_NOT_NAN`. - // * That `field` comes first in the `order_by`. - IS_NOT_NULL = 5; - } - - // The unary operator to apply. - Operator op = 1; - - // The argument to the filter. - oneof operand_type { - // The field to which to apply the operator. - FieldReference field = 2; - } - } - - // An order on a field. - message Order { - // The field to order by. - FieldReference field = 1; - - // The direction to order by. Defaults to `ASCENDING`. - Direction direction = 2; - } - - // A reference to a field, such as `max(messages.time) as max_time`. - message FieldReference { - string field_path = 2; - } - - // The projection of document's fields to return. - message Projection { - // The fields to return. - // - // If empty, all fields are returned. To only return the name - // of the document, use `['__name__']`. - repeated FieldReference fields = 2; - } - - // A sort direction. - enum Direction { - // Unspecified. - DIRECTION_UNSPECIFIED = 0; - - // Ascending. - ASCENDING = 1; - - // Descending. - DESCENDING = 2; - } - - // The projection to return. - Projection select = 1; - - // The collections to query. - repeated CollectionSelector from = 2; - - // The filter to apply. - Filter where = 3; - - // The order to apply to the query results. - // - // Firestore guarantees a stable ordering through the following rules: - // - // * Any field required to appear in `order_by`, that is not already - // specified in `order_by`, is appended to the order in field name order - // by default. - // * If an order on `__name__` is not specified, it is appended by default. - // - // Fields are appended with the same sort direction as the last order - // specified, or 'ASCENDING' if no order was specified. 
For example: - // - // * `SELECT * FROM Foo ORDER BY A` becomes - // `SELECT * FROM Foo ORDER BY A, __name__` - // * `SELECT * FROM Foo ORDER BY A DESC` becomes - // `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC` - // * `SELECT * FROM Foo WHERE A > 1` becomes - // `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__` - repeated Order order_by = 4; - - // A starting point for the query results. - Cursor start_at = 7; - - // A end point for the query results. - Cursor end_at = 8; - - // The number of results to skip. - // - // Applies before limit, but after all other constraints. Must be >= 0 if - // specified. - int32 offset = 6; - - // The maximum number of results to return. - // - // Applies after all other constraints. - // Must be >= 0 if specified. - google.protobuf.Int32Value limit = 5; -} - -message StructuredAggregationQuery { - // Defines a aggregation that produces a single result. - message Aggregation { - // Count of documents that match the query. - // - // The `COUNT(*)` aggregation function operates on the entire document - // so it does not require a field reference. - message Count { - // Optional. Optional constraint on the maximum number of documents to count. - // - // This provides a way to set an upper bound on the number of documents - // to scan, limiting latency and cost. - // - // High-Level Example: - // - // ``` - // SELECT COUNT_UP_TO(1000) FROM ( SELECT * FROM k ); - // ``` - // - // Requires: - // - // * Must be greater than zero when present. - google.protobuf.Int64Value up_to = 1; - } - - // The type of aggregation to perform, required. - oneof operator { - // Count aggregator. - Count count = 1; - } - - // Required. The name of the field to store the result of the aggregation into. - // - // Requires: - // - // * Must be present. - // * Must be unique across all aggregation aliases. - // * Conform to existing [document field name][google.firestore.v1.Document.fields] limitations. - string alias = 7; - } - - // The base query to aggregate over. - oneof query_type { - // Nested structured query. - StructuredQuery structured_query = 1; - } - - // Optional. Series of aggregations to apply on top of the `structured_query`. - repeated Aggregation aggregations = 3; -} - -// A position in a query result set. -message Cursor { - // The values that represent a position, in the order they appear in - // the order by clause of a query. - // - // Can contain fewer values than specified in the order by clause. - repeated Value values = 1; - - // If the position is just before or just after the given values, relative - // to the sort order defined by the query. - bool before = 2; -} diff --git a/src/proto/proto/google/firestore/v1/write.proto b/src/proto/proto/google/firestore/v1/write.proto deleted file mode 100644 index a9ac9832..00000000 --- a/src/proto/proto/google/firestore/v1/write.proto +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
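The StructuredQuery comments above describe how the implicit `__name__` ordering is appended, e.g. `SELECT * FROM Foo WHERE A > 1` becoming `... ORDER BY A, __name__`. As a rough, non-normative TypeScript sketch (hypothetical local object, standard proto3 JSON mapping assumed), such a query could be expressed as:

```
// Illustrative only: a StructuredQuery payload for
//   SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__
// using lowerCamelCase JSON field names; int64 values serialize as strings.
const structuredQuery = {
  from: [{ collectionId: "Foo", allDescendants: false }],
  where: {
    fieldFilter: {
      field: { fieldPath: "A" },
      op: "GREATER_THAN",
      value: { integerValue: "1" },
    },
  },
  orderBy: [
    { field: { fieldPath: "A" }, direction: "ASCENDING" },
    { field: { fieldPath: "__name__" }, direction: "ASCENDING" },
  ],
  limit: 50, // google.protobuf.Int32Value maps to a plain JSON number
};

console.log(JSON.stringify(structuredQuery, null, 2));
```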
- -syntax = "proto3"; - -package google.firestore.v1; - -import "google/firestore/v1/common.proto"; -import "google/firestore/v1/document.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "WriteProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; -option ruby_package = "Google::Cloud::Firestore::V1"; - -// A write on a document. -message Write { - // The operation to execute. - oneof operation { - // A document to write. - Document update = 1; - - // A document name to delete. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string delete = 2; - - // The name of a document on which to verify the `current_document` - // precondition. - // This only requires read access to the document. - string verify = 5; - - // Applies a transformation to a document. - DocumentTransform transform = 6; - } - - // The fields to update in this write. - // - // This field can be set only when the operation is `update`. - // If the mask is not set for an `update` and the document exists, any - // existing data will be overwritten. - // If the mask is set and the document on the server has fields not covered by - // the mask, they are left unchanged. - // Fields referenced in the mask, but not present in the input document, are - // deleted from the document on the server. - // The field paths in this mask must not contain a reserved field name. - DocumentMask update_mask = 3; - - // The transforms to perform after update. - // - // This field can be set only when the operation is `update`. If present, this - // write is equivalent to performing `update` and `transform` to the same - // document atomically and in order. - repeated DocumentTransform.FieldTransform update_transforms = 7; - - // An optional precondition on the document. - // - // The write will fail if this is set and not met by the target document. - Precondition current_document = 4; -} - -// A transformation of a document. -message DocumentTransform { - // A transformation of a field of the document. - message FieldTransform { - // A value that is calculated by the server. - enum ServerValue { - // Unspecified. This value must not be used. - SERVER_VALUE_UNSPECIFIED = 0; - - // The time at which the server processed the request, with millisecond - // precision. If used on multiple fields (same or different documents) in - // a transaction, all the fields will get the same server timestamp. - REQUEST_TIME = 1; - } - - // The path of the field. See [Document.fields][google.firestore.v1.Document.fields] for the field path syntax - // reference. - string field_path = 1; - - // The transformation to apply on the field. - oneof transform_type { - // Sets the field to the given server value. - ServerValue set_to_server_value = 2; - - // Adds the given value to the field's current value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. - // If either of the given value or the current field value are doubles, - // both values will be interpreted as doubles. 
Double arithmetic and - // representation of double values follow IEEE 754 semantics. - // If there is positive/negative integer overflow, the field is resolved - // to the largest magnitude positive/negative integer. - Value increment = 3; - - // Sets the field to the maximum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. - // If a maximum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the larger operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and - // zero input value is always the stored value. - // The maximum of any numeric value x and NaN is NaN. - Value maximum = 4; - - // Sets the field to the minimum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the input value. - // If a minimum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the smaller operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and - // zero input value is always the stored value. - // The minimum of any numeric value x and NaN is NaN. - Value minimum = 5; - - // Append the given elements in order if they are not already present in - // the current field value. - // If the field is not an array, or if the field does not yet exist, it is - // first set to the empty array. - // - // Equivalent numbers of different types (e.g. 3L and 3.0) are - // considered equal when checking if a value is missing. - // NaN is equal to NaN, and Null is equal to Null. - // If the input contains multiple equivalent values, only the first will - // be considered. - // - // The corresponding transform_result will be the null value. - ArrayValue append_missing_elements = 6; - - // Remove all of the given elements from the array in the field. - // If the field is not an array, or if the field does not yet exist, it is - // set to the empty array. - // - // Equivalent numbers of the different types (e.g. 3L and 3.0) are - // considered equal when deciding whether an element should be removed. - // NaN is equal to NaN, and Null is equal to Null. - // This will remove all equivalent values if there are duplicates. - // - // The corresponding transform_result will be the null value. - ArrayValue remove_all_from_array = 7; - } - } - - // The name of the document to transform. - string document = 1; - - // The list of transformations to apply to the fields of the document, in - // order. - // This must not be empty. - repeated FieldTransform field_transforms = 2; -} - -// The result of applying a write. -message WriteResult { - // The last update time of the document after applying the write. Not set - // after a `delete`. - // - // If the write did not actually change the document, this will be the - // previous update_time. 
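As a concrete illustration of the Write and DocumentTransform semantics above, a minimal TypeScript sketch of one Write in proto3 JSON form, assuming placeholder resource names and field paths (none of these identifiers come from this patch):

    // Hypothetical write: update `title`, atomically increment `views` by 1, and
    // stamp `updatedAt` with the server's request time. If `views` is missing or
    // not numeric, the increment transform sets it to 1, as described above.
    const write = {
      update: {
        name: "projects/my-project/databases/(default)/documents/posts/post1",
        fields: { title: { stringValue: "Hello" } },
      },
      updateMask: { fieldPaths: ["title"] }, // only `title` is overwritten
      updateTransforms: [
        { fieldPath: "views", increment: { integerValue: "1" } },
        { fieldPath: "updatedAt", setToServerValue: "REQUEST_TIME" },
      ],
      currentDocument: { exists: true }, // precondition: fail if the document is missing
    };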
- google.protobuf.Timestamp update_time = 1; - - // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1.DocumentTransform.FieldTransform], in the - // same order. - repeated Value transform_results = 2; -} - -// A [Document][google.firestore.v1.Document] has changed. -// -// May be the result of multiple [writes][google.firestore.v1.Write], including deletes, that -// ultimately resulted in a new value for the [Document][google.firestore.v1.Document]. -// -// Multiple [DocumentChange][google.firestore.v1.DocumentChange] messages may be returned for the same logical -// change, if multiple targets are affected. -message DocumentChange { - // The new state of the [Document][google.firestore.v1.Document]. - // - // If `mask` is set, contains only fields that were updated or added. - Document document = 1; - - // A set of target IDs of targets that match this document. - repeated int32 target_ids = 5; - - // A set of target IDs for targets that no longer match this document. - repeated int32 removed_target_ids = 6; -} - -// A [Document][google.firestore.v1.Document] has been deleted. -// -// May be the result of multiple [writes][google.firestore.v1.Write], including updates, the -// last of which deleted the [Document][google.firestore.v1.Document]. -// -// Multiple [DocumentDelete][google.firestore.v1.DocumentDelete] messages may be returned for the same logical -// delete, if multiple targets are affected. -message DocumentDelete { - // The resource name of the [Document][google.firestore.v1.Document] that was deleted. - string document = 1; - - // A set of target IDs for targets that previously matched this entity. - repeated int32 removed_target_ids = 6; - - // The read timestamp at which the delete was observed. - // - // Greater or equal to the `commit_time` of the delete. - google.protobuf.Timestamp read_time = 4; -} - -// A [Document][google.firestore.v1.Document] has been removed from the view of the targets. -// -// Sent if the document is no longer relevant to a target and is out of view. -// Can be sent instead of a DocumentDelete or a DocumentChange if the server -// can not send the new value of the document. -// -// Multiple [DocumentRemove][google.firestore.v1.DocumentRemove] messages may be returned for the same logical -// write or delete, if multiple targets are affected. -message DocumentRemove { - // The resource name of the [Document][google.firestore.v1.Document] that has gone out of view. - string document = 1; - - // A set of target IDs for targets that previously matched this document. - repeated int32 removed_target_ids = 2; - - // The read timestamp at which the remove was observed. - // - // Greater or equal to the `commit_time` of the change/delete/remove. - google.protobuf.Timestamp read_time = 4; -} - -// A digest of all the documents that match a given target. -message ExistenceFilter { - // The target ID to which this filter applies. - int32 target_id = 1; - - // The total count of documents that match [target_id][google.firestore.v1.ExistenceFilter.target_id]. - // - // If different from the count of documents in the client that match, the - // client must manually determine which documents no longer match the target. 
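A minimal sketch of how a client might use the ExistenceFilter described above; the function name and the reset strategy are illustrative assumptions, not part of the API:

    // The server reports how many documents match the target. If that count
    // disagrees with the client's local count, the client can no longer trust
    // its view and must re-determine which documents still match (typically by
    // resetting and re-listening to the target).
    function existenceFilterMismatch(serverCount: number, localCount: number): boolean {
      return serverCount !== localCount;
    }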
- int32 count = 2; -} diff --git a/src/proto/proto/google/protobuf/any.proto b/src/proto/proto/google/protobuf/any.proto deleted file mode 100644 index c9be8541..00000000 --- a/src/proto/proto/google/protobuf/any.proto +++ /dev/null @@ -1,155 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... 
-// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/src/proto/proto/google/protobuf/descriptor.proto b/src/proto/proto/google/protobuf/descriptor.proto deleted file mode 100644 index d5d794f5..00000000 --- a/src/proto/proto/google/protobuf/descriptor.proto +++ /dev/null @@ -1,882 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. 
-message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - } - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - } - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? 
- optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default = false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default = false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. 
-// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default = false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default = false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default = SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. 
- optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default = false]; - optional bool java_generic_services = 17 [default = false]; - optional bool py_generic_services = 18 [default = false]; - optional bool php_generic_services = 42 [default = false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default = false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default = false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - optional string ruby_package = 45; - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. - // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. 
- // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default = false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default = false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default = false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. 
- // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default = false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default = false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default = false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default = false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default = false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = 34 - [default = IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. 
returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. 
- repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed = true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed = true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. 
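As a small aid to the span encoding documented above, a hedged TypeScript helper that turns a zero-based SourceCodeInfo span into a 1-based, human-readable range (it assumes only the three-or-four element layout described above):

    // span = [startLine, startCol, endCol]           (end line == start line), or
    // span = [startLine, startCol, endLine, endCol]
    // All values are zero-based; add 1 before displaying them to a user.
    function formatSpan(span: number[]): string {
      const startLine = span[0];
      const startCol = span[1];
      const endLine = span.length === 4 ? span[2] : startLine;
      const endCol = span.length === 4 ? span[3] : span[2];
      return `${startLine + 1}:${startCol + 1} - ${endLine + 1}:${endCol + 1}`;
    }

    // formatSpan([2, 0, 2, 25]) === "3:1 - 3:26"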
-message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed = true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/src/proto/proto/google/protobuf/empty.proto b/src/proto/proto/google/protobuf/empty.proto deleted file mode 100644 index 03cacd23..00000000 --- a/src/proto/proto/google/protobuf/empty.proto +++ /dev/null @@ -1,52 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/empty"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "EmptyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. 
-message Empty {} diff --git a/src/proto/proto/google/protobuf/struct.proto b/src/proto/proto/google/protobuf/struct.proto deleted file mode 100644 index 7d7808e7..00000000 --- a/src/proto/proto/google/protobuf/struct.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -message Struct { - // Unordered map of dynamically typed values. - map fields = 1; -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -message Value { - // The kind of value. - oneof kind { - // Represents a null value. - NullValue null_value = 1; - // Represents a double value. - double number_value = 2; - // Represents a string value. - string string_value = 3; - // Represents a boolean value. - bool bool_value = 4; - // Represents a structured value. 
- Struct struct_value = 5; - // Represents a repeated `Value`. - ListValue list_value = 6; - } -} - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -enum NullValue { - // Null value. - NULL_VALUE = 0; -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -message ListValue { - // Repeated field of dynamically typed values. - repeated Value values = 1; -} diff --git a/src/proto/proto/google/protobuf/timestamp.proto b/src/proto/proto/google/protobuf/timestamp.proto deleted file mode 100644 index 2b9e26a9..00000000 --- a/src/proto/proto/google/protobuf/timestamp.proto +++ /dev/null @@ -1,137 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. 
- int32 nanos = 2; -} diff --git a/src/proto/proto/google/protobuf/wrappers.proto b/src/proto/proto/google/protobuf/wrappers.proto deleted file mode 100644 index 9ee41e38..00000000 --- a/src/proto/proto/google/protobuf/wrappers.proto +++ /dev/null @@ -1,123 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. -// -// These wrappers have no meaningful use within repeated fields as they lack -// the ability to detect presence on individual elements. -// These wrappers have no meaningful use within a map or a oneof since -// individual entries of a map or fields of a oneof can already detect presence. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/wrappers"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. 
- uint64 value = 1; -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. - bytes value = 1; -} diff --git a/src/proto/proto/google/rpc/status.proto b/src/proto/proto/google/rpc/status.proto deleted file mode 100644 index 3b1f7a93..00000000 --- a/src/proto/proto/google/rpc/status.proto +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/any.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; -option java_multiple_files = true; -option java_outer_classname = "StatusProto"; -option java_package = "com.google.rpc"; -option objc_class_prefix = "RPC"; - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). Each `Status` message contains -// three pieces of data: error code, error message, and error details. -// -// You can find out more about this error model and how to work with it in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). -message Status { - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - int32 code = 1; - - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. - string message = 2; - - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - repeated google.protobuf.Any details = 3; -} diff --git a/src/proto/proto/google/type/latlng.proto b/src/proto/proto/google/type/latlng.proto deleted file mode 100644 index 9231456e..00000000 --- a/src/proto/proto/google/type/latlng.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.type; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/type/latlng;latlng"; -option java_multiple_files = true; -option java_outer_classname = "LatLngProto"; -option java_package = "com.google.type"; -option objc_class_prefix = "GTP"; - -// An object that represents a latitude/longitude pair. This is expressed as a -// pair of doubles to represent degrees latitude and degrees longitude. Unless -// specified otherwise, this must conform to the -// WGS84 -// standard. Values must be within normalized ranges. -message LatLng { - // The latitude in degrees. It must be in the range [-90.0, +90.0]. - double latitude = 1; - - // The longitude in degrees. It must be in the range [-180.0, +180.0]. - double longitude = 2; -} diff --git a/src/proto/proto/protos.json b/src/proto/proto/protos.json deleted file mode 100644 index 1586bfb8..00000000 --- a/src/proto/proto/protos.json +++ /dev/null @@ -1,2825 +0,0 @@ -{ - "nested": { - "google": { - "nested": { - "protobuf": { - "options": { - "csharp_namespace": "Google.Protobuf.WellKnownTypes", - "go_package": "github.com/golang/protobuf/ptypes/wrappers", - "java_package": "com.google.protobuf", - "java_outer_classname": "WrappersProto", - "java_multiple_files": true, - "objc_class_prefix": "GPB", - "cc_enable_arenas": true, - "optimize_for": "SPEED" - }, - "nested": { - "Timestamp": { - "fields": { - "seconds": { - "type": "int64", - "id": 1 - }, - "nanos": { - "type": "int32", - "id": 2 - } - } - }, - "FileDescriptorSet": { - "fields": { - "file": { - "rule": "repeated", - "type": "FileDescriptorProto", - "id": 1 - } - } - }, - "FileDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "package": { - "type": "string", - "id": 2 - }, - "dependency": { - "rule": "repeated", - "type": "string", - "id": 3 - }, - "publicDependency": { - "rule": "repeated", - "type": "int32", - "id": 10, - "options": { - "packed": false - } - }, - "weakDependency": { - "rule": "repeated", - "type": "int32", - "id": 11, - "options": { - "packed": false - } - }, - "messageType": { - "rule": "repeated", - "type": "DescriptorProto", - "id": 4 - }, - "enumType": { - "rule": "repeated", - "type": "EnumDescriptorProto", - "id": 5 - }, - "service": { - "rule": "repeated", - "type": "ServiceDescriptorProto", - "id": 6 - }, - "extension": { - "rule": "repeated", - "type": "FieldDescriptorProto", - "id": 7 - }, - "options": { - "type": "FileOptions", - "id": 8 - }, - "sourceCodeInfo": { - "type": "SourceCodeInfo", - "id": 9 - }, - "syntax": { - "type": "string", - "id": 12 - } - } - }, - "DescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "field": { - "rule": "repeated", - "type": "FieldDescriptorProto", - "id": 2 - }, - "extension": { - "rule": "repeated", - "type": "FieldDescriptorProto", - "id": 6 - }, - "nestedType": { - "rule": "repeated", - "type": "DescriptorProto", - "id": 3 - }, - "enumType": { - "rule": "repeated", - "type": "EnumDescriptorProto", - "id": 4 - }, - "extensionRange": { - "rule": "repeated", - "type": 
"ExtensionRange", - "id": 5 - }, - "oneofDecl": { - "rule": "repeated", - "type": "OneofDescriptorProto", - "id": 8 - }, - "options": { - "type": "MessageOptions", - "id": 7 - }, - "reservedRange": { - "rule": "repeated", - "type": "ReservedRange", - "id": 9 - }, - "reservedName": { - "rule": "repeated", - "type": "string", - "id": 10 - } - }, - "nested": { - "ExtensionRange": { - "fields": { - "start": { - "type": "int32", - "id": 1 - }, - "end": { - "type": "int32", - "id": 2 - } - } - }, - "ReservedRange": { - "fields": { - "start": { - "type": "int32", - "id": 1 - }, - "end": { - "type": "int32", - "id": 2 - } - } - } - } - }, - "FieldDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "number": { - "type": "int32", - "id": 3 - }, - "label": { - "type": "Label", - "id": 4 - }, - "type": { - "type": "Type", - "id": 5 - }, - "typeName": { - "type": "string", - "id": 6 - }, - "extendee": { - "type": "string", - "id": 2 - }, - "defaultValue": { - "type": "string", - "id": 7 - }, - "oneofIndex": { - "type": "int32", - "id": 9 - }, - "jsonName": { - "type": "string", - "id": 10 - }, - "options": { - "type": "FieldOptions", - "id": 8 - } - }, - "nested": { - "Type": { - "values": { - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18 - } - }, - "Label": { - "values": { - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3 - } - } - } - }, - "OneofDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "options": { - "type": "OneofOptions", - "id": 2 - } - } - }, - "EnumDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "value": { - "rule": "repeated", - "type": "EnumValueDescriptorProto", - "id": 2 - }, - "options": { - "type": "EnumOptions", - "id": 3 - } - } - }, - "EnumValueDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "number": { - "type": "int32", - "id": 2 - }, - "options": { - "type": "EnumValueOptions", - "id": 3 - } - } - }, - "ServiceDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "method": { - "rule": "repeated", - "type": "MethodDescriptorProto", - "id": 2 - }, - "options": { - "type": "ServiceOptions", - "id": 3 - } - } - }, - "MethodDescriptorProto": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "inputType": { - "type": "string", - "id": 2 - }, - "outputType": { - "type": "string", - "id": 3 - }, - "options": { - "type": "MethodOptions", - "id": 4 - }, - "clientStreaming": { - "type": "bool", - "id": 5 - }, - "serverStreaming": { - "type": "bool", - "id": 6 - } - } - }, - "FileOptions": { - "fields": { - "javaPackage": { - "type": "string", - "id": 1 - }, - "javaOuterClassname": { - "type": "string", - "id": 8 - }, - "javaMultipleFiles": { - "type": "bool", - "id": 10 - }, - "javaGenerateEqualsAndHash": { - "type": "bool", - "id": 20, - "options": { - "deprecated": true - } - }, - "javaStringCheckUtf8": { - "type": "bool", - "id": 27 - }, - "optimizeFor": { - "type": "OptimizeMode", - "id": 9, - "options": { - "default": "SPEED" - } - }, - "goPackage": { - "type": "string", - "id": 11 - }, - "ccGenericServices": { - "type": "bool", - "id": 16 - }, - "javaGenericServices": { - 
"type": "bool", - "id": 17 - }, - "pyGenericServices": { - "type": "bool", - "id": 18 - }, - "deprecated": { - "type": "bool", - "id": 23 - }, - "ccEnableArenas": { - "type": "bool", - "id": 31 - }, - "objcClassPrefix": { - "type": "string", - "id": 36 - }, - "csharpNamespace": { - "type": "string", - "id": 37 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ], - "reserved": [ - [ - 38, - 38 - ] - ], - "nested": { - "OptimizeMode": { - "values": { - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3 - } - } - } - }, - "MessageOptions": { - "fields": { - "messageSetWireFormat": { - "type": "bool", - "id": 1 - }, - "noStandardDescriptorAccessor": { - "type": "bool", - "id": 2 - }, - "deprecated": { - "type": "bool", - "id": 3 - }, - "mapEntry": { - "type": "bool", - "id": 7 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ], - "reserved": [ - [ - 8, - 8 - ] - ] - }, - "FieldOptions": { - "fields": { - "ctype": { - "type": "CType", - "id": 1, - "options": { - "default": "STRING" - } - }, - "packed": { - "type": "bool", - "id": 2 - }, - "jstype": { - "type": "JSType", - "id": 6, - "options": { - "default": "JS_NORMAL" - } - }, - "lazy": { - "type": "bool", - "id": 5 - }, - "deprecated": { - "type": "bool", - "id": 3 - }, - "weak": { - "type": "bool", - "id": 10 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ], - "reserved": [ - [ - 4, - 4 - ] - ], - "nested": { - "CType": { - "values": { - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2 - } - }, - "JSType": { - "values": { - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2 - } - } - } - }, - "OneofOptions": { - "fields": { - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ] - }, - "EnumOptions": { - "fields": { - "allowAlias": { - "type": "bool", - "id": 2 - }, - "deprecated": { - "type": "bool", - "id": 3 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ] - }, - "EnumValueOptions": { - "fields": { - "deprecated": { - "type": "bool", - "id": 1 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ] - }, - "ServiceOptions": { - "fields": { - "deprecated": { - "type": "bool", - "id": 33 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ] - }, - "MethodOptions": { - "fields": { - "deprecated": { - "type": "bool", - "id": 33 - }, - "uninterpretedOption": { - "rule": "repeated", - "type": "UninterpretedOption", - "id": 999 - } - }, - "extensions": [ - [ - 1000, - 536870911 - ] - ] - }, - "UninterpretedOption": { - "fields": { - "name": { - "rule": "repeated", - "type": "NamePart", - "id": 2 - }, - "identifierValue": { - "type": "string", - "id": 3 - }, - "positiveIntValue": { - "type": "uint64", - "id": 4 - }, - "negativeIntValue": { - "type": "int64", - "id": 5 - }, - "doubleValue": { - "type": "double", - "id": 6 - }, - "stringValue": { - "type": "bytes", - "id": 7 - }, - "aggregateValue": { - "type": "string", - "id": 8 - 
} - }, - "nested": { - "NamePart": { - "fields": { - "namePart": { - "rule": "required", - "type": "string", - "id": 1 - }, - "isExtension": { - "rule": "required", - "type": "bool", - "id": 2 - } - } - } - } - }, - "SourceCodeInfo": { - "fields": { - "location": { - "rule": "repeated", - "type": "Location", - "id": 1 - } - }, - "nested": { - "Location": { - "fields": { - "path": { - "rule": "repeated", - "type": "int32", - "id": 1 - }, - "span": { - "rule": "repeated", - "type": "int32", - "id": 2 - }, - "leadingComments": { - "type": "string", - "id": 3 - }, - "trailingComments": { - "type": "string", - "id": 4 - }, - "leadingDetachedComments": { - "rule": "repeated", - "type": "string", - "id": 6 - } - } - } - } - }, - "GeneratedCodeInfo": { - "fields": { - "annotation": { - "rule": "repeated", - "type": "Annotation", - "id": 1 - } - }, - "nested": { - "Annotation": { - "fields": { - "path": { - "rule": "repeated", - "type": "int32", - "id": 1 - }, - "sourceFile": { - "type": "string", - "id": 2 - }, - "begin": { - "type": "int32", - "id": 3 - }, - "end": { - "type": "int32", - "id": 4 - } - } - } - } - }, - "Struct": { - "fields": { - "fields": { - "keyType": "string", - "type": "Value", - "id": 1 - } - } - }, - "Value": { - "oneofs": { - "kind": { - "oneof": [ - "nullValue", - "numberValue", - "stringValue", - "boolValue", - "structValue", - "listValue" - ] - } - }, - "fields": { - "nullValue": { - "type": "NullValue", - "id": 1 - }, - "numberValue": { - "type": "double", - "id": 2 - }, - "stringValue": { - "type": "string", - "id": 3 - }, - "boolValue": { - "type": "bool", - "id": 4 - }, - "structValue": { - "type": "Struct", - "id": 5 - }, - "listValue": { - "type": "ListValue", - "id": 6 - } - } - }, - "NullValue": { - "values": { - "NULL_VALUE": 0 - } - }, - "ListValue": { - "fields": { - "values": { - "rule": "repeated", - "type": "Value", - "id": 1 - } - } - }, - "Empty": { - "fields": {} - }, - "DoubleValue": { - "fields": { - "value": { - "type": "double", - "id": 1 - } - } - }, - "FloatValue": { - "fields": { - "value": { - "type": "float", - "id": 1 - } - } - }, - "Int64Value": { - "fields": { - "value": { - "type": "int64", - "id": 1 - } - } - }, - "UInt64Value": { - "fields": { - "value": { - "type": "uint64", - "id": 1 - } - } - }, - "Int32Value": { - "fields": { - "value": { - "type": "int32", - "id": 1 - } - } - }, - "UInt32Value": { - "fields": { - "value": { - "type": "uint32", - "id": 1 - } - } - }, - "BoolValue": { - "fields": { - "value": { - "type": "bool", - "id": 1 - } - } - }, - "StringValue": { - "fields": { - "value": { - "type": "string", - "id": 1 - } - } - }, - "BytesValue": { - "fields": { - "value": { - "type": "bytes", - "id": 1 - } - } - }, - "Any": { - "fields": { - "typeUrl": { - "type": "string", - "id": 1 - }, - "value": { - "type": "bytes", - "id": 2 - } - } - } - } - }, - "firestore": { - "nested": { - "v1": { - "options": { - "csharp_namespace": "Google.Cloud.Firestore.V1", - "go_package": "google.golang.org/genproto/googleapis/firestore/v1;firestore", - "java_multiple_files": true, - "java_outer_classname": "WriteProto", - "java_package": "com.google.firestore.v1", - "objc_class_prefix": "GCFS", - "php_namespace": "Google\\Cloud\\Firestore\\V1", - "ruby_package": "Google::Cloud::Firestore::V1" - }, - "nested": { - "AggregationResult": { - "fields": { - "aggregateFields": { - "keyType": "string", - "type": "Value", - "id": 2 - } - } - }, - "DocumentMask": { - "fields": { - "fieldPaths": { - "rule": "repeated", - "type": "string", - "id": 1 - } - 
} - }, - "Precondition": { - "oneofs": { - "conditionType": { - "oneof": [ - "exists", - "updateTime" - ] - } - }, - "fields": { - "exists": { - "type": "bool", - "id": 1 - }, - "updateTime": { - "type": "google.protobuf.Timestamp", - "id": 2 - } - } - }, - "TransactionOptions": { - "oneofs": { - "mode": { - "oneof": [ - "readOnly", - "readWrite" - ] - } - }, - "fields": { - "readOnly": { - "type": "ReadOnly", - "id": 2 - }, - "readWrite": { - "type": "ReadWrite", - "id": 3 - } - }, - "nested": { - "ReadWrite": { - "fields": { - "retryTransaction": { - "type": "bytes", - "id": 1 - } - } - }, - "ReadOnly": { - "oneofs": { - "consistencySelector": { - "oneof": [ - "readTime" - ] - } - }, - "fields": { - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 2 - } - } - } - } - }, - "Document": { - "fields": { - "name": { - "type": "string", - "id": 1 - }, - "fields": { - "keyType": "string", - "type": "Value", - "id": 2 - }, - "createTime": { - "type": "google.protobuf.Timestamp", - "id": 3 - }, - "updateTime": { - "type": "google.protobuf.Timestamp", - "id": 4 - } - } - }, - "Value": { - "oneofs": { - "valueType": { - "oneof": [ - "nullValue", - "booleanValue", - "integerValue", - "doubleValue", - "timestampValue", - "stringValue", - "bytesValue", - "referenceValue", - "geoPointValue", - "arrayValue", - "mapValue" - ] - } - }, - "fields": { - "nullValue": { - "type": "google.protobuf.NullValue", - "id": 11 - }, - "booleanValue": { - "type": "bool", - "id": 1 - }, - "integerValue": { - "type": "int64", - "id": 2 - }, - "doubleValue": { - "type": "double", - "id": 3 - }, - "timestampValue": { - "type": "google.protobuf.Timestamp", - "id": 10 - }, - "stringValue": { - "type": "string", - "id": 17 - }, - "bytesValue": { - "type": "bytes", - "id": 18 - }, - "referenceValue": { - "type": "string", - "id": 5 - }, - "geoPointValue": { - "type": "google.type.LatLng", - "id": 8 - }, - "arrayValue": { - "type": "ArrayValue", - "id": 9 - }, - "mapValue": { - "type": "MapValue", - "id": 6 - } - } - }, - "ArrayValue": { - "fields": { - "values": { - "rule": "repeated", - "type": "Value", - "id": 1 - } - } - }, - "MapValue": { - "fields": { - "fields": { - "keyType": "string", - "type": "Value", - "id": 1 - } - } - }, - "Firestore": { - "options": { - "(google.api.default_host)": "firestore.googleapis.com", - "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore" - }, - "methods": { - "GetDocument": { - "requestType": "GetDocumentRequest", - "responseType": "Document", - "options": { - "(google.api.http).get": "/v1/{name=projects/*/databases/*/documents/*/**}" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "get": "/v1/{name=projects/*/databases/*/documents/*/**}" - } - } - ] - }, - "ListDocuments": { - "requestType": "ListDocumentsRequest", - "responseType": "ListDocumentsResponse", - "options": { - "(google.api.http).get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" - } - } - ] - }, - "UpdateDocument": { - "requestType": "UpdateDocumentRequest", - "responseType": "Document", - "options": { - "(google.api.http).patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}", - "(google.api.http).body": "document", - "(google.api.method_signature)": "document,update_mask" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "patch": 
"/v1/{document.name=projects/*/databases/*/documents/*/**}", - "body": "document" - } - }, - { - "(google.api.method_signature)": "document,update_mask" - } - ] - }, - "DeleteDocument": { - "requestType": "DeleteDocumentRequest", - "responseType": "google.protobuf.Empty", - "options": { - "(google.api.http).delete": "/v1/{name=projects/*/databases/*/documents/*/**}", - "(google.api.method_signature)": "name" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "delete": "/v1/{name=projects/*/databases/*/documents/*/**}" - } - }, - { - "(google.api.method_signature)": "name" - } - ] - }, - "BatchGetDocuments": { - "requestType": "BatchGetDocumentsRequest", - "responseType": "BatchGetDocumentsResponse", - "responseStream": true, - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchGet", - "(google.api.http).body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:batchGet", - "body": "*" - } - } - ] - }, - "BeginTransaction": { - "requestType": "BeginTransactionRequest", - "responseType": "BeginTransactionResponse", - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction", - "(google.api.http).body": "*", - "(google.api.method_signature)": "database" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction", - "body": "*" - } - }, - { - "(google.api.method_signature)": "database" - } - ] - }, - "Commit": { - "requestType": "CommitRequest", - "responseType": "CommitResponse", - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:commit", - "(google.api.http).body": "*", - "(google.api.method_signature)": "database,writes" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:commit", - "body": "*" - } - }, - { - "(google.api.method_signature)": "database,writes" - } - ] - }, - "Rollback": { - "requestType": "RollbackRequest", - "responseType": "google.protobuf.Empty", - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:rollback", - "(google.api.http).body": "*", - "(google.api.method_signature)": "database,transaction" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:rollback", - "body": "*" - } - }, - { - "(google.api.method_signature)": "database,transaction" - } - ] - }, - "RunQuery": { - "requestType": "RunQueryRequest", - "responseType": "RunQueryResponse", - "responseStream": true, - "options": { - "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runQuery", - "(google.api.http).body": "*", - "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery", - "(google.api.http).additional_bindings.body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{parent=projects/*/databases/*/documents}:runQuery", - "body": "*", - "additional_bindings": { - "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery", - "body": "*" - } - } - } - ] - }, - "RunAggregationQuery": { - "requestType": "RunAggregationQueryRequest", - "responseType": "RunAggregationQueryResponse", - "responseStream": true, - "options": { - "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery", - "(google.api.http).body": "*", - 
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery", - "(google.api.http).additional_bindings.body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery", - "body": "*", - "additional_bindings": { - "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery", - "body": "*" - } - } - } - ] - }, - "PartitionQuery": { - "requestType": "PartitionQueryRequest", - "responseType": "PartitionQueryResponse", - "options": { - "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery", - "(google.api.http).body": "*", - "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery", - "(google.api.http).additional_bindings.body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery", - "body": "*", - "additional_bindings": { - "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery", - "body": "*" - } - } - } - ] - }, - "Write": { - "requestType": "WriteRequest", - "requestStream": true, - "responseType": "WriteResponse", - "responseStream": true, - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:write", - "(google.api.http).body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:write", - "body": "*" - } - } - ] - }, - "Listen": { - "requestType": "ListenRequest", - "requestStream": true, - "responseType": "ListenResponse", - "responseStream": true, - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:listen", - "(google.api.http).body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:listen", - "body": "*" - } - } - ] - }, - "ListCollectionIds": { - "requestType": "ListCollectionIdsRequest", - "responseType": "ListCollectionIdsResponse", - "options": { - "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds", - "(google.api.http).body": "*", - "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds", - "(google.api.http).additional_bindings.body": "*", - "(google.api.method_signature)": "parent" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds", - "body": "*", - "additional_bindings": { - "post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds", - "body": "*" - } - } - }, - { - "(google.api.method_signature)": "parent" - } - ] - }, - "BatchWrite": { - "requestType": "BatchWriteRequest", - "responseType": "BatchWriteResponse", - "options": { - "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchWrite", - "(google.api.http).body": "*" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": "/v1/{database=projects/*/databases/*}/documents:batchWrite", - "body": "*" - } - } - ] - }, - "CreateDocument": { - "requestType": "CreateDocumentRequest", - "responseType": "Document", - "options": { - "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}", - "(google.api.http).body": "document" - }, - "parsedOptions": [ - { - "(google.api.http)": { - "post": 
"/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}", - "body": "document" - } - } - ] - } - } - }, - "GetDocumentRequest": { - "oneofs": { - "consistencySelector": { - "oneof": [ - "transaction", - "readTime" - ] - } - }, - "fields": { - "name": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "mask": { - "type": "DocumentMask", - "id": 2 - }, - "transaction": { - "type": "bytes", - "id": 3 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 5 - } - } - }, - "ListDocumentsRequest": { - "oneofs": { - "consistencySelector": { - "oneof": [ - "transaction", - "readTime" - ] - } - }, - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "collectionId": { - "type": "string", - "id": 2, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "pageSize": { - "type": "int32", - "id": 3 - }, - "pageToken": { - "type": "string", - "id": 4 - }, - "orderBy": { - "type": "string", - "id": 6 - }, - "mask": { - "type": "DocumentMask", - "id": 7 - }, - "transaction": { - "type": "bytes", - "id": 8 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 10 - }, - "showMissing": { - "type": "bool", - "id": 12 - } - } - }, - "ListDocumentsResponse": { - "fields": { - "documents": { - "rule": "repeated", - "type": "Document", - "id": 1 - }, - "nextPageToken": { - "type": "string", - "id": 2 - } - } - }, - "CreateDocumentRequest": { - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "collectionId": { - "type": "string", - "id": 2, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "documentId": { - "type": "string", - "id": 3 - }, - "document": { - "type": "Document", - "id": 4, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "mask": { - "type": "DocumentMask", - "id": 5 - } - } - }, - "UpdateDocumentRequest": { - "fields": { - "document": { - "type": "Document", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "updateMask": { - "type": "DocumentMask", - "id": 2 - }, - "mask": { - "type": "DocumentMask", - "id": 3 - }, - "currentDocument": { - "type": "Precondition", - "id": 4 - } - } - }, - "DeleteDocumentRequest": { - "fields": { - "name": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "currentDocument": { - "type": "Precondition", - "id": 2 - } - } - }, - "BatchGetDocumentsRequest": { - "oneofs": { - "consistencySelector": { - "oneof": [ - "transaction", - "newTransaction", - "readTime" - ] - } - }, - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "documents": { - "rule": "repeated", - "type": "string", - "id": 2 - }, - "mask": { - "type": "DocumentMask", - "id": 3 - }, - "transaction": { - "type": "bytes", - "id": 4 - }, - "newTransaction": { - "type": "TransactionOptions", - "id": 5 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 7 - } - } - }, - "BatchGetDocumentsResponse": { - "oneofs": { - "result": { - "oneof": [ - "found", - "missing" - ] - } - }, - "fields": { - "found": { - "type": "Document", - "id": 1 - }, - "missing": { - "type": "string", - "id": 2 - }, - "transaction": { - "type": "bytes", - "id": 3 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 4 - } - } - }, - "BeginTransactionRequest": 
{ - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "options": { - "type": "TransactionOptions", - "id": 2 - } - } - }, - "BeginTransactionResponse": { - "fields": { - "transaction": { - "type": "bytes", - "id": 1 - } - } - }, - "CommitRequest": { - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "writes": { - "rule": "repeated", - "type": "Write", - "id": 2 - }, - "transaction": { - "type": "bytes", - "id": 3 - } - } - }, - "CommitResponse": { - "fields": { - "writeResults": { - "rule": "repeated", - "type": "WriteResult", - "id": 1 - }, - "commitTime": { - "type": "google.protobuf.Timestamp", - "id": 2 - } - } - }, - "RollbackRequest": { - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "transaction": { - "type": "bytes", - "id": 2, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - } - } - }, - "RunQueryRequest": { - "oneofs": { - "queryType": { - "oneof": [ - "structuredQuery" - ] - }, - "consistencySelector": { - "oneof": [ - "transaction", - "newTransaction", - "readTime" - ] - } - }, - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "structuredQuery": { - "type": "StructuredQuery", - "id": 2 - }, - "transaction": { - "type": "bytes", - "id": 5 - }, - "newTransaction": { - "type": "TransactionOptions", - "id": 6 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 7 - } - } - }, - "RunQueryResponse": { - "fields": { - "transaction": { - "type": "bytes", - "id": 2 - }, - "document": { - "type": "Document", - "id": 1 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 3 - }, - "skippedResults": { - "type": "int32", - "id": 4 - } - } - }, - "RunAggregationQueryRequest": { - "oneofs": { - "queryType": { - "oneof": [ - "structuredAggregationQuery" - ] - }, - "consistencySelector": { - "oneof": [ - "transaction", - "newTransaction", - "readTime" - ] - } - }, - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "structuredAggregationQuery": { - "type": "StructuredAggregationQuery", - "id": 2 - }, - "transaction": { - "type": "bytes", - "id": 4 - }, - "newTransaction": { - "type": "TransactionOptions", - "id": 5 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 6 - } - } - }, - "RunAggregationQueryResponse": { - "fields": { - "result": { - "type": "AggregationResult", - "id": 1 - }, - "transaction": { - "type": "bytes", - "id": 2 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 3 - } - } - }, - "PartitionQueryRequest": { - "oneofs": { - "queryType": { - "oneof": [ - "structuredQuery" - ] - } - }, - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "structuredQuery": { - "type": "StructuredQuery", - "id": 2 - }, - "partitionCount": { - "type": "int64", - "id": 3 - }, - "pageToken": { - "type": "string", - "id": 4 - }, - "pageSize": { - "type": "int32", - "id": 5 - } - } - }, - "PartitionQueryResponse": { - "fields": { - "partitions": { - "rule": "repeated", - "type": "Cursor", - "id": 1 - }, - "nextPageToken": { - "type": "string", - "id": 2 - } - } - }, - "WriteRequest": { - "fields": { - "database": { - "type": "string", - "id": 1, - "options": 
{ - "(google.api.field_behavior)": "REQUIRED" - } - }, - "streamId": { - "type": "string", - "id": 2 - }, - "writes": { - "rule": "repeated", - "type": "Write", - "id": 3 - }, - "streamToken": { - "type": "bytes", - "id": 4 - }, - "labels": { - "keyType": "string", - "type": "string", - "id": 5 - } - } - }, - "WriteResponse": { - "fields": { - "streamId": { - "type": "string", - "id": 1 - }, - "streamToken": { - "type": "bytes", - "id": 2 - }, - "writeResults": { - "rule": "repeated", - "type": "WriteResult", - "id": 3 - }, - "commitTime": { - "type": "google.protobuf.Timestamp", - "id": 4 - } - } - }, - "ListenRequest": { - "oneofs": { - "targetChange": { - "oneof": [ - "addTarget", - "removeTarget" - ] - } - }, - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "addTarget": { - "type": "Target", - "id": 2 - }, - "removeTarget": { - "type": "int32", - "id": 3 - }, - "labels": { - "keyType": "string", - "type": "string", - "id": 4 - } - } - }, - "ListenResponse": { - "oneofs": { - "responseType": { - "oneof": [ - "targetChange", - "documentChange", - "documentDelete", - "documentRemove", - "filter" - ] - } - }, - "fields": { - "targetChange": { - "type": "TargetChange", - "id": 2 - }, - "documentChange": { - "type": "DocumentChange", - "id": 3 - }, - "documentDelete": { - "type": "DocumentDelete", - "id": 4 - }, - "documentRemove": { - "type": "DocumentRemove", - "id": 6 - }, - "filter": { - "type": "ExistenceFilter", - "id": 5 - } - } - }, - "Target": { - "oneofs": { - "targetType": { - "oneof": [ - "query", - "documents" - ] - }, - "resumeType": { - "oneof": [ - "resumeToken", - "readTime" - ] - } - }, - "fields": { - "query": { - "type": "QueryTarget", - "id": 2 - }, - "documents": { - "type": "DocumentsTarget", - "id": 3 - }, - "resumeToken": { - "type": "bytes", - "id": 4 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 11 - }, - "targetId": { - "type": "int32", - "id": 5 - }, - "once": { - "type": "bool", - "id": 6 - } - }, - "nested": { - "DocumentsTarget": { - "fields": { - "documents": { - "rule": "repeated", - "type": "string", - "id": 2 - } - } - }, - "QueryTarget": { - "oneofs": { - "queryType": { - "oneof": [ - "structuredQuery" - ] - } - }, - "fields": { - "parent": { - "type": "string", - "id": 1 - }, - "structuredQuery": { - "type": "StructuredQuery", - "id": 2 - } - } - } - } - }, - "TargetChange": { - "fields": { - "targetChangeType": { - "type": "TargetChangeType", - "id": 1 - }, - "targetIds": { - "rule": "repeated", - "type": "int32", - "id": 2 - }, - "cause": { - "type": "google.rpc.Status", - "id": 3 - }, - "resumeToken": { - "type": "bytes", - "id": 4 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 6 - } - }, - "nested": { - "TargetChangeType": { - "values": { - "NO_CHANGE": 0, - "ADD": 1, - "REMOVE": 2, - "CURRENT": 3, - "RESET": 4 - } - } - } - }, - "ListCollectionIdsRequest": { - "fields": { - "parent": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, - "pageSize": { - "type": "int32", - "id": 2 - }, - "pageToken": { - "type": "string", - "id": 3 - } - } - }, - "ListCollectionIdsResponse": { - "fields": { - "collectionIds": { - "rule": "repeated", - "type": "string", - "id": 1 - }, - "nextPageToken": { - "type": "string", - "id": 2 - } - } - }, - "BatchWriteRequest": { - "fields": { - "database": { - "type": "string", - "id": 1, - "options": { - "(google.api.field_behavior)": "REQUIRED" - } - }, 
- "writes": { - "rule": "repeated", - "type": "Write", - "id": 2 - }, - "labels": { - "keyType": "string", - "type": "string", - "id": 3 - } - } - }, - "BatchWriteResponse": { - "fields": { - "writeResults": { - "rule": "repeated", - "type": "WriteResult", - "id": 1 - }, - "status": { - "rule": "repeated", - "type": "google.rpc.Status", - "id": 2 - } - } - }, - "StructuredQuery": { - "fields": { - "select": { - "type": "Projection", - "id": 1 - }, - "from": { - "rule": "repeated", - "type": "CollectionSelector", - "id": 2 - }, - "where": { - "type": "Filter", - "id": 3 - }, - "orderBy": { - "rule": "repeated", - "type": "Order", - "id": 4 - }, - "startAt": { - "type": "Cursor", - "id": 7 - }, - "endAt": { - "type": "Cursor", - "id": 8 - }, - "offset": { - "type": "int32", - "id": 6 - }, - "limit": { - "type": "google.protobuf.Int32Value", - "id": 5 - } - }, - "nested": { - "CollectionSelector": { - "fields": { - "collectionId": { - "type": "string", - "id": 2 - }, - "allDescendants": { - "type": "bool", - "id": 3 - } - } - }, - "Filter": { - "oneofs": { - "filterType": { - "oneof": [ - "compositeFilter", - "fieldFilter", - "unaryFilter" - ] - } - }, - "fields": { - "compositeFilter": { - "type": "CompositeFilter", - "id": 1 - }, - "fieldFilter": { - "type": "FieldFilter", - "id": 2 - }, - "unaryFilter": { - "type": "UnaryFilter", - "id": 3 - } - } - }, - "CompositeFilter": { - "fields": { - "op": { - "type": "Operator", - "id": 1 - }, - "filters": { - "rule": "repeated", - "type": "Filter", - "id": 2 - } - }, - "nested": { - "Operator": { - "values": { - "OPERATOR_UNSPECIFIED": 0, - "AND": 1, - "OR": 2 - } - } - } - }, - "FieldFilter": { - "fields": { - "field": { - "type": "FieldReference", - "id": 1 - }, - "op": { - "type": "Operator", - "id": 2 - }, - "value": { - "type": "Value", - "id": 3 - } - }, - "nested": { - "Operator": { - "values": { - "OPERATOR_UNSPECIFIED": 0, - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "NOT_EQUAL": 6, - "ARRAY_CONTAINS": 7, - "IN": 8, - "ARRAY_CONTAINS_ANY": 9, - "NOT_IN": 10 - } - } - } - }, - "UnaryFilter": { - "oneofs": { - "operandType": { - "oneof": [ - "field" - ] - } - }, - "fields": { - "op": { - "type": "Operator", - "id": 1 - }, - "field": { - "type": "FieldReference", - "id": 2 - } - }, - "nested": { - "Operator": { - "values": { - "OPERATOR_UNSPECIFIED": 0, - "IS_NAN": 2, - "IS_NULL": 3, - "IS_NOT_NAN": 4, - "IS_NOT_NULL": 5 - } - } - } - }, - "Order": { - "fields": { - "field": { - "type": "FieldReference", - "id": 1 - }, - "direction": { - "type": "Direction", - "id": 2 - } - } - }, - "FieldReference": { - "fields": { - "fieldPath": { - "type": "string", - "id": 2 - } - } - }, - "Projection": { - "fields": { - "fields": { - "rule": "repeated", - "type": "FieldReference", - "id": 2 - } - } - }, - "Direction": { - "values": { - "DIRECTION_UNSPECIFIED": 0, - "ASCENDING": 1, - "DESCENDING": 2 - } - } - } - }, - "StructuredAggregationQuery": { - "oneofs": { - "queryType": { - "oneof": [ - "structuredQuery" - ] - } - }, - "fields": { - "structuredQuery": { - "type": "StructuredQuery", - "id": 1 - }, - "aggregations": { - "rule": "repeated", - "type": "Aggregation", - "id": 3 - } - }, - "nested": { - "Aggregation": { - "oneofs": { - "operator": { - "oneof": [ - "count" - ] - } - }, - "fields": { - "count": { - "type": "Count", - "id": 1 - }, - "alias": { - "type": "string", - "id": 7 - } - }, - "nested": { - "Count": { - "fields": { - "upTo": { - "type": "google.protobuf.Int64Value", 
- "id": 1 - } - } - } - } - } - } - }, - "Cursor": { - "fields": { - "values": { - "rule": "repeated", - "type": "Value", - "id": 1 - }, - "before": { - "type": "bool", - "id": 2 - } - } - }, - "Write": { - "oneofs": { - "operation": { - "oneof": [ - "update", - "delete", - "verify", - "transform" - ] - } - }, - "fields": { - "update": { - "type": "Document", - "id": 1 - }, - "delete": { - "type": "string", - "id": 2 - }, - "verify": { - "type": "string", - "id": 5 - }, - "transform": { - "type": "DocumentTransform", - "id": 6 - }, - "updateMask": { - "type": "DocumentMask", - "id": 3 - }, - "updateTransforms": { - "rule": "repeated", - "type": "DocumentTransform.FieldTransform", - "id": 7 - }, - "currentDocument": { - "type": "Precondition", - "id": 4 - } - } - }, - "DocumentTransform": { - "fields": { - "document": { - "type": "string", - "id": 1 - }, - "fieldTransforms": { - "rule": "repeated", - "type": "FieldTransform", - "id": 2 - } - }, - "nested": { - "FieldTransform": { - "oneofs": { - "transformType": { - "oneof": [ - "setToServerValue", - "increment", - "maximum", - "minimum", - "appendMissingElements", - "removeAllFromArray" - ] - } - }, - "fields": { - "fieldPath": { - "type": "string", - "id": 1 - }, - "setToServerValue": { - "type": "ServerValue", - "id": 2 - }, - "increment": { - "type": "Value", - "id": 3 - }, - "maximum": { - "type": "Value", - "id": 4 - }, - "minimum": { - "type": "Value", - "id": 5 - }, - "appendMissingElements": { - "type": "ArrayValue", - "id": 6 - }, - "removeAllFromArray": { - "type": "ArrayValue", - "id": 7 - } - }, - "nested": { - "ServerValue": { - "values": { - "SERVER_VALUE_UNSPECIFIED": 0, - "REQUEST_TIME": 1 - } - } - } - } - } - }, - "WriteResult": { - "fields": { - "updateTime": { - "type": "google.protobuf.Timestamp", - "id": 1 - }, - "transformResults": { - "rule": "repeated", - "type": "Value", - "id": 2 - } - } - }, - "DocumentChange": { - "fields": { - "document": { - "type": "Document", - "id": 1 - }, - "targetIds": { - "rule": "repeated", - "type": "int32", - "id": 5 - }, - "removedTargetIds": { - "rule": "repeated", - "type": "int32", - "id": 6 - } - } - }, - "DocumentDelete": { - "fields": { - "document": { - "type": "string", - "id": 1 - }, - "removedTargetIds": { - "rule": "repeated", - "type": "int32", - "id": 6 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 4 - } - } - }, - "DocumentRemove": { - "fields": { - "document": { - "type": "string", - "id": 1 - }, - "removedTargetIds": { - "rule": "repeated", - "type": "int32", - "id": 2 - }, - "readTime": { - "type": "google.protobuf.Timestamp", - "id": 4 - } - } - }, - "ExistenceFilter": { - "fields": { - "targetId": { - "type": "int32", - "id": 1 - }, - "count": { - "type": "int32", - "id": 2 - } - } - } - } - } - } - }, - "api": { - "options": { - "go_package": "google.golang.org/genproto/googleapis/api/annotations;annotations", - "java_multiple_files": true, - "java_outer_classname": "HttpProto", - "java_package": "com.google.api", - "objc_class_prefix": "GAPI", - "cc_enable_arenas": true - }, - "nested": { - "http": { - "type": "HttpRule", - "id": 72295728, - "extend": "google.protobuf.MethodOptions" - }, - "Http": { - "fields": { - "rules": { - "rule": "repeated", - "type": "HttpRule", - "id": 1 - } - } - }, - "HttpRule": { - "oneofs": { - "pattern": { - "oneof": [ - "get", - "put", - "post", - "delete", - "patch", - "custom" - ] - } - }, - "fields": { - "get": { - "type": "string", - "id": 2 - }, - "put": { - "type": "string", - "id": 3 - }, - "post": { - 
"type": "string", - "id": 4 - }, - "delete": { - "type": "string", - "id": 5 - }, - "patch": { - "type": "string", - "id": 6 - }, - "custom": { - "type": "CustomHttpPattern", - "id": 8 - }, - "selector": { - "type": "string", - "id": 1 - }, - "body": { - "type": "string", - "id": 7 - }, - "additionalBindings": { - "rule": "repeated", - "type": "HttpRule", - "id": 11 - } - } - }, - "CustomHttpPattern": { - "fields": { - "kind": { - "type": "string", - "id": 1 - }, - "path": { - "type": "string", - "id": 2 - } - } - }, - "methodSignature": { - "rule": "repeated", - "type": "string", - "id": 1051, - "extend": "google.protobuf.MethodOptions" - }, - "defaultHost": { - "type": "string", - "id": 1049, - "extend": "google.protobuf.ServiceOptions" - }, - "oauthScopes": { - "type": "string", - "id": 1050, - "extend": "google.protobuf.ServiceOptions" - }, - "fieldBehavior": { - "rule": "repeated", - "type": "google.api.FieldBehavior", - "id": 1052, - "extend": "google.protobuf.FieldOptions" - }, - "FieldBehavior": { - "values": { - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, - "UNORDERED_LIST": 6, - "NON_EMPTY_DEFAULT": 7 - } - } - } - }, - "type": { - "options": { - "cc_enable_arenas": true, - "go_package": "google.golang.org/genproto/googleapis/type/latlng;latlng", - "java_multiple_files": true, - "java_outer_classname": "LatLngProto", - "java_package": "com.google.type", - "objc_class_prefix": "GTP" - }, - "nested": { - "LatLng": { - "fields": { - "latitude": { - "type": "double", - "id": 1 - }, - "longitude": { - "type": "double", - "id": 2 - } - } - } - } - }, - "rpc": { - "options": { - "cc_enable_arenas": true, - "go_package": "google.golang.org/genproto/googleapis/rpc/status;status", - "java_multiple_files": true, - "java_outer_classname": "StatusProto", - "java_package": "com.google.rpc", - "objc_class_prefix": "RPC" - }, - "nested": { - "Status": { - "fields": { - "code": { - "type": "int32", - "id": 1 - }, - "message": { - "type": "string", - "id": 2 - }, - "details": { - "rule": "repeated", - "type": "google.protobuf.Any", - "id": 3 - } - } - } - } - } - } - } - } -} diff --git a/src/proto/proto/update.sh b/src/proto/proto/update.sh deleted file mode 100755 index ebff7dc9..00000000 --- a/src/proto/proto/update.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail -IFS=$'\n\t' - -# Variables -PROTOS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -WORK_DIR=`mktemp -d` -PBJS="$(npm bin)/pbjs" - -# deletes the temp directory on exit -function cleanup { - rm -rf "$WORK_DIR" - echo "Deleted temp working directory $WORK_DIR" -} - -# register the cleanup function to be called on the EXIT signal -trap cleanup EXIT - -# Enter work dir -pushd "$WORK_DIR" - -# Clone necessary git repos. 
-git clone --depth 1 https://github.com/googleapis/googleapis.git -git clone --depth 1 https://github.com/google/protobuf.git - -# Copy necessary protos. -mkdir -p "${PROTOS_DIR}/google/api" -cp googleapis/google/api/{annotations.proto,http.proto,client.proto,field_behavior.proto} \ - "${PROTOS_DIR}/google/api/" - -mkdir -p "${PROTOS_DIR}/google/firestore/v1" -cp googleapis/google/firestore/v1/*.proto \ - "${PROTOS_DIR}/google/firestore/v1/" - -mkdir -p "${PROTOS_DIR}/google/firestore/admin" -cp -rf googleapis/google/firestore/admin/* \ - "${PROTOS_DIR}/google/firestore/admin/" - -mkdir -p "${PROTOS_DIR}/google/rpc" -cp googleapis/google/rpc/status.proto \ - "${PROTOS_DIR}/google/rpc/" - -mkdir -p "${PROTOS_DIR}/google/type" -cp googleapis/google/type/latlng.proto \ - "${PROTOS_DIR}/google/type/" - -# Hack in `verify` support -ex "${PROTOS_DIR}/google/firestore/v1/write.proto" < Result<(DbId, TxId)> { @@ -235,7 +235,6 @@ mod tests { }; let result = sdk.submit_mutation(&mutation).await; assert!(result.is_ok()); - println!("{:?}", result.unwrap()); let ten_millis = time::Duration::from_millis(1000); thread::sleep(ten_millis); count = count + 1; diff --git a/src/sdk/src/store_sdk.rs b/src/sdk/src/store_sdk.rs index fe7c3aab..61103882 100644 --- a/src/sdk/src/store_sdk.rs +++ b/src/sdk/src/store_sdk.rs @@ -20,13 +20,13 @@ use chrono::Utc; use db3_crypto::{db3_address::DB3Address, db3_signer::Db3MultiSchemeSigner}; use db3_proto::db3_account_proto::Account; use db3_proto::db3_bill_proto::Bill; +use db3_proto::db3_database_proto::Database; use db3_proto::db3_node_proto::{ storage_node_client::StorageNodeClient, BatchGetKey, BatchGetValue, CloseSessionRequest, GetAccountRequest, GetKeyRequest, GetRangeRequest, GetSessionInfoRequest, OpenSessionRequest, OpenSessionResponse, QueryBillKey, QueryBillRequest, Range as DB3Range, RangeKey, RangeValue, - SessionIdentifier, + SessionIdentifier, ShowDatabaseRequest, }; - use db3_proto::db3_session_proto::{CloseSessionPayload, OpenSessionPayload, QuerySessionInfo}; use db3_session::session_manager::{SessionPool, SessionStatus}; use num_traits::cast::FromPrimitive; @@ -53,6 +53,61 @@ impl StoreSDK { } } + async fn keep_session(&mut self) -> std::result::Result { + if let Some(token) = self.session_pool.get_last_token() { + match self.session_pool.get_session_mut(token.as_ref()) { + Some(session) => { + if session.get_session_query_count() > 2000 { + // close session + self.close_session(&token).await?; + let response = self.open_session().await?; + Ok(response.session_token) + } else { + Ok(token) + } + } + None => Err(Status::not_found(format!( + "Fail to query, session with token {token} not found" + ))), + } + } else { + let response = self.open_session().await?; + Ok(response.session_token) + } + } + + /// + /// get the information of database with a hex format address + /// + pub async fn get_database( + &mut self, + addr: &str, + ) -> std::result::Result, Status> { + let token = self.keep_session().await?; + match self.session_pool.get_session_mut(token.as_ref()) { + Some(session) => { + if session.check_session_running() { + let r = ShowDatabaseRequest { + session_token: token.to_string(), + address: addr.to_string(), + }; + let request = tonic::Request::new(r); + let mut client = self.client.as_ref().clone(); + let response = client.show_database(request).await?.into_inner(); + session.increase_query(1); + Ok(response.db) + } else { + Err(Status::permission_denied( + "Fail to query in this session. 
Please restart query session",
+                    ))
+                }
+            }
+            None => Err(Status::not_found(format!(
+                "Fail to query, session with token {token} not found"
+            ))),
+        }
+    }
+
     pub async fn open_session(&mut self) -> std::result::Result<OpenSessionResponse, Status> {
         let payload = OpenSessionPayload {
             header: Uuid::new_v4().to_string(),
@@ -71,7 +126,6 @@ impl StoreSDK {
             payload: buf.as_ref().to_vec(),
             signature: signature.as_ref().to_vec(),
         };
-
         let request = tonic::Request::new(r);
         let mut client = self.client.as_ref().clone();
         let response = client.open_query_session(request).await?.into_inner();
diff --git a/src/session/src/session_manager.rs b/src/session/src/session_manager.rs
index df1a2b0c..fd4c9712 100644
--- a/src/session/src/session_manager.rs
+++ b/src/session/src/session_manager.rs
@@ -119,6 +119,14 @@ impl SessionPool {
     pub fn get_pool_size(&self) -> usize {
         self.session_pool.len()
     }
+
+    pub fn get_last_token(&self) -> Option<String> {
+        if let Some(k) = self.session_pool.keys().next() {
+            Some(k.to_string())
+        } else {
+            None
+        }
+    }
 }
 
 pub struct SessionStore {
@@ -137,6 +145,7 @@ impl SessionStore {
             sid: 0,
         }
     }
+
     fn gen_token(&self) -> String {
         Uuid::new_v4().to_string()
     }
@@ -280,9 +289,11 @@ impl SessionManager {
     pub fn get_session_query_count(&self) -> i32 {
         self.session_info.query_count
     }
+
     pub fn check_session_running(&mut self) -> bool {
         matches!(self.check_session_status(), SessionStatus::Running)
     }
+
     pub fn check_session_status(&mut self) -> &SessionStatus {
         match self.status {
             SessionStatus::Running => {
diff --git a/src/storage/src/db_key.rs b/src/storage/src/db_key.rs
index 93568ead..8067a309 100644
--- a/src/storage/src/db_key.rs
+++ b/src/storage/src/db_key.rs
@@ -37,7 +37,7 @@ impl DbKey {
     ///
     pub fn decode(data: &[u8]) -> Result {
         const MIN_KEY_TOTAL_LEN: usize = DBID_LENGTH + DATABASE.len();
-        if data.len() <= MIN_KEY_TOTAL_LEN {
+        if data.len() < MIN_KEY_TOTAL_LEN {
             return Err(DB3Error::KeyCodecError(
                 "the length of data is invalid".to_string(),
             ));
diff --git a/src/storage/src/db_store.rs b/src/storage/src/db_store.rs
index e187a814..0bb2e115 100644
--- a/src/storage/src/db_store.rs
+++ b/src/storage/src/db_store.rs
@@ -28,7 +28,7 @@ use std::collections::HashSet;
 use std::collections::LinkedList;
 use std::ops::Range;
 use std::pin::Pin;
-use tracing::{info, warn};
+use tracing::warn;
 
 pub struct DbStore {}
 
diff --git a/tools/start_localnet.sh b/tools/start_localnet.sh
index 41afec6e..9f51d70c 100644
--- a/tools/start_localnet.sh
+++ b/tools/start_localnet.sh
@@ -33,7 +33,7 @@ then
 rm -rf db
 fi
 ./tendermint init
-../target/${BUILD_MODE}/db3 start >db3.log 2>&1 &
+../target/${BUILD_MODE}/db3 start -v >db3.log 2>&1 &
 sleep 1
 ./tendermint unsafe_reset_all && ./tendermint start
 sleep 1
From 7d8a60020dd43c09123c06394ddbbfad9ef7f2a0 Mon Sep 17 00:00:00 2001
From: imotai
Date: Wed, 1 Feb 2023 18:26:40 +0800
Subject: [PATCH 7/9] feat: add console usage
---
 README.md | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 97212ccd..c63e3c0c 100644
--- a/README.md
+++ b/README.md
@@ -75,7 +75,46 @@ cd db3 && bash install_env.sh && cargo build
 cd tools && sh start_localnet.sh
 ```
 
-### Start building
+### Use Console
+
+Start db3 console
+
+```shell
+./target/debug/db3 console
+db3>-$ new-db
+database address                            | transaction id
+--------------------------------------------+----------------------------------------------
+0xa9f5c8170aad7a0f924d89c6edacae6db24ef57d  | 0ALy/hH7CQe9lv294K6dOxGP14xWHsbRs+/pXBZa8oU=
+```
+
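The `show-db` command that follows is backed by the `StoreSDK::get_database` call added earlier in this patch series. A minimal sketch of invoking it directly is shown below; the crate path `db3_sdk::store_sdk::StoreSDK`, the use of `tonic::Status`, and the already-constructed `sdk` value are assumptions for illustration, not something these patches define.

```rust
// Sketch only: drives the new StoreSDK::get_database API from this patch series.
// Crate paths and SDK construction are assumed; only the method name and its
// Option<Database> result follow the diff above.
use db3_proto::db3_database_proto::Database;
use db3_sdk::store_sdk::StoreSDK;
use tonic::Status;

async fn show_db(sdk: &mut StoreSDK, addr: &str) -> Result<Option<Database>, Status> {
    // `addr` is the hex database address printed by the console's `new-db`.
    let db = sdk.get_database(addr).await?;
    match &db {
        Some(found) => println!("database {} exists: {:?}", addr, found),
        None => println!("no database found at {}", addr),
    }
    Ok(db)
}
```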
+Show database
+
+```shell
+db3>-$ show-db --addr 0x7e16cb6524e2fc21ae9bf2d7ee18b05767b9dc33
+ database address                           |               sender address               |            releated transactions            | collections
+--------------------------------------------+--------------------------------------------+----------------------------------------------+-------------
+ 0x7e16cb6524e2fc21ae9bf2d7ee18b05767b9dc33 | 0x96bdb8e20fbd831fcb37dde9f81930a82ab5436b | EMYw64xlI2q4v1MShoKw3T60asNbWJ9//ca75M3JO3Q= |
+```
+
+Add a collection to database
+
+```shell
+db3>$ new-collection --addr 0xcfb524677673af15edebbec018b16d42d87b1251 --name books --index '{"name":"idx1","fields":[{"field_path":"test1","value_mode":{"Order":1}}]}'
+send add collection done with tx
+3V7r7VRg+9zUXeGNmqRR0YdVXWtBSl4sk+Z50h9BrOc=
+
+```
+
+Show collections in database
+
+```shell
+db3>-$ show-collection --addr 0xcfb524677673af15edebbec018b16d42d87b1251
+ name  |                                    index
+-------+----------------------------------------------------------------------------
+ books | {"name":"idx1","fields":[{"field_path":"test1","value_mode":{"Order":1}}]}
+```
+
+### Use DB3.js
 
 ```typescript
 /*
From 453169e0e54037ec8a3cc6e4eb00ae093077ad13 Mon Sep 17 00:00:00 2001
From: imotai
Date: Wed, 1 Feb 2023 20:00:16 +0800
Subject: [PATCH 8/9] fix: remove pg_wire
---
 .gitmodules        | 3 ---
 thirdparty/pg_wire | 1 -
 2 files changed, 4 deletions(-)
 delete mode 160000 thirdparty/pg_wire

diff --git a/.gitmodules b/.gitmodules
index 7a36ea64..309f9742 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
-[submodule "thirdparty/pg_wire"]
-    path = thirdparty/pg_wire
-    url = https://github.com/rtstore/pg_wire.git
 [submodule "thirdparty/msql-srv"]
     path = thirdparty/msql-srv
     url = https://github.com/imotai/msql-srv.git
diff --git a/thirdparty/pg_wire b/thirdparty/pg_wire
deleted file mode 160000
index 6d0f1ea2..00000000
--- a/thirdparty/pg_wire
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6d0f1ea25c152ec5c285fcce8ec6b43e00be55dd
From 7249410ad95460b62c389c06faccb8d7324b968b Mon Sep 17 00:00:00 2001
From: imotai
Date: Wed, 1 Feb 2023 21:35:32 +0800
Subject: [PATCH 9/9] fix: fix key setup error
---
 src/node/tests/node_test.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/node/tests/node_test.rs b/src/node/tests/node_test.rs
index f48c5093..977d10ae 100644
--- a/src/node/tests/node_test.rs
+++ b/src/node/tests/node_test.rs
@@ -24,6 +24,7 @@ mod node_integration {
     fn get_mutation_sdk() -> MutationSDK {
         let public_grpc_url = "http://127.0.0.1:26659";
+        db3_cmd::keystore::KeyStore::recover_keypair().unwrap();
         // create storage node sdk
         let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap();
         let signer = Db3MultiSchemeSigner::new(kp);
@@ -99,6 +100,7 @@ mod node_integration {
         let nonce = get_a_random_nonce();
         let json_rpc_url = "http://127.0.0.1:26670";
         let client = awc::Client::default();
+        db3_cmd::keystore::KeyStore::recover_keypair().unwrap();
         let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap();
         let signer = Db3MultiSchemeSigner::new(kp);
         let kv = KvPair {
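The final fix above comes down to call order: `recover_keypair` has to run before `get_keypair` so the test keystore is populated. A minimal sketch of that order, using only the calls visible in the diff (the helper name and the panic-on-error handling are illustrative, not part of the patch):

```rust
// Key-setup order implied by the patch above; the helper name is illustrative.
use db3_crypto::db3_signer::Db3MultiSchemeSigner;

fn build_test_signer() -> Db3MultiSchemeSigner {
    // Make sure a keypair exists in the keystore before anything tries to read it;
    // skipping this step is presumably the "key setup error" the patch fixes.
    db3_cmd::keystore::KeyStore::recover_keypair().unwrap();
    // Now the keypair can be loaded and wrapped in a signer, as the tests do.
    let kp = db3_cmd::keystore::KeyStore::get_keypair().unwrap();
    Db3MultiSchemeSigner::new(kp)
}
```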