feat: publish CopyBackup protos to external customers #855

Merged (2 commits, Aug 23, 2023)
4 changes: 4 additions & 0 deletions google/cloud/bigtable_admin/__init__.py
@@ -115,6 +115,8 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CheckConsistencyResponse,
)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CreateBackupMetadata,
)
@@ -242,6 +244,8 @@
"UpdateInstanceMetadata",
"CheckConsistencyRequest",
"CheckConsistencyResponse",
"CopyBackupMetadata",
"CopyBackupRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
"CreateTableFromSnapshotMetadata",
4 changes: 4 additions & 0 deletions google/cloud/bigtable_admin_v2/__init__.py
@@ -51,6 +51,8 @@
from .types.bigtable_instance_admin import UpdateInstanceMetadata
from .types.bigtable_table_admin import CheckConsistencyRequest
from .types.bigtable_table_admin import CheckConsistencyResponse
from .types.bigtable_table_admin import CopyBackupMetadata
from .types.bigtable_table_admin import CopyBackupRequest
from .types.bigtable_table_admin import CreateBackupMetadata
from .types.bigtable_table_admin import CreateBackupRequest
from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata
@@ -116,6 +118,8 @@
"CheckConsistencyResponse",
"Cluster",
"ColumnFamily",
"CopyBackupMetadata",
"CopyBackupRequest",
"CreateAppProfileRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
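With the two __init__.py changes above, the CopyBackup types are re-exported on the public surface. A minimal import sketch, assuming an installed build that includes this PR (the request field values below are placeholders):

from google.cloud.bigtable_admin import CopyBackupMetadata, CopyBackupRequest
from google.cloud.bigtable_admin_v2 import CopyBackupRequest as CopyBackupRequestV2

# Both packages re-export the same proto-plus class from
# google.cloud.bigtable_admin_v2.types.bigtable_table_admin.
assert CopyBackupRequest is CopyBackupRequestV2

# proto-plus messages accept field kwargs directly.
request = CopyBackupRequest(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="copied-backup",
    source_backup=(
        "projects/my-project/instances/my-instance"
        "/clusters/my-cluster/backups/my-source-backup"
    ),
)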
15 changes: 15 additions & 0 deletions google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -349,6 +349,11 @@
"check_consistency"
]
},
"CopyBackup": {
"methods": [
"copy_backup"
]
},
"CreateBackup": {
"methods": [
"create_backup"
@@ -474,6 +479,11 @@
"check_consistency"
]
},
"CopyBackup": {
"methods": [
"copy_backup"
]
},
"CreateBackup": {
"methods": [
"create_backup"
@@ -599,6 +609,11 @@
"check_consistency"
]
},
"CopyBackup": {
"methods": [
"copy_backup"
]
},
"CreateBackup": {
"methods": [
"create_backup"
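The gapic_metadata.json hunks register the RPC once per transport client (grpc, grpc-async, and rest in this file). A sketch for sanity-checking the mapping from an installed package; the file location assumes the standard GAPIC layout:

import json
import os

import google.cloud.bigtable_admin_v2 as admin_v2

meta_path = os.path.join(os.path.dirname(admin_v2.__file__), "gapic_metadata.json")
with open(meta_path) as fh:
    meta = json.load(fh)

# Each transport entry should now map CopyBackup -> ["copy_backup"].
clients = meta["services"]["BigtableTableAdmin"]["clients"]
for transport, client in clients.items():
    print(transport, client["rpcs"]["CopyBackup"]["methods"])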
@@ -2143,7 +2143,7 @@ async def list_backups(
Returns:
google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager:
The response for
[ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].

Iterating over this object will yield results and
resolve additional pages automatically.
@@ -2218,9 +2218,8 @@ async def restore_table(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Create a new table by restoring from a completed backup. The new
table must be in the same project as the instance containing the
backup. The returned table [long-running
r"""Create a new table by restoring from a completed backup. The
returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -2283,6 +2282,141 @@ async def restore_table(
# Done; return the response.
return response

async def copy_backup(
self,
request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
*,
parent: Optional[str] = None,
backup_id: Optional[str] = None,
source_backup: Optional[str] = None,
expire_time: Optional[timestamp_pb2.Timestamp] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Copy a Cloud Bigtable backup to a new backup in the
destination cluster located in the destination instance
and project.

Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]):
The request object. The request for
[CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
parent (:class:`str`):
Required. The name of the destination cluster that will
contain the backup copy. The cluster must already
exist. Values are of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}``.

This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
backup_id (:class:`str`):
Required. The id of the new backup. The ``backup_id``
along with ``parent`` are combined as
{parent}/backups/{backup_id} to create the full backup
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.

This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
source_backup (:class:`str`):
Required. The source backup to be copied from. The
source backup needs to be in READY state for it to be
copied. Copying a copied backup is not allowed. Once
CopyBackup is in progress, the source backup cannot be
deleted or cleaned up on expiration until CopyBackup is
finished. Values are of the form:
``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``.

This corresponds to the ``source_backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
Required. The expiration time of the copied
backup with microsecond granularity that must be at
least 6 hours and at most 30 days from the time the
request is received. Once the ``expire_time`` has
passed, Cloud Bigtable will delete the backup and free
the resources used by the backup.

This corresponds to the ``expire_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.

Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.

The result type for the operation will be
:class:`google.cloud.bigtable_admin_v2.types.Backup` A
backup of a Cloud Bigtable table.

"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, backup_id, source_backup, expire_time])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)

request = bigtable_table_admin.CopyBackupRequest(request)

# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if backup_id is not None:
request.backup_id = backup_id
if source_backup is not None:
request.source_backup = source_backup
if expire_time is not None:
request.expire_time = expire_time

# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.copy_backup,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)

# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)

# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)

# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
table.Backup,
metadata_type=bigtable_table_admin.CopyBackupMetadata,
)

# Done; return the response.
return response

async def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
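For reviewers who want to exercise the new async surface end to end, a minimal sketch (the resource names and the seven-day expiration are placeholder assumptions, not part of this diff):

import asyncio
import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import timestamp_pb2


async def main() -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()

    # Per the docstring above, expire_time must be at least 6 hours and
    # at most 30 days from now; 7 days is an arbitrary choice.
    expire_time = timestamp_pb2.Timestamp()
    expire_time.FromDatetime(
        datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
    )

    operation = await client.copy_backup(
        parent="projects/my-project/instances/my-instance/clusters/my-cluster",
        backup_id="my-backup-copy",
        source_backup=(
            "projects/my-project/instances/my-instance"
            "/clusters/my-cluster/backups/my-source-backup"
        ),
        expire_time=expire_time,
    )
    backup = await operation.result()  # resolves once the copy finishes
    print(backup.name)


asyncio.run(main())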
@@ -2407,7 +2407,7 @@ def list_backups(
Returns:
google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager:
The response for
[ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].

Iterating over this object will yield results and
resolve additional pages automatically.
@@ -2472,9 +2472,8 @@ def restore_table(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Create a new table by restoring from a completed backup. The new
table must be in the same project as the instance containing the
backup. The returned table [long-running
r"""Create a new table by restoring from a completed backup. The
returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -2538,6 +2537,141 @@ def restore_table(
# Done; return the response.
return response

def copy_backup(
self,
request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
*,
parent: Optional[str] = None,
backup_id: Optional[str] = None,
source_backup: Optional[str] = None,
expire_time: Optional[timestamp_pb2.Timestamp] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Copy a Cloud Bigtable backup to a new backup in the
destination cluster located in the destination instance
and project.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]):
The request object. The request for
[CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
parent (str):
Required. The name of the destination cluster that will
contain the backup copy. The cluster must already
exist. Values are of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}``.

This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
backup_id (str):
Required. The id of the new backup. The ``backup_id``
along with ``parent`` are combined as
{parent}/backups/{backup_id} to create the full backup
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.

This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
source_backup (str):
Required. The source backup to be copied from. The
source backup needs to be in READY state for it to be
copied. Copying a copied backup is not allowed. Once
CopyBackup is in progress, the source backup cannot be
deleted or cleaned up on expiration until CopyBackup is
finished. Values are of the form:
``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``.

This corresponds to the ``source_backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The expiration time of the copied
backup with microsecond granularity that must be at
least 6 hours and at most 30 days from the time the
request is received. Once the ``expire_time`` has
passed, Cloud Bigtable will delete the backup and free
the resources used by the backup.

This corresponds to the ``expire_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.

Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.

The result type for the operation will be
:class:`google.cloud.bigtable_admin_v2.types.Backup` A
backup of a Cloud Bigtable table.

"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, backup_id, source_backup, expire_time])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)

# Minor optimization to avoid making a copy if the user passes
# in a bigtable_table_admin.CopyBackupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, bigtable_table_admin.CopyBackupRequest):
request = bigtable_table_admin.CopyBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if backup_id is not None:
request.backup_id = backup_id
if source_backup is not None:
request.source_backup = source_backup
if expire_time is not None:
request.expire_time = expire_time

# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.copy_backup]

# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)

# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)

# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
table.Backup,
metadata_type=bigtable_table_admin.CopyBackupMetadata,
)

# Done; return the response.
return response

def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
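The synchronous client (diff above) mirrors the async method. The blocking equivalent of the earlier sketch, with the same placeholder names:

import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import timestamp_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

expire_time = timestamp_pb2.Timestamp()
expire_time.FromDatetime(
    datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
)

operation = client.copy_backup(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="my-backup-copy",
    source_backup=(
        "projects/my-project/instances/my-instance"
        "/clusters/my-cluster/backups/my-source-backup"
    ),
    expire_time=expire_time,
)
backup = operation.result()  # blocks until the long-running copy completes
print(backup.name)

In both surfaces the call returns a long-running operation whose result type is table.Backup and whose metadata type is CopyBackupMetadata, matching the restore_table pattern already in these clients.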