From 6edf7fea3c7ca6e3d98677fd149b64a084ab0a07 Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Thu, 7 Mar 2024 07:02:11 +0000 Subject: [PATCH 01/10] style: fix spelling on appsignal apps --- cli/src/commands/application/init.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index 13def9f2..b2310389 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -22,7 +22,7 @@ use wukong_sdk::graphql::appsignal_apps_query::AppsignalAppsQueryAppsignalApps; pub async fn handle_application_init(context: Context) -> Result { let config = Config::load_from_default_path()?; let mut wk_client = WKClient::for_channel(&config, &context.channel)?; - let mut appsignall_apps = None; + let mut appsignal_apps = None; println!("Welcome! Initializing per-repo configuration for your application."); @@ -51,7 +51,7 @@ pub async fn handle_application_init(context: Context) -> Result = Vec::new(); namespaces - .push(configure_namespace("prod".to_string(), &mut wk_client, &mut appsignall_apps).await?); + .push(configure_namespace("prod".to_string(), &mut wk_client, &mut appsignal_apps).await?); let addons = ["Elixir Livebook"]; let selected_addons = inquire::MultiSelect::new("Addons", addons.to_vec()) @@ -71,7 +71,7 @@ pub async fn handle_application_init(context: Context) -> Result Date: Mon, 11 Mar 2024 07:58:34 +0000 Subject: [PATCH 02/10] feat: add application config --- cli/src/commands/application/init.rs | 60 +- cli/src/wukong_client.rs | 22 +- sdk/src/error.rs | 2 + sdk/src/graphql/application.rs | 8 + sdk/src/graphql/mod.rs | 27 +- .../graphql/query/application_config.graphql | 34 + sdk/src/graphql/schema.json | 629 +++++++++- .../gcloud/api/google.cloud.sql.v1.rs | 1083 +++++++++++------ 8 files changed, 1470 insertions(+), 395 deletions(-) create mode 100644 sdk/src/graphql/query/application_config.graphql diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index b2310389..f09eac46 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -15,9 +15,12 @@ use crate::{ }; use crossterm::style::Stylize; use heck::ToSnakeCase; -use inquire::{required, CustomType, Text}; +use inquire::{required, CustomType}; use std::fs; -use wukong_sdk::graphql::appsignal_apps_query::AppsignalAppsQueryAppsignalApps; +use wukong_sdk::{ + error::{APIError, WKError}, + graphql::{application_config_query, appsignal_apps_query::AppsignalAppsQueryAppsignalApps}, +}; pub async fn handle_application_init(context: Context) -> Result { let config = Config::load_from_default_path()?; @@ -27,12 +30,35 @@ pub async fn handle_application_init(context: Context) -> Result Result Result, WKCliError> { Ok(workflow_names) } + +async fn get_application_config(wk_client: &mut WKClient, name: &str) -> Result { + let application_config = match wk_client.fetch_application_config(name).await { + Ok(resp) => Ok(resp), + Err(err) => match &err { + WKCliError::WKSdkError(WKError::APIError(APIError::ApplicationConfigNotFound)) => { + Ok(application_config_query::ResponseData { + application_config: None, + }) + } + _ => Err(err), + }, + }? 
+    .application_config;
+
+    Ok(application_config.is_some())
+}
diff --git a/cli/src/wukong_client.rs b/cli/src/wukong_client.rs
index 2a5975d5..fbb2e6e7 100644
--- a/cli/src/wukong_client.rs
+++ b/cli/src/wukong_client.rs
@@ -7,12 +7,13 @@ use log::debug;
 use std::collections::HashMap;
 use wukong_sdk::{
     graphql::{
-        application_query, application_with_k8s_cluster_query, applications_query,
-        appsignal_apps_query, appsignal_average_error_rate_query, appsignal_average_latency_query,
-        appsignal_average_throughput_query, cd_pipeline_for_rollback_query,
-        cd_pipeline_github_query, cd_pipeline_query, cd_pipelines_query, changelogs_query,
-        ci_status_query, deploy_livebook, deployment::cd_pipeline_status_query, destroy_livebook,
-        execute_cd_pipeline, is_authorized_query, kubernetes_pods_query, livebook_resource_query,
+        application_config_query, application_query, application_with_k8s_cluster_query,
+        applications_query, appsignal_apps_query, appsignal_average_error_rate_query,
+        appsignal_average_latency_query, appsignal_average_throughput_query,
+        cd_pipeline_for_rollback_query, cd_pipeline_github_query, cd_pipeline_query,
+        cd_pipelines_query, changelogs_query, ci_status_query, deploy_livebook,
+        deployment::cd_pipeline_status_query, destroy_livebook, execute_cd_pipeline,
+        is_authorized_query, kubernetes_pods_query, livebook_resource_query,
         multi_branch_pipeline_query, pipeline_query, pipelines_query, AppsignalTimeFrame,
     },
     services::{
@@ -425,4 +426,13 @@ impl WKClient {
             .get_gcloud_database_metrics(project_id, access_token)
             .await
     }
+
+    #[wukong_telemetry(api_event = "fetch_application_config")]
+    pub async fn fetch_application_config(
+        &mut self,
+        name: &str,
+    ) -> Result<application_config_query::ResponseData, WKCliError> {
+        self.check_and_refresh_tokens().await?;
+        self.inner.fetch_application_config(name).await
+    }
 }
diff --git a/sdk/src/error.rs b/sdk/src/error.rs
index e3f47b25..e5d13a57 100644
--- a/sdk/src/error.rs
+++ b/sdk/src/error.rs
@@ -86,6 +86,8 @@ pub enum APIError {
     BuildNotFound,
     #[error("Github Workflow not found.")]
     GithubWorkflowNotFound,
+    #[error("Failed to get the application config.")]
+    ApplicationConfigNotFound,
 }

 #[derive(Debug, ThisError)]
diff --git a/sdk/src/graphql/application.rs b/sdk/src/graphql/application.rs
index 76677e8b..12f09b1f 100644
--- a/sdk/src/graphql/application.rs
+++ b/sdk/src/graphql/application.rs
@@ -24,6 +24,14 @@ pub struct ApplicationWithK8sClusterQuery;
 )]
 pub struct ApplicationsQuery;

+#[derive(GraphQLQuery)]
+#[graphql(
+    schema_path = "src/graphql/schema.json",
+    query_path = "src/graphql/query/application_config.graphql",
+    response_derives = "Debug, Serialize, Deserialize"
+)]
+pub struct ApplicationConfigQuery;
+
 #[cfg(test)]
 mod test {
     use crate::{ApiChannel, WKClient, WKConfig};
diff --git a/sdk/src/graphql/mod.rs b/sdk/src/graphql/mod.rs
index e09966cb..257b0eb5 100644
--- a/sdk/src/graphql/mod.rs
+++ b/sdk/src/graphql/mod.rs
@@ -6,11 +6,14 @@ pub mod deployment_github;
 pub mod kubernetes;
 pub mod pipeline;

-use self::deployment::{cd_pipeline_status_query, CdPipelineStatusQuery};
+use self::{
+    application::ApplicationConfigQuery,
+    deployment::{cd_pipeline_status_query, CdPipelineStatusQuery},
+};
 pub use self::{
     application::{
-        application_query, application_with_k8s_cluster_query, applications_query,
-        ApplicationQuery, ApplicationWithK8sClusterQuery, ApplicationsQuery,
+        application_config_query, application_query, application_with_k8s_cluster_query,
+        applications_query, ApplicationQuery, ApplicationWithK8sClusterQuery, ApplicationsQuery,
     },
     appsignal::{
appsignal_apps_query, appsignal_average_error_rate_query, appsignal_average_latency_query, @@ -744,6 +747,23 @@ impl WKClient { .await .map_err(|err| err.into()) } + + pub async fn fetch_application_config( + &self, + name: &str, + ) -> Result { + let gql_client = setup_gql_client(&self.access_token, &self.channel)?; + + gql_client + .post_graphql::( + &self.api_url, + application_config_query::Variables { + name: name.to_string(), + }, + ) + .await + .map_err(|err| err.into()) + } } fn setup_gql_client(access_token: &str, channel: &ApiChannel) -> Result { @@ -844,6 +864,7 @@ impl ErrorHandler for CanaryErrorHandler { // "github_ref_not_found" => {} // "github_commit_history_not_found" => {} "github_workflow_not_found" => APIError::GithubWorkflowNotFound, + "application_config_not_found" => APIError::ApplicationConfigNotFound, // "slack_webhook_not_configured" => {} _ => APIError::ResponseError { code: original_error_code.to_string(), diff --git a/sdk/src/graphql/query/application_config.graphql b/sdk/src/graphql/query/application_config.graphql new file mode 100644 index 00000000..3e423c36 --- /dev/null +++ b/sdk/src/graphql/query/application_config.graphql @@ -0,0 +1,34 @@ +query ApplicationConfigQuery($name: String!) { + applicationConfig(name: $name) { + name + enable + namespaces { + appsignal { + defaultNamespace + enable + environment + } + build { + buildWorkflow + } + cloudsql { + enable + projectId + } + delivery { + baseReplica + rolloutStrategy + target + } + honeycomb { + dataset + enable + } + type + } + workflows { + excludedWorkflows + provider + } + } +} diff --git a/sdk/src/graphql/schema.json b/sdk/src/graphql/schema.json index 16862f38..a5c6c303 100644 --- a/sdk/src/graphql/schema.json +++ b/sdk/src/graphql/schema.json @@ -1129,6 +1129,458 @@ "name": "Application", "possibleTypes": null }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "addons", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "enable", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "namespaces", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "ApplicationConfigNamespace", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "workflows", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigWorkflows", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfig", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "defaultNamespace", + "type": { + "kind": "NON_NULL", + "name": null, + 
"ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "enable", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "environment", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigAppsignal", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "buildWorkflow", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigBuild", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "enable", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "projectId", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigCloudSQL", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "baseReplica", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "rolloutStrategy", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "target", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigDelivery", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "dataset", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "enable", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigHoneycomb", + "possibleTypes": null + }, + { + "description": null, 
+ "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "appsignal", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigAppsignal", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "build", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigBuild", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "cloudsql", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigCloudSQL", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "delivery", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigDelivery", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "honeycomb", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfigHoneycomb", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "type", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigNamespace", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "excludedWorkflows", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "provider", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "ApplicationConfigWorkflows", + "possibleTypes": null + }, { "description": null, "enumValues": null, @@ -1212,6 +1664,61 @@ "name": "AppsignalApp", "possibleTypes": null }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "deployedAt", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "revision", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "AppsignalDeployMarker", + "possibleTypes": null + }, { "description": null, "enumValues": null, @@ -1371,6 +1878,18 @@ } } }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "perMarkerCount", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + }, { "args": [], "deprecationReason": null, @@ -3636,6 +4155,33 @@ "ofType": null } }, + { 
+ "args": [ + { + "defaultValue": null, + "description": null, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "applicationConfig", + "type": { + "kind": "OBJECT", + "name": "ApplicationConfig", + "ofType": null + } + }, { "args": [], "deprecationReason": null, @@ -3700,9 +4246,58 @@ } } }, + { + "defaultValue": "1", + "description": null, + "name": "limit", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "appsignalDeployMarkers", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "AppsignalDeployMarker", + "ofType": null + } + } + } + } + }, + { + "args": [ { "defaultValue": null, "description": null, + "name": "appId", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "defaultValue": null, + "description": "\"*\" for all namespaces.", "name": "namespace", "type": { "kind": "NON_NULL", @@ -3785,21 +4380,27 @@ }, { "defaultValue": null, - "description": null, + "description": "nil for no limit, n for n incidents.", "name": "limit", "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } + "kind": "SCALAR", + "name": "Int", + "ofType": null } }, { "defaultValue": null, - "description": null, + "description": "nil for all markers, n for marker with id = n.", + "name": "marker", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "defaultValue": null, + "description": "[] for all namespaces.", "name": "namespaces", "type": { "kind": "NON_NULL", @@ -3823,7 +4424,7 @@ "deprecationReason": null, "description": null, "isDeprecated": false, - "name": "appsignalExceptionIncident", + "name": "appsignalExceptionIncidents", "type": { "kind": "NON_NULL", "name": null, @@ -3860,7 +4461,7 @@ }, { "defaultValue": null, - "description": null, + "description": "\"*\" for all namespaces.", "name": "namespace", "type": { "kind": "NON_NULL", @@ -3957,7 +4558,7 @@ }, { "defaultValue": null, - "description": null, + "description": "[] for all namespaces.", "name": "namespaces", "type": { "kind": "NON_NULL", @@ -3981,7 +4582,7 @@ "deprecationReason": null, "description": null, "isDeprecated": false, - "name": "appsignalPerformanceIncident", + "name": "appsignalPerformanceIncidents", "type": { "kind": "NON_NULL", "name": null, @@ -4018,7 +4619,7 @@ }, { "defaultValue": null, - "description": null, + "description": "\"*\" for all namespaces.", "name": "namespace", "type": { "kind": "NON_NULL", diff --git a/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs b/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs index 3302c836..f193de91 100644 --- a/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs +++ b/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs @@ -33,7 +33,17 @@ pub struct ApiWarning { } /// Nested message and enum types in `ApiWarning`. 
pub mod api_warning { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlApiWarningCode { /// An unknown or unset warning type from Cloud SQL API. @@ -85,7 +95,17 @@ pub struct BackupRetentionSettings { /// Nested message and enum types in `BackupRetentionSettings`. pub mod backup_retention_settings { /// The units that retained_backups specifies, we only support COUNT. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum RetentionUnit { /// Backup retention unit is unspecified, will be treated as COUNT. @@ -291,7 +311,9 @@ pub struct DemoteMasterConfiguration { /// replication connection and is stored by MySQL in a file named /// `master.info` in the data directory. #[prost(message, optional, tag = "2")] - pub mysql_replica_configuration: ::core::option::Option, + pub mysql_replica_configuration: ::core::option::Option< + DemoteMasterMySqlReplicaConfiguration, + >, } /// Read-replica configuration specific to MySQL databases. #[allow(clippy::derive_partial_eq_without_eq)] @@ -398,7 +420,9 @@ pub mod export_context { #[prost(message, optional, tag = "2")] pub schema_only: ::core::option::Option, #[prost(message, optional, tag = "3")] - pub mysql_export_options: ::core::option::Option, + pub mysql_export_options: ::core::option::Option< + sql_export_options::MysqlExportOptions, + >, } /// Nested message and enum types in `SqlExportOptions`. pub mod sql_export_options { @@ -505,7 +529,9 @@ pub mod import_context { #[derive(Clone, PartialEq, ::prost::Message)] pub struct SqlBakImportOptions { #[prost(message, optional, tag = "1")] - pub encryption_options: ::core::option::Option, + pub encryption_options: ::core::option::Option< + sql_bak_import_options::EncryptionOptions, + >, /// Whether or not the backup set being restored is striped. /// Applies only to Cloud SQL for SQL Server. #[prost(message, optional, tag = "2")] @@ -838,7 +864,17 @@ pub struct Operation { /// Nested message and enum types in `Operation`. pub mod operation { /// The type of Cloud SQL operation. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlOperationType { /// Unknown operation type. @@ -1017,7 +1053,17 @@ pub mod operation { } } /// The status of an operation. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlOperationStatus { /// The state of the operation is unknown. @@ -1106,7 +1152,17 @@ pub struct PasswordValidationPolicy { /// Nested message and enum types in `PasswordValidationPolicy`. pub mod password_validation_policy { /// The complexity choices of the password. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Complexity { /// Complexity check is not specified. 
@@ -1158,7 +1214,9 @@ pub struct Settings { /// (Deprecated) Applied to First Generation instances only. #[deprecated] #[prost(string, repeated, tag = "2")] - pub authorized_gae_applications: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + pub authorized_gae_applications: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, /// The tier (or machine type) for this instance, for example /// `db-custom-1-3840`. WARNING: Changing this restarts the instance. #[prost(string, tag = "3")] @@ -1169,8 +1227,10 @@ pub struct Settings { /// User-provided labels, represented as a dictionary where each label is a /// single key value pair. #[prost(map = "string, string", tag = "5")] - pub user_labels: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub user_labels: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// Availability type. Potential values: /// /// * `ZONAL`: The instance serves data from only one zone. Outages in that @@ -1298,7 +1358,17 @@ pub struct Settings { /// Nested message and enum types in `Settings`. pub mod settings { /// Specifies when the instance is activated. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlActivationPolicy { /// Unknown activation plan. @@ -1335,7 +1405,17 @@ pub mod settings { } } /// The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Edition { /// The instance did not specify the edition. @@ -1368,7 +1448,17 @@ pub mod settings { } } /// The options for enforcing Cloud SQL connectors in the instance. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum ConnectorEnforcement { /// The requirement for Cloud SQL connectors is unknown. @@ -2089,7 +2179,7 @@ pub struct SqlInstancesInsertRequest { #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, #[prost(message, optional, tag = "100")] - pub body: ::core::option::Option, + pub body: ::core::option::Option, } /// Instance list request. #[allow(clippy::derive_partial_eq_without_eq)] @@ -2142,7 +2232,7 @@ pub struct SqlInstancesPatchRequest { #[prost(string, tag = "2")] pub project: ::prost::alloc::string::String, #[prost(message, optional, tag = "100")] - pub body: ::core::option::Option, + pub body: ::core::option::Option, } /// Instance promote replica request. #[allow(clippy::derive_partial_eq_without_eq)] @@ -2263,7 +2353,7 @@ pub struct SqlInstancesUpdateRequest { #[prost(string, tag = "2")] pub project: ::prost::alloc::string::String, #[prost(message, optional, tag = "100")] - pub body: ::core::option::Option, + pub body: ::core::option::Option, } /// Instance reschedule maintenance request. #[allow(clippy::derive_partial_eq_without_eq)] @@ -2308,17 +2398,23 @@ pub struct BackupReencryptionConfig { #[prost(int32, optional, tag = "1")] pub backup_limit: ::core::option::Option, /// Type of backups users want to re-encrypt. 
- #[prost( - enumeration = "backup_reencryption_config::BackupType", - optional, - tag = "2" - )] + #[prost(enumeration = "backup_reencryption_config::BackupType", optional, tag = "2")] pub backup_type: ::core::option::Option, } /// Nested message and enum types in `BackupReencryptionConfig`. pub mod backup_reencryption_config { /// Backup type for re-encryption - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum BackupType { /// Unknown backup type, will be defaulted to AUTOMATIC backup type @@ -2388,12 +2484,23 @@ pub struct SqlInstancesVerifyExternalSyncSettingsRequest { oneof = "sql_instances_verify_external_sync_settings_request::SyncConfig", tags = "6" )] - pub sync_config: - ::core::option::Option, + pub sync_config: ::core::option::Option< + sql_instances_verify_external_sync_settings_request::SyncConfig, + >, } /// Nested message and enum types in `SqlInstancesVerifyExternalSyncSettingsRequest`. pub mod sql_instances_verify_external_sync_settings_request { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum ExternalSyncMode { /// Unknown external sync mode, will be defaulted to ONLINE mode @@ -2461,16 +2568,25 @@ pub struct SqlInstancesStartExternalSyncRequest { tag = "7" )] pub sync_parallel_level: i32, - #[prost( - oneof = "sql_instances_start_external_sync_request::SyncConfig", - tags = "6" - )] - pub sync_config: ::core::option::Option, + #[prost(oneof = "sql_instances_start_external_sync_request::SyncConfig", tags = "6")] + pub sync_config: ::core::option::Option< + sql_instances_start_external_sync_request::SyncConfig, + >, } /// Nested message and enum types in `SqlInstancesStartExternalSyncRequest`. pub mod sql_instances_start_external_sync_request { /// External Sync parallel level. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum ExternalSyncParallelLevel { /// Unknown sync parallel level. Will be defaulted to OPTIMAL. @@ -2603,7 +2719,7 @@ pub struct InstancesListResponse { pub warnings: ::prost::alloc::vec::Vec, /// List of database instance resources. #[prost(message, repeated, tag = "3")] - pub items: ::prost::alloc::vec::Vec, + pub items: ::prost::alloc::vec::Vec, /// The continuation token, used to page through large result sets. Provide /// this value in a subsequent request to return the next page of results. #[prost(string, tag = "4")] @@ -2727,7 +2843,7 @@ pub struct BinLogCoordinates { /// A Cloud SQL instance resource. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct DatabaseMetrics { +pub struct DatabaseInstance { /// This is always `sql#instance`. #[prost(string, tag = "1")] pub kind: ::prost::alloc::string::String, @@ -2841,7 +2957,9 @@ pub struct DatabaseMetrics { pub secondary_gce_zone: ::prost::alloc::string::String, /// Disk encryption configuration specific to an instance. 
#[prost(message, optional, tag = "26")] - pub disk_encryption_configuration: ::core::option::Option, + pub disk_encryption_configuration: ::core::option::Option< + DiskEncryptionConfiguration, + >, /// Disk encryption status specific to an instance. #[prost(message, optional, tag = "27")] pub disk_encryption_status: ::core::option::Option, @@ -2851,7 +2969,9 @@ pub struct DatabaseMetrics { pub root_password: ::prost::alloc::string::String, /// The start time of any upcoming scheduled maintenance for this instance. #[prost(message, optional, tag = "30")] - pub scheduled_maintenance: ::core::option::Option, + pub scheduled_maintenance: ::core::option::Option< + database_instance::SqlScheduledMaintenance, + >, /// The status indicating if instance satisfiesPzs. /// Reserved for future use. #[prost(message, optional, tag = "35")] @@ -2868,7 +2988,9 @@ pub struct DatabaseMetrics { /// * Readers: /// * the proactive database wellness job #[prost(message, optional, tag = "38")] - pub out_of_disk_report: ::core::option::Option, + pub out_of_disk_report: ::core::option::Option< + database_instance::SqlOutOfDiskReport, + >, /// Output only. The time when the instance was created in /// [RFC 3339]() format, for example /// `2012-11-15T16:19:00.094Z`. @@ -2876,12 +2998,14 @@ pub struct DatabaseMetrics { pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. List all maintenance versions applicable on the instance #[prost(string, repeated, tag = "41")] - pub available_maintenance_versions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + pub available_maintenance_versions: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, /// The current software version on the instance. #[prost(string, tag = "42")] pub maintenance_version: ::prost::alloc::string::String, } -/// Nested message and enum types in `DatabaseMetrics`. +/// Nested message and enum types in `DatabaseInstance`. pub mod database_instance { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2944,7 +3068,15 @@ pub mod database_instance { pub mod sql_out_of_disk_report { /// This enum lists all possible states regarding out-of-disk issues. #[derive( - Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration )] #[repr(i32)] pub enum SqlOutOfDiskState { @@ -2980,7 +3112,17 @@ pub mod database_instance { } } /// The current serving state of the database instance. - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlInstanceState { /// The state of the instance is unknown. @@ -3040,8 +3182,9 @@ pub mod database_instance { pub struct SqlInstancesRescheduleMaintenanceRequestBody { /// Required. The type of the reschedule the user wants. #[prost(message, optional, tag = "3")] - pub reschedule: - ::core::option::Option, + pub reschedule: ::core::option::Option< + sql_instances_reschedule_maintenance_request_body::Reschedule, + >, } /// Nested message and enum types in `SqlInstancesRescheduleMaintenanceRequestBody`. 
pub mod sql_instances_reschedule_maintenance_request_body { @@ -3058,7 +3201,17 @@ pub mod sql_instances_reschedule_maintenance_request_body { #[prost(message, optional, tag = "2")] pub schedule_time: ::core::option::Option<::prost_types::Timestamp>, } - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum RescheduleType { Unspecified = 0, @@ -3197,7 +3350,17 @@ pub struct SqlExternalSyncSettingError { } /// Nested message and enum types in `SqlExternalSyncSettingError`. pub mod sql_external_sync_setting_error { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum SqlExternalSyncSettingErrorType { Unspecified = 0, @@ -3280,17 +3443,25 @@ pub mod sql_external_sync_setting_error { SqlExternalSyncSettingErrorType::Unspecified => { "SQL_EXTERNAL_SYNC_SETTING_ERROR_TYPE_UNSPECIFIED" } - SqlExternalSyncSettingErrorType::ConnectionFailure => "CONNECTION_FAILURE", + SqlExternalSyncSettingErrorType::ConnectionFailure => { + "CONNECTION_FAILURE" + } SqlExternalSyncSettingErrorType::BinlogNotEnabled => "BINLOG_NOT_ENABLED", SqlExternalSyncSettingErrorType::IncompatibleDatabaseVersion => { "INCOMPATIBLE_DATABASE_VERSION" } - SqlExternalSyncSettingErrorType::ReplicaAlreadySetup => "REPLICA_ALREADY_SETUP", - SqlExternalSyncSettingErrorType::InsufficientPrivilege => "INSUFFICIENT_PRIVILEGE", + SqlExternalSyncSettingErrorType::ReplicaAlreadySetup => { + "REPLICA_ALREADY_SETUP" + } + SqlExternalSyncSettingErrorType::InsufficientPrivilege => { + "INSUFFICIENT_PRIVILEGE" + } SqlExternalSyncSettingErrorType::UnsupportedMigrationType => { "UNSUPPORTED_MIGRATION_TYPE" } - SqlExternalSyncSettingErrorType::NoPglogicalInstalled => "NO_PGLOGICAL_INSTALLED", + SqlExternalSyncSettingErrorType::NoPglogicalInstalled => { + "NO_PGLOGICAL_INSTALLED" + } SqlExternalSyncSettingErrorType::PglogicalNodeAlreadyExists => { "PGLOGICAL_NODE_ALREADY_EXISTS" } @@ -3307,24 +3478,34 @@ pub mod sql_external_sync_setting_error { SqlExternalSyncSettingErrorType::InsufficientMaxWorkerProcesses => { "INSUFFICIENT_MAX_WORKER_PROCESSES" } - SqlExternalSyncSettingErrorType::UnsupportedExtensions => "UNSUPPORTED_EXTENSIONS", + SqlExternalSyncSettingErrorType::UnsupportedExtensions => { + "UNSUPPORTED_EXTENSIONS" + } SqlExternalSyncSettingErrorType::InvalidRdsLogicalReplication => { "INVALID_RDS_LOGICAL_REPLICATION" } - SqlExternalSyncSettingErrorType::InvalidLoggingSetup => "INVALID_LOGGING_SETUP", + SqlExternalSyncSettingErrorType::InvalidLoggingSetup => { + "INVALID_LOGGING_SETUP" + } SqlExternalSyncSettingErrorType::InvalidDbParam => "INVALID_DB_PARAM", - SqlExternalSyncSettingErrorType::UnsupportedGtidMode => "UNSUPPORTED_GTID_MODE", + SqlExternalSyncSettingErrorType::UnsupportedGtidMode => { + "UNSUPPORTED_GTID_MODE" + } SqlExternalSyncSettingErrorType::SqlserverAgentNotRunning => { "SQLSERVER_AGENT_NOT_RUNNING" } SqlExternalSyncSettingErrorType::UnsupportedTableDefinition => { "UNSUPPORTED_TABLE_DEFINITION" } - SqlExternalSyncSettingErrorType::UnsupportedDefiner => "UNSUPPORTED_DEFINER", + SqlExternalSyncSettingErrorType::UnsupportedDefiner => { + "UNSUPPORTED_DEFINER" + } SqlExternalSyncSettingErrorType::SqlserverServernameMismatch => { "SQLSERVER_SERVERNAME_MISMATCH" } - 
SqlExternalSyncSettingErrorType::PrimaryAlreadySetup => "PRIMARY_ALREADY_SETUP", + SqlExternalSyncSettingErrorType::PrimaryAlreadySetup => { + "PRIMARY_ALREADY_SETUP" + } SqlExternalSyncSettingErrorType::UnsupportedBinlogFormat => { "UNSUPPORTED_BINLOG_FORMAT" } @@ -3334,7 +3515,9 @@ pub mod sql_external_sync_setting_error { SqlExternalSyncSettingErrorType::UnsupportedStorageEngine => { "UNSUPPORTED_STORAGE_ENGINE" } - SqlExternalSyncSettingErrorType::LimitedSupportTables => "LIMITED_SUPPORT_TABLES", + SqlExternalSyncSettingErrorType::LimitedSupportTables => { + "LIMITED_SUPPORT_TABLES" + } SqlExternalSyncSettingErrorType::ExistingDataInReplica => { "EXISTING_DATA_IN_REPLICA" } @@ -3356,29 +3539,43 @@ pub mod sql_external_sync_setting_error { /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { - "SQL_EXTERNAL_SYNC_SETTING_ERROR_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "SQL_EXTERNAL_SYNC_SETTING_ERROR_TYPE_UNSPECIFIED" => { + Some(Self::Unspecified) + } "CONNECTION_FAILURE" => Some(Self::ConnectionFailure), "BINLOG_NOT_ENABLED" => Some(Self::BinlogNotEnabled), - "INCOMPATIBLE_DATABASE_VERSION" => Some(Self::IncompatibleDatabaseVersion), + "INCOMPATIBLE_DATABASE_VERSION" => { + Some(Self::IncompatibleDatabaseVersion) + } "REPLICA_ALREADY_SETUP" => Some(Self::ReplicaAlreadySetup), "INSUFFICIENT_PRIVILEGE" => Some(Self::InsufficientPrivilege), "UNSUPPORTED_MIGRATION_TYPE" => Some(Self::UnsupportedMigrationType), "NO_PGLOGICAL_INSTALLED" => Some(Self::NoPglogicalInstalled), "PGLOGICAL_NODE_ALREADY_EXISTS" => Some(Self::PglogicalNodeAlreadyExists), "INVALID_WAL_LEVEL" => Some(Self::InvalidWalLevel), - "INVALID_SHARED_PRELOAD_LIBRARY" => Some(Self::InvalidSharedPreloadLibrary), - "INSUFFICIENT_MAX_REPLICATION_SLOTS" => Some(Self::InsufficientMaxReplicationSlots), + "INVALID_SHARED_PRELOAD_LIBRARY" => { + Some(Self::InvalidSharedPreloadLibrary) + } + "INSUFFICIENT_MAX_REPLICATION_SLOTS" => { + Some(Self::InsufficientMaxReplicationSlots) + } "INSUFFICIENT_MAX_WAL_SENDERS" => Some(Self::InsufficientMaxWalSenders), - "INSUFFICIENT_MAX_WORKER_PROCESSES" => Some(Self::InsufficientMaxWorkerProcesses), + "INSUFFICIENT_MAX_WORKER_PROCESSES" => { + Some(Self::InsufficientMaxWorkerProcesses) + } "UNSUPPORTED_EXTENSIONS" => Some(Self::UnsupportedExtensions), - "INVALID_RDS_LOGICAL_REPLICATION" => Some(Self::InvalidRdsLogicalReplication), + "INVALID_RDS_LOGICAL_REPLICATION" => { + Some(Self::InvalidRdsLogicalReplication) + } "INVALID_LOGGING_SETUP" => Some(Self::InvalidLoggingSetup), "INVALID_DB_PARAM" => Some(Self::InvalidDbParam), "UNSUPPORTED_GTID_MODE" => Some(Self::UnsupportedGtidMode), "SQLSERVER_AGENT_NOT_RUNNING" => Some(Self::SqlserverAgentNotRunning), "UNSUPPORTED_TABLE_DEFINITION" => Some(Self::UnsupportedTableDefinition), "UNSUPPORTED_DEFINER" => Some(Self::UnsupportedDefiner), - "SQLSERVER_SERVERNAME_MISMATCH" => Some(Self::SqlserverServernameMismatch), + "SQLSERVER_SERVERNAME_MISMATCH" => { + Some(Self::SqlserverServernameMismatch) + } "PRIMARY_ALREADY_SETUP" => Some(Self::PrimaryAlreadySetup), "UNSUPPORTED_BINLOG_FORMAT" => Some(Self::UnsupportedBinlogFormat), "BINLOG_RETENTION_SETTING" => Some(Self::BinlogRetentionSetting), @@ -3389,7 +3586,9 @@ pub mod sql_external_sync_setting_error { "RISKY_BACKUP_ADMIN_PRIVILEGE" => Some(Self::RiskyBackupAdminPrivilege), "INSUFFICIENT_GCS_PERMISSIONS" => Some(Self::InsufficientGcsPermissions), "INVALID_FILE_INFO" => Some(Self::InvalidFileInfo), - 
"UNSUPPORTED_DATABASE_SETTINGS" => Some(Self::UnsupportedDatabaseSettings), + "UNSUPPORTED_DATABASE_SETTINGS" => { + Some(Self::UnsupportedDatabaseSettings) + } _ => None, } } @@ -3536,8 +3735,8 @@ impl SqlSuspensionReason { /// Generated client implementations. pub mod sql_instances_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::http::Uri; use tonic::codegen::*; + use tonic::codegen::http::Uri; /// Service to manage Cloud SQL instances. #[derive(Debug, Clone)] pub struct SqlInstancesServiceClient { @@ -3582,8 +3781,9 @@ pub mod sql_instances_service_client { >::ResponseBody, >, >, - >>::Error: - Into + Send + Sync, + , + >>::Error: Into + Send + Sync, { SqlInstancesServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -3627,21 +3827,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/AddServerCa", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "AddServerCa", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "AddServerCa", + ), + ); self.inner.unary(req, path, codec).await } /// Creates a Cloud SQL instance as a clone of the source instance. Using this @@ -3650,21 +3856,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Clone", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Clone", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Clone"), + ); self.inner.unary(req, path, codec).await } /// Deletes a Cloud SQL instance. 
@@ -3672,21 +3881,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Delete", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Delete", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Delete"), + ); self.inner.unary(req, path, codec).await } /// Demotes the stand-alone instance to be a Cloud SQL read replica for an @@ -3695,21 +3907,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/DemoteMaster", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "DemoteMaster", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "DemoteMaster", + ), + ); self.inner.unary(req, path, codec).await } /// Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL @@ -3718,21 +3936,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Export", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Export", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Export"), + ); self.inner.unary(req, path, codec).await } /// Initiates a manual failover of a high availability (HA) primary instance @@ -3747,21 +3968,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( 
"/google.cloud.sql.v1.SqlInstancesService/Failover", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Failover", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "Failover", + ), + ); self.inner.unary(req, path, codec).await } /// Reencrypt CMEK instance with latest key version. @@ -3769,43 +3996,55 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Reencrypt", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Reencrypt", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "Reencrypt", + ), + ); self.inner.unary(req, path, codec).await } /// Retrieves a resource containing information about a Cloud SQL instance. pub async fn get( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Get", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Get", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Get"), + ); self.inner.unary(req, path, codec).await } /// Imports data into a Cloud SQL instance from a SQL dump or CSV file in @@ -3814,21 +4053,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Import", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Import", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Import"), + ); self.inner.unary(req, path, codec).await } /// Creates a new Cloud SQL instance. 
@@ -3836,44 +4078,52 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Insert", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Insert", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Insert"), + ); self.inner.unary(req, path, codec).await } /// Lists instances under a given project. pub async fn list( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/List", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "List", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "List"), + ); self.inner.unary(req, path, codec).await } /// Lists all of the trusted Certificate Authorities (CAs) for the specified @@ -3888,21 +4138,27 @@ pub mod sql_instances_service_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/ListServerCas", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "ListServerCas", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "ListServerCas", + ), + ); self.inner.unary(req, path, codec).await } /// Partially updates settings of a Cloud SQL instance by merging the request @@ -3911,21 +4167,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Patch", ); let mut req = 
request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Patch", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Patch"), + ); self.inner.unary(req, path, codec).await } /// Promotes the read replica instance to be a stand-alone Cloud SQL instance. @@ -3934,21 +4193,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/PromoteReplica", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "PromoteReplica", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "PromoteReplica", + ), + ); self.inner.unary(req, path, codec).await } /// Deletes all client certificates and generates a new server SSL certificate @@ -3957,21 +4222,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/ResetSslConfig", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "ResetSslConfig", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "ResetSslConfig", + ), + ); self.inner.unary(req, path, codec).await } /// Restarts a Cloud SQL instance. @@ -3979,21 +4250,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Restart", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Restart", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Restart"), + ); self.inner.unary(req, path, codec).await } /// Restores a backup of a Cloud SQL instance. 
Using this operation might cause @@ -4002,21 +4276,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/RestoreBackup", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "RestoreBackup", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "RestoreBackup", + ), + ); self.inner.unary(req, path, codec).await } /// Rotates the server certificate to one signed by the Certificate Authority @@ -4025,21 +4305,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/RotateServerCa", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "RotateServerCa", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "RotateServerCa", + ), + ); self.inner.unary(req, path, codec).await } /// Starts the replication in the read replica instance. @@ -4047,21 +4333,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/StartReplica", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "StartReplica", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "StartReplica", + ), + ); self.inner.unary(req, path, codec).await } /// Stops the replication in the read replica instance. 
@@ -4069,21 +4361,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/StopReplica", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "StopReplica", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "StopReplica", + ), + ); self.inner.unary(req, path, codec).await } /// Truncate MySQL general and slow query log tables @@ -4092,21 +4390,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/TruncateLog", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "TruncateLog", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "TruncateLog", + ), + ); self.inner.unary(req, path, codec).await } /// Updates settings of a Cloud SQL instance. Using this operation might cause @@ -4115,21 +4419,24 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/Update", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "Update", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Update"), + ); self.inner.unary(req, path, codec).await } /// Generates a short-lived X509 certificate containing the provided public key @@ -4138,70 +4445,94 @@ pub mod sql_instances_service_client { /// database. 
pub async fn create_ephemeral( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::SqlInstancesCreateEphemeralCertRequest, + >, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/CreateEphemeral", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "CreateEphemeral", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "CreateEphemeral", + ), + ); self.inner.unary(req, path, codec).await } /// Reschedules the maintenance on the given instance. pub async fn reschedule_maintenance( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::SqlInstancesRescheduleMaintenanceRequest, + >, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/RescheduleMaintenance", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "RescheduleMaintenance", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "RescheduleMaintenance", + ), + ); self.inner.unary(req, path, codec).await } /// Verify External primary instance external sync settings. pub async fn verify_external_sync_settings( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::SqlInstancesVerifyExternalSyncSettingsRequest, + >, ) -> std::result::Result< tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/VerifyExternalSyncSettings", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "VerifyExternalSyncSettings", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "VerifyExternalSyncSettings", + ), + ); self.inner.unary(req, path, codec).await } /// Start External primary instance migration. 
@@ -4209,21 +4540,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/StartExternalSync", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "StartExternalSync", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "StartExternalSync", + ), + ); self.inner.unary(req, path, codec).await } /// Perform Disk Shrink on primary instance. @@ -4231,46 +4568,60 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/PerformDiskShrink", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "PerformDiskShrink", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "PerformDiskShrink", + ), + ); self.inner.unary(req, path, codec).await } /// Get Disk Shrink Config for a given instance. pub async fn get_disk_shrink_config( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::SqlInstancesGetDiskShrinkConfigRequest, + >, ) -> std::result::Result< tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/GetDiskShrinkConfig", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "GetDiskShrinkConfig", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "GetDiskShrinkConfig", + ), + ); self.inner.unary(req, path, codec).await } /// Reset Replica Size to primary instance disk size. 
@@ -4278,21 +4629,27 @@ pub mod sql_instances_service_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.sql.v1.SqlInstancesService/ResetReplicaSize", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "google.cloud.sql.v1.SqlInstancesService", - "ResetReplicaSize", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "ResetReplicaSize", + ), + ); self.inner.unary(req, path, codec).await } } From d8e23c909b7cb79d58bde2a873d36d88ee5ee12d Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Mon, 11 Mar 2024 08:09:15 +0000 Subject: [PATCH 03/10] style: clippy fixes --- cli/src/commands/application/init.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index ef2e806d..cf962f6f 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -29,7 +29,7 @@ pub async fn handle_application_init(context: Context) -> Result Date: Mon, 11 Mar 2024 08:15:25 +0000 Subject: [PATCH 04/10] style: format the code --- cli/src/wukong_client.rs | 10 +++++++++- sdk/src/graphql/mod.rs | 30 +++++++++++++++--------------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/cli/src/wukong_client.rs b/cli/src/wukong_client.rs index 323e7634..92927d7b 100644 --- a/cli/src/wukong_client.rs +++ b/cli/src/wukong_client.rs @@ -7,7 +7,15 @@ use log::debug; use std::collections::HashMap; use wukong_sdk::{ graphql::{ - application_config_query, application_query, application_with_k8s_cluster_query, applications_query, appsignal_apps_query, appsignal_average_error_rate_query, appsignal_average_latency_query, appsignal_average_throughput_query, appsignal_deploy_markers_query, appsignal_exception_incidents_query, cd_pipeline_for_rollback_query, cd_pipeline_github_query, cd_pipeline_query, cd_pipelines_query, changelogs_query, ci_status_query, deploy_livebook, deployment::cd_pipeline_status_query, destroy_livebook, execute_cd_pipeline, is_authorized_query, kubernetes_pods_query, livebook_resource_query, multi_branch_pipeline_query, pipeline_query, pipelines_query, AppsignalTimeFrame + application_config_query, application_query, application_with_k8s_cluster_query, + applications_query, appsignal_apps_query, appsignal_average_error_rate_query, + appsignal_average_latency_query, appsignal_average_throughput_query, + appsignal_deploy_markers_query, appsignal_exception_incidents_query, + cd_pipeline_for_rollback_query, cd_pipeline_github_query, cd_pipeline_query, + cd_pipelines_query, changelogs_query, ci_status_query, deploy_livebook, + deployment::cd_pipeline_status_query, destroy_livebook, execute_cd_pipeline, + is_authorized_query, kubernetes_pods_query, livebook_resource_query, + multi_branch_pipeline_query, pipeline_query, pipelines_query, AppsignalTimeFrame, }, services::{ gcloud::{DatabaseMetrics, LogEntries, LogEntriesOptions, TokenInfo}, diff --git a/sdk/src/graphql/mod.rs b/sdk/src/graphql/mod.rs index da3b2206..785b2bd3 100644 --- 
a/sdk/src/graphql/mod.rs +++ b/sdk/src/graphql/mod.rs @@ -751,21 +751,21 @@ impl WKClient { } pub async fn fetch_application_config( - &self, - name: &str, - ) -> Result { - let gql_client = setup_gql_client(&self.access_token, &self.channel)?; - - gql_client - .post_graphql::( - &self.api_url, - application_config_query::Variables { - name: name.to_string(), - }, - ) - .await - .map_err(|err| err.into()) - } + &self, + name: &str, + ) -> Result { + let gql_client = setup_gql_client(&self.access_token, &self.channel)?; + + gql_client + .post_graphql::( + &self.api_url, + application_config_query::Variables { + name: name.to_string(), + }, + ) + .await + .map_err(|err| err.into()) + } /// Fetch the deploy markers from Appsignal /// the default value for `limit` is 1 From ff6eec52b1db4e9a91d8eeabb7f905a7f8661091 Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Tue, 12 Mar 2024 08:39:39 +0000 Subject: [PATCH 05/10] feat: change function name to has_application_config --- cli/src/commands/application/init.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index cf962f6f..fc0f6a5c 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -42,7 +42,7 @@ pub async fn handle_application_init(context: Context) -> Result Result, WKCliError> { Ok(workflow_names) } -async fn get_application_config(wk_client: &mut WKClient, name: &str) -> Result { +async fn has_application_config(wk_client: &mut WKClient, name: &str) -> Result { let application_config = match wk_client.fetch_application_config(name).await { Ok(resp) => Ok(resp), Err(err) => match &err { From 3d0c18fe4ea960c066082b3c9d39f6ab1a087c56 Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Tue, 12 Mar 2024 08:40:28 +0000 Subject: [PATCH 06/10] feat: removed unused fields while fetching application config --- .../graphql/query/application_config.graphql | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/sdk/src/graphql/query/application_config.graphql b/sdk/src/graphql/query/application_config.graphql index 3e423c36..31c3bde3 100644 --- a/sdk/src/graphql/query/application_config.graphql +++ b/sdk/src/graphql/query/application_config.graphql @@ -1,34 +1,4 @@ query ApplicationConfigQuery($name: String!) 
{ applicationConfig(name: $name) { name - enable - namespaces { - appsignal { - defaultNamespace - enable - environment - } - build { - buildWorkflow - } - cloudsql { - enable - projectId - } - delivery { - baseReplica - rolloutStrategy - target - } - honeycomb { - dataset - enable - } - type - } - workflows { - excludedWorkflows - provider - } - } } From fe95fe2b3029a4474e9d76095823482b0c3e83bc Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Tue, 12 Mar 2024 08:48:42 +0000 Subject: [PATCH 07/10] feat: use is_some at the end of the function --- cli/src/commands/application/init.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index fc0f6a5c..dd8668b6 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -42,11 +42,11 @@ pub async fn handle_application_init(context: Context) -> Result Result, WKCliError> { Ok(workflow_names) } -async fn has_application_config(wk_client: &mut WKClient, name: &str) -> Result { +async fn get_application_config(wk_client: &mut WKClient, name: &str) -> Result { let application_config = match wk_client.fetch_application_config(name).await { Ok(resp) => Ok(resp), Err(err) => match &err { From f4d41d8d8b36ff0f8b723f7c3563f9b8c2b4aaee Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Tue, 12 Mar 2024 08:55:36 +0000 Subject: [PATCH 08/10] feat: updated logic to use is_some --- cli/src/commands/application/init.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cli/src/commands/application/init.rs b/cli/src/commands/application/init.rs index dd8668b6..3db613e0 100644 --- a/cli/src/commands/application/init.rs +++ b/cli/src/commands/application/init.rs @@ -19,7 +19,10 @@ use inquire::{required, CustomType}; use std::fs; use wukong_sdk::{ error::{APIError, WKError}, - graphql::{application_config_query, appsignal_apps_query::AppsignalAppsQueryAppsignalApps}, + graphql::{ + application_config_query::{self, ApplicationConfigQueryApplicationConfig}, + appsignal_apps_query::AppsignalAppsQueryAppsignalApps, + }, }; pub async fn handle_application_init(context: Context) -> Result { @@ -42,7 +45,9 @@ pub async fn handle_application_init(context: Context) -> Result Result, WKCliError> { Ok(workflow_names) } -async fn get_application_config(wk_client: &mut WKClient, name: &str) -> Result { +async fn get_application_config( + wk_client: &mut WKClient, + name: &str, +) -> Result, WKCliError> { let application_config = match wk_client.fetch_application_config(name).await { Ok(resp) => Ok(resp), Err(err) => match &err { @@ -335,5 +343,5 @@ async fn get_application_config(wk_client: &mut WKClient, name: &str) -> Result< }? .application_config; - Ok(application_config.is_some()) + Ok(application_config) } From e680caa0114f58fd0f9b3e536d0ccc624bcb34b9 Mon Sep 17 00:00:00 2001 From: Mohamed Fauzaan Date: Tue, 12 Mar 2024 08:57:02 +0000 Subject: [PATCH 09/10] fix: add missing bracket --- sdk/src/graphql/query/application_config.graphql | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/src/graphql/query/application_config.graphql b/sdk/src/graphql/query/application_config.graphql index 31c3bde3..30828e91 100644 --- a/sdk/src/graphql/query/application_config.graphql +++ b/sdk/src/graphql/query/application_config.graphql @@ -1,4 +1,5 @@ query ApplicationConfigQuery($name: String!) 
{ applicationConfig(name: $name) { name + } } From 78e4a49a45f412d901344036ce5bcb041f459f58 Mon Sep 17 00:00:00 2001 From: Alex Co Date: Thu, 14 Mar 2024 11:45:38 +0800 Subject: [PATCH 10/10] Update generated code from googleapis Signed-off-by: Alex Co --- sdk/proto/googleapis | 2 +- sdk/src/services/gcloud/api/google.api.rs | 28 + .../gcloud/api/google.cloud.sql.v1.rs | 539 ++++++++++++++++-- 3 files changed, 512 insertions(+), 57 deletions(-) diff --git a/sdk/proto/googleapis b/sdk/proto/googleapis index a3770599..738ff24c 160000 --- a/sdk/proto/googleapis +++ b/sdk/proto/googleapis @@ -1 +1 @@ -Subproject commit a3770599794a8d319286df96f03343b6cd0e7f4f +Subproject commit 738ff24cb9c00be062dc200c10426df7b13d1e65 diff --git a/sdk/src/services/gcloud/api/google.api.rs b/sdk/src/services/gcloud/api/google.api.rs index e88eddc5..ec4c5566 100644 --- a/sdk/src/services/gcloud/api/google.api.rs +++ b/sdk/src/services/gcloud/api/google.api.rs @@ -719,6 +719,19 @@ pub struct MethodSettings { /// seconds: 54000 # 90 minutes #[prost(message, optional, tag = "2")] pub long_running: ::core::option::Option, + /// List of top-level fields of the request message, that should be + /// automatically populated by the client libraries based on their + /// (google.api.field_info).format. Currently supported format: UUID4. + /// + /// Example of a YAML configuration: + /// + /// publishing: + /// method_settings: + /// - selector: google.example.v1.ExampleService.CreateExample + /// auto_populated_fields: + /// - request_id + #[prost(string, repeated, tag = "3")] + pub auto_populated_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `MethodSettings`. pub mod method_settings { @@ -884,6 +897,19 @@ pub enum FieldBehavior { /// a non-empty value will be returned. The user will not be aware of what /// non-empty value to expect. NonEmptyDefault = 7, + /// Denotes that the field in a resource (a message annotated with + /// google.api.resource) is used in the resource name to uniquely identify the + /// resource. For AIP-compliant APIs, this should only be applied to the + /// `name` field on the resource. + /// + /// This behavior should not be applied to references to other resources within + /// the message. + /// + /// The identifier field of resources often have different field behavior + /// depending on the request it is embedded in (e.g. for Create methods name + /// is optional and unused, while for Update methods it is required). Instead + /// of method-specific annotations, only `IDENTIFIER` is required. + Identifier = 8, } impl FieldBehavior { /// String value of the enum field names used in the ProtoBuf definition. @@ -900,6 +926,7 @@ impl FieldBehavior { FieldBehavior::Immutable => "IMMUTABLE", FieldBehavior::UnorderedList => "UNORDERED_LIST", FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT", + FieldBehavior::Identifier => "IDENTIFIER", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -913,6 +940,7 @@ impl FieldBehavior { "IMMUTABLE" => Some(Self::Immutable), "UNORDERED_LIST" => Some(Self::UnorderedList), "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault), + "IDENTIFIER" => Some(Self::Identifier), _ => None, } } diff --git a/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs b/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs index f193de91..731b8c56 100644 --- a/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs +++ b/sdk/src/services/gcloud/api/google.cloud.sql.v1.rs @@ -54,6 +54,12 @@ pub mod api_warning { /// Warning when user provided maxResults parameter exceeds the limit. The /// returned result set may be incomplete. MaxResultsExceedsLimit = 2, + /// Warning when user tries to create/update a user with credentials that + /// have previously been compromised by a public data breach. + CompromisedCredentials = 3, + /// Warning when the operation succeeds but some non-critical workflow state + /// failed. + InternalStateFailure = 4, } impl SqlApiWarningCode { /// String value of the enum field names used in the ProtoBuf definition. @@ -65,6 +71,8 @@ pub mod api_warning { SqlApiWarningCode::Unspecified => "SQL_API_WARNING_CODE_UNSPECIFIED", SqlApiWarningCode::RegionUnreachable => "REGION_UNREACHABLE", SqlApiWarningCode::MaxResultsExceedsLimit => "MAX_RESULTS_EXCEEDS_LIMIT", + SqlApiWarningCode::CompromisedCredentials => "COMPROMISED_CREDENTIALS", + SqlApiWarningCode::InternalStateFailure => "INTERNAL_STATE_FAILURE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -73,6 +81,8 @@ pub mod api_warning { "SQL_API_WARNING_CODE_UNSPECIFIED" => Some(Self::Unspecified), "REGION_UNREACHABLE" => Some(Self::RegionUnreachable), "MAX_RESULTS_EXCEEDS_LIMIT" => Some(Self::MaxResultsExceedsLimit), + "COMPROMISED_CREDENTIALS" => Some(Self::CompromisedCredentials), + "INTERNAL_STATE_FAILURE" => Some(Self::InternalStateFailure), _ => None, } } @@ -550,6 +560,17 @@ pub mod import_context { /// Type of the bak content, FULL or DIFF #[prost(enumeration = "super::BakType", tag = "6")] pub bak_type: i32, + /// Optional. The timestamp when the import should stop. This timestamp is in + /// the [RFC 3339]() format (for example, + /// `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT + /// keyword and applies to Cloud SQL for SQL Server only. + #[prost(message, optional, tag = "7")] + pub stop_at: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The marked transaction where the import should stop. This field + /// is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL + /// Server only. + #[prost(string, tag = "8")] + pub stop_at_mark: ::prost::alloc::string::String, } /// Nested message and enum types in `SqlBakImportOptions`. pub mod sql_bak_import_options { @@ -585,7 +606,15 @@ pub struct IpConfiguration { /// be updated, but it cannot be removed after it is set. #[prost(string, tag = "2")] pub private_network: ::prost::alloc::string::String, - /// Whether SSL connections over IP are enforced or not. + /// Use `ssl_mode` instead for MySQL and PostgreSQL. SQL Server uses this flag. + /// + /// Whether SSL/TLS connections over IP are enforced. + /// If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. + /// For SSL/TLS connections, the client certificate won't be verified. If + /// set to true, then only allow connections encrypted with SSL/TLS and with + /// valid client certificates. 
If you want to enforce SSL/TLS without enforcing + /// the requirement for valid client certificates, then use the `ssl_mode` flag + /// instead of the `require_ssl` flag. #[prost(message, optional, tag = "3")] pub require_ssl: ::core::option::Option, /// The list of external networks that are allowed to connect to the instance @@ -605,6 +634,112 @@ pub struct IpConfiguration { /// such as BigQuery. #[prost(message, optional, tag = "7")] pub enable_private_path_for_google_cloud_services: ::core::option::Option, + /// Specify how SSL/TLS is enforced in database connections. MySQL and + /// PostgreSQL use the `ssl_mode` flag. If you must use the `require_ssl` flag + /// for backward compatibility, then only the following value pairs are valid: + /// + /// * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` + /// * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` + /// * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` + /// + /// The value of `ssl_mode` gets priority over the value of `require_ssl`. For + /// example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, + /// the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the + /// `require_ssl=false` means accept both non-SSL and SSL connections. MySQL + /// and PostgreSQL databases respect `ssl_mode` in this case and accept only + /// SSL connections. + /// + /// SQL Server uses the `require_ssl` flag. You can set the value for this flag + /// to `true` or `false`. + #[prost(enumeration = "ip_configuration::SslMode", tag = "8")] + pub ssl_mode: i32, + /// PSC settings for this instance. + #[prost(message, optional, tag = "9")] + pub psc_config: ::core::option::Option, +} +/// Nested message and enum types in `IpConfiguration`. +pub mod ip_configuration { + /// The SSL options for database connections. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SslMode { + /// The SSL mode is unknown. + Unspecified = 0, + /// Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, + /// the client certificate won't be verified. + /// When this value is used, the legacy `require_ssl` flag must be false or + /// cleared to avoid the conflict between values of two flags. + AllowUnencryptedAndEncrypted = 1, + /// Only allow connections encrypted with SSL/TLS. + /// When this value is used, the legacy `require_ssl` flag must be false or + /// cleared to avoid the conflict between values of two flags. + EncryptedOnly = 2, + /// Only allow connections encrypted with SSL/TLS and with valid + /// client certificates. + /// When this value is used, the legacy `require_ssl` flag must be true or + /// cleared to avoid the conflict between values of two flags. + TrustedClientCertificateRequired = 3, + } + impl SslMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SslMode::Unspecified => "SSL_MODE_UNSPECIFIED", + SslMode::AllowUnencryptedAndEncrypted => { + "ALLOW_UNENCRYPTED_AND_ENCRYPTED" + } + SslMode::EncryptedOnly => "ENCRYPTED_ONLY", + SslMode::TrustedClientCertificateRequired => { + "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" + } + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SSL_MODE_UNSPECIFIED" => Some(Self::Unspecified), + "ALLOW_UNENCRYPTED_AND_ENCRYPTED" => { + Some(Self::AllowUnencryptedAndEncrypted) + } + "ENCRYPTED_ONLY" => Some(Self::EncryptedOnly), + "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" => { + Some(Self::TrustedClientCertificateRequired) + } + _ => None, + } + } + } +} +/// PSC settings for a Cloud SQL instance. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PscConfig { + /// Whether PSC connectivity is enabled for this instance. + #[prost(bool, optional, tag = "1")] + pub psc_enabled: ::core::option::Option, + /// Optional. The list of consumer projects that are allow-listed for PSC + /// connections to this instance. This instance can be connected to with PSC + /// from any network in these projects. + /// + /// Each consumer project in this list may be represented by a project number + /// (numeric) or by a project id (alphanumeric). + #[prost(string, repeated, tag = "2")] + pub allowed_consumer_projects: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, } /// Preferred location. This specifies where a Cloud SQL instance is located. /// Note that if the preferred location is not available, the instance will be @@ -624,6 +759,7 @@ pub struct LocationPreference { pub zone: ::prost::alloc::string::String, /// The preferred Compute Engine zone for the secondary/failover /// (for example: us-central1-a, us-central1-b, etc.). + /// To disable this field, set it to 'no_secondary_zone'. #[prost(string, tag = "4")] pub secondary_zone: ::prost::alloc::string::String, /// This is always `sql#locationPreference`. @@ -765,7 +901,7 @@ pub struct DiskEncryptionStatus { #[prost(string, tag = "2")] pub kind: ::prost::alloc::string::String, } -/// Database instance IP Mapping. +/// Database instance IP mapping #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IpMapping { @@ -821,6 +957,9 @@ pub struct Operation { /// populated. #[prost(message, optional, tag = "8")] pub error: ::core::option::Option, + /// An Admin API warning message. + #[prost(message, optional, tag = "19")] + pub api_warning: ::core::option::Option, /// The type of the operation. Valid values are: /// /// * `CREATE` @@ -1148,6 +1287,10 @@ pub struct PasswordValidationPolicy { /// Whether the password policy is enabled or not. #[prost(message, optional, tag = "6")] pub enable_password_policy: ::core::option::Option, + /// Disallow credentials that have been previously compromised by a public data + /// breach. + #[prost(message, optional, tag = "7")] + pub disallow_compromised_credentials: ::core::option::Option, } /// Nested message and enum types in `PasswordValidationPolicy`. pub mod password_validation_policy { @@ -1615,12 +1758,14 @@ impl SqlFileType { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum BakType { - /// default type. + /// Default type. Unspecified = 0, /// Full backup. Full = 1, /// Differential backup. Diff = 2, + /// Transaction Log backup + Tlog = 3, } impl BakType { /// String value of the enum field names used in the ProtoBuf definition. @@ -1632,6 +1777,7 @@ impl BakType { BakType::Unspecified => "BAK_TYPE_UNSPECIFIED", BakType::Full => "FULL", BakType::Diff => "DIFF", + BakType::Tlog => "TLOG", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1640,6 +1786,7 @@ impl BakType { "BAK_TYPE_UNSPECIFIED" => Some(Self::Unspecified), "FULL" => Some(Self::Full), "DIFF" => Some(Self::Diff), + "TLOG" => Some(Self::Tlog), _ => None, } } @@ -2048,6 +2195,11 @@ pub enum SqlUpdateTrack { /// your instance prefer to let Cloud SQL choose the timing of restart (within /// its Maintenance window, if applicable). Stable = 2, + /// For instance update that requires a restart, this update track indicates + /// your instance prefer to let Cloud SQL choose the timing of restart (within + /// its Maintenance window, if applicable) to be at least 5 weeks after the + /// notification. + Week5 = 3, } impl SqlUpdateTrack { /// String value of the enum field names used in the ProtoBuf definition. @@ -2059,6 +2211,7 @@ impl SqlUpdateTrack { SqlUpdateTrack::Unspecified => "SQL_UPDATE_TRACK_UNSPECIFIED", SqlUpdateTrack::Canary => "canary", SqlUpdateTrack::Stable => "stable", + SqlUpdateTrack::Week5 => "week5", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -2067,6 +2220,7 @@ impl SqlUpdateTrack { "SQL_UPDATE_TRACK_UNSPECIFIED" => Some(Self::Unspecified), "canary" => Some(Self::Canary), "stable" => Some(Self::Stable), + "week5" => Some(Self::Week5), _ => None, } } @@ -2120,6 +2274,20 @@ pub struct SqlInstancesDemoteMasterRequest { #[prost(message, optional, tag = "100")] pub body: ::core::option::Option, } +/// Instance demote request. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SqlInstancesDemoteRequest { + /// Required. Cloud SQL instance name. + #[prost(string, tag = "1")] + pub instance: ::prost::alloc::string::String, + /// Required. ID of the project that contains the instance. + #[prost(string, tag = "2")] + pub project: ::prost::alloc::string::String, + /// Required. The request body. + #[prost(message, optional, tag = "100")] + pub body: ::core::option::Option, +} /// Instance export request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2244,6 +2412,27 @@ pub struct SqlInstancesPromoteReplicaRequest { /// ID of the project that contains the read replica. #[prost(string, tag = "2")] pub project: ::prost::alloc::string::String, + /// Set to true if the promote operation should attempt to re-add the original + /// primary as a replica when it comes back online. Otherwise, if this value is + /// false or not set, the original primary will be a standalone instance. + #[prost(bool, tag = "3")] + pub failover: bool, +} +/// Instance switchover request. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SqlInstancesSwitchoverRequest { + /// Cloud SQL read replica instance name. + #[prost(string, tag = "1")] + pub instance: ::prost::alloc::string::String, + /// ID of the project that contains the replica. + #[prost(string, tag = "2")] + pub project: ::prost::alloc::string::String, + /// Optional. (MySQL only) Cloud SQL instance operations timeout, which is a + /// sum of all database operations. Default value is 10 minutes and can be + /// modified to a maximum value of 24 hours. + #[prost(message, optional, tag = "3")] + pub db_timeout: ::core::option::Option<::prost_types::Duration>, } /// Instance reset SSL config request. #[allow(clippy::derive_partial_eq_without_eq)] @@ -2563,10 +2752,7 @@ pub struct SqlInstancesStartExternalSyncRequest { pub skip_verification: bool, /// Optional. Parallel level for initial data sync. 
Currently only applicable /// for MySQL. - #[prost( - enumeration = "sql_instances_start_external_sync_request::ExternalSyncParallelLevel", - tag = "7" - )] + #[prost(enumeration = "ExternalSyncParallelLevel", tag = "7")] pub sync_parallel_level: i32, #[prost(oneof = "sql_instances_start_external_sync_request::SyncConfig", tags = "6")] pub sync_config: ::core::option::Option< @@ -2575,55 +2761,6 @@ pub struct SqlInstancesStartExternalSyncRequest { } /// Nested message and enum types in `SqlInstancesStartExternalSyncRequest`. pub mod sql_instances_start_external_sync_request { - /// External Sync parallel level. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum ExternalSyncParallelLevel { - /// Unknown sync parallel level. Will be defaulted to OPTIMAL. - Unspecified = 0, - /// Minimal parallel level. - Min = 1, - /// Optimal parallel level. - Optimal = 2, - /// Maximum parallel level. - Max = 3, - } - impl ExternalSyncParallelLevel { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ExternalSyncParallelLevel::Unspecified => { - "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED" - } - ExternalSyncParallelLevel::Min => "MIN", - ExternalSyncParallelLevel::Optimal => "OPTIMAL", - ExternalSyncParallelLevel::Max => "MAX", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED" => Some(Self::Unspecified), - "MIN" => Some(Self::Min), - "OPTIMAL" => Some(Self::Optimal), - "MAX" => Some(Self::Max), - _ => None, - } - } - } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum SyncConfig { @@ -2672,6 +2809,15 @@ pub struct InstancesDemoteMasterRequest { #[prost(message, optional, tag = "1")] pub demote_master_context: ::core::option::Option, } +/// This request is used to demote an existing standalone instance to be a +/// Cloud SQL read replica for an external database server. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstancesDemoteRequest { + /// Required. Contains details about the demote operation. + #[prost(message, optional, tag = "1")] + pub demote_context: ::core::option::Option, +} /// Database instance export request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2790,6 +2936,28 @@ pub struct SqlInstancesGetDiskShrinkConfigResponse { #[prost(string, tag = "3")] pub message: ::prost::alloc::string::String, } +/// Instance get latest recovery time request. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SqlInstancesGetLatestRecoveryTimeRequest { + /// Cloud SQL instance ID. This does not include the project ID. + #[prost(string, tag = "1")] + pub instance: ::prost::alloc::string::String, + /// Project ID of the project that contains the instance. + #[prost(string, tag = "2")] + pub project: ::prost::alloc::string::String, +} +/// Instance get latest recovery time response. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SqlInstancesGetLatestRecoveryTimeResponse { + /// This is always `sql#getLatestRecoveryTime`. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// Timestamp, identifies the latest recovery time of the source instance. + #[prost(message, optional, tag = "2")] + pub latest_recovery_time: ::core::option::Option<::prost_types::Timestamp>, +} /// Database instance clone context. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2825,6 +2993,11 @@ pub struct CloneContext { /// instance. Clone all databases if empty. #[prost(string, repeated, tag = "9")] pub database_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. (Point-in-time recovery for PostgreSQL only) Clone to an instance + /// in the specified zone. If no zone is specified, clone to the same zone as + /// the source instance. + #[prost(string, optional, tag = "10")] + pub preferred_zone: ::core::option::Option<::prost::alloc::string::String>, } /// Binary log coordinates. #[allow(clippy::derive_partial_eq_without_eq)] @@ -3004,6 +3177,27 @@ pub struct DatabaseInstance { /// The current software version on the instance. #[prost(string, tag = "42")] pub maintenance_version: ::prost::alloc::string::String, + #[prost( + enumeration = "database_instance::SqlNetworkArchitecture", + optional, + tag = "47" + )] + pub sql_network_architecture: ::core::option::Option, + /// Output only. The link to service attachment of PSC instance. + #[prost(string, optional, tag = "48")] + pub psc_service_attachment_link: ::core::option::Option< + ::prost::alloc::string::String, + >, + /// Output only. The dns name of the instance. + #[prost(string, optional, tag = "49")] + pub dns_name: ::core::option::Option<::prost::alloc::string::String>, + /// Output only. DEPRECATED: please use write_endpoint instead. + #[deprecated] + #[prost(string, optional, tag = "51")] + pub primary_dns_name: ::core::option::Option<::prost::alloc::string::String>, + /// Output only. The dns name of the primary instance in a replication group. + #[prost(string, optional, tag = "52")] + pub write_endpoint: ::core::option::Option<::prost::alloc::string::String>, } /// Nested message and enum types in `DatabaseInstance`. pub mod database_instance { @@ -3175,6 +3369,54 @@ pub mod database_instance { } } } + /// The current SQL network architecture for the instance. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SqlNetworkArchitecture { + Unspecified = 0, + /// Instance is a Tenancy Unit (TU) instance. + NewNetworkArchitecture = 1, + /// Instance is an Umbrella instance. + OldNetworkArchitecture = 2, + } + impl SqlNetworkArchitecture { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SqlNetworkArchitecture::Unspecified => { + "SQL_NETWORK_ARCHITECTURE_UNSPECIFIED" + } + SqlNetworkArchitecture::NewNetworkArchitecture => { + "NEW_NETWORK_ARCHITECTURE" + } + SqlNetworkArchitecture::OldNetworkArchitecture => { + "OLD_NETWORK_ARCHITECTURE" + } + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SQL_NETWORK_ARCHITECTURE_UNSPECIFIED" => Some(Self::Unspecified), + "NEW_NETWORK_ARCHITECTURE" => Some(Self::NewNetworkArchitecture), + "OLD_NETWORK_ARCHITECTURE" => Some(Self::OldNetworkArchitecture), + _ => None, + } + } + } } /// Reschedule options for maintenance windows. #[allow(clippy::derive_partial_eq_without_eq)] @@ -3276,6 +3518,19 @@ pub struct DemoteMasterContext { #[prost(bool, tag = "5")] pub skip_replication_setup: bool, } +/// This context is used to demote an existing standalone instance to be +/// a Cloud SQL read replica for an external database server. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DemoteContext { + /// This is always `sql#demoteContext`. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// Required. The name of the instance which acts as the on-premises primary + /// instance in the replication setup. + #[prost(string, tag = "2")] + pub source_representative_instance_name: ::prost::alloc::string::String, +} /// Database instance failover context. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -3432,6 +3687,21 @@ pub mod sql_external_sync_setting_error { InvalidFileInfo = 32, /// The source instance has unsupported database settings for migration. UnsupportedDatabaseSettings = 33, + /// The replication user is missing parallel import specific privileges. + /// (e.g. LOCK TABLES) for MySQL. + MysqlParallelImportInsufficientPrivilege = 34, + /// The global variable local_infile is off on external server replica. + LocalInfileOff = 35, + /// This code instructs customers to turn on point-in-time recovery manually + /// for the instance after promoting the Cloud SQL for PostgreSQL instance. + TurnOnPitrAfterPromote = 36, + /// The minor version of replica database is incompatible with the source. + IncompatibleDatabaseMinorVersion = 37, + /// This warning message indicates that Cloud SQL uses the maximum number of + /// subscriptions to migrate data from the source to the destination. + SourceMaxSubscriptions = 38, + /// Unable to verify definers on the source for MySQL. + UnableToVerifyDefiners = 39, } impl SqlExternalSyncSettingErrorType { /// String value of the enum field names used in the ProtoBuf definition. @@ -3534,6 +3804,22 @@ pub mod sql_external_sync_setting_error { SqlExternalSyncSettingErrorType::UnsupportedDatabaseSettings => { "UNSUPPORTED_DATABASE_SETTINGS" } + SqlExternalSyncSettingErrorType::MysqlParallelImportInsufficientPrivilege => { + "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" + } + SqlExternalSyncSettingErrorType::LocalInfileOff => "LOCAL_INFILE_OFF", + SqlExternalSyncSettingErrorType::TurnOnPitrAfterPromote => { + "TURN_ON_PITR_AFTER_PROMOTE" + } + SqlExternalSyncSettingErrorType::IncompatibleDatabaseMinorVersion => { + "INCOMPATIBLE_DATABASE_MINOR_VERSION" + } + SqlExternalSyncSettingErrorType::SourceMaxSubscriptions => { + "SOURCE_MAX_SUBSCRIPTIONS" + } + SqlExternalSyncSettingErrorType::UnableToVerifyDefiners => { + "UNABLE_TO_VERIFY_DEFINERS" + } } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -3589,6 +3875,16 @@ pub mod sql_external_sync_setting_error { "UNSUPPORTED_DATABASE_SETTINGS" => { Some(Self::UnsupportedDatabaseSettings) } + "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" => { + Some(Self::MysqlParallelImportInsufficientPrivilege) + } + "LOCAL_INFILE_OFF" => Some(Self::LocalInfileOff), + "TURN_ON_PITR_AFTER_PROMOTE" => Some(Self::TurnOnPitrAfterPromote), + "INCOMPATIBLE_DATABASE_MINOR_VERSION" => { + Some(Self::IncompatibleDatabaseMinorVersion) + } + "SOURCE_MAX_SUBSCRIPTIONS" => Some(Self::SourceMaxSubscriptions), + "UNABLE_TO_VERIFY_DEFINERS" => Some(Self::UnableToVerifyDefiners), _ => None, } } @@ -3649,6 +3945,50 @@ pub struct ReplicaConfiguration { /// the replica has to be in different zone with the primary instance. #[prost(message, optional, tag = "3")] pub failover_target: ::core::option::Option, + /// Optional. Specifies if a SQL Server replica is a cascadable replica. A + /// cascadable replica is a SQL Server cross region replica that supports + /// replica(s) under it. + #[prost(message, optional, tag = "5")] + pub cascadable_replica: ::core::option::Option, +} +/// External Sync parallel level. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ExternalSyncParallelLevel { + /// Unknown sync parallel level. Will be defaulted to OPTIMAL. + Unspecified = 0, + /// Minimal parallel level. + Min = 1, + /// Optimal parallel level. + Optimal = 2, + /// Maximum parallel level. + Max = 3, +} +impl ExternalSyncParallelLevel { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ExternalSyncParallelLevel::Unspecified => { + "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED" + } + ExternalSyncParallelLevel::Min => "MIN", + ExternalSyncParallelLevel::Optimal => "OPTIMAL", + ExternalSyncParallelLevel::Max => "MAX", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED" => Some(Self::Unspecified), + "MIN" => Some(Self::Min), + "OPTIMAL" => Some(Self::Optimal), + "MAX" => Some(Self::Max), + _ => None, + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -3930,6 +4270,32 @@ pub mod sql_instances_service_client { ); self.inner.unary(req, path, codec).await } + /// Demotes an existing standalone instance to be a Cloud SQL read replica + /// for an external database server. + pub async fn demote( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.cloud.sql.v1.SqlInstancesService/Demote", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("google.cloud.sql.v1.SqlInstancesService", "Demote"), + ); + self.inner.unary(req, path, codec).await + } /// Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL /// dump or CSV file. 
pub async fn export( @@ -4216,6 +4582,34 @@ pub mod sql_instances_service_client { ); self.inner.unary(req, path, codec).await } + /// Switches over from the primary instance to the replica instance. + pub async fn switchover( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.cloud.sql.v1.SqlInstancesService/Switchover", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "Switchover", + ), + ); + self.inner.unary(req, path, codec).await + } /// Deletes all client certificates and generates a new server SSL certificate /// for the instance. pub async fn reset_ssl_config( @@ -4652,5 +5046,38 @@ pub mod sql_instances_service_client { ); self.inner.unary(req, path, codec).await } + /// Get Latest Recovery Time for a given instance. + pub async fn get_latest_recovery_time( + &mut self, + request: impl tonic::IntoRequest< + super::SqlInstancesGetLatestRecoveryTimeRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.cloud.sql.v1.SqlInstancesService/GetLatestRecoveryTime", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.cloud.sql.v1.SqlInstancesService", + "GetLatestRecoveryTime", + ), + ); + self.inner.unary(req, path, codec).await + } } }