From 9754bacfc8f5b7cb94e440d05982ee9c357a6a9b Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 3 Sep 2024 15:39:17 -0500 Subject: [PATCH] go rewrite - general refresh and diffs 9/3 (#11626) --- mmv1/api/resource.go | 16 +- mmv1/api/type.go | 2 +- mmv1/description-copy.go | 3 + .../ServicePerimeterDryRunEgressPolicy.yaml | 164 - .../accesscontextmanager/go_AccessPolicy.yaml | 6 +- .../go_ServicePerimeter.yaml | 5 +- ...go_ServicePerimeterDryRunEgressPolicy.yaml | 189 + ...o_ServicePerimeterDryRunIngressPolicy.yaml | 198 + .../go_ServicePerimeterDryRunResource.yaml | 6 +- .../go_ServicePerimeterEgressPolicy.yaml | 2 + .../go_ServicePerimeterIngressPolicy.yaml | 7 +- mmv1/products/activedirectory/go_Domain.yaml | 30 +- .../activedirectory/go_DomainTrust.yaml | 1 + mmv1/products/activedirectory/go_Peering.yaml | 2 +- mmv1/products/alloydb/go_Backup.yaml | 1 - mmv1/products/alloydb/go_Cluster.yaml | 45 +- mmv1/products/alloydb/go_Instance.yaml | 14 +- mmv1/products/apigateway/go_Api.yaml | 1 - mmv1/products/apigateway/go_ApiConfig.yaml | 1 - mmv1/products/apigateway/go_Gateway.yaml | 1 - .../appengine/go_FlexibleAppVersion.yaml | 4 +- .../appengine/go_ServiceNetworkSettings.yaml | 2 +- .../appengine/go_ServiceSplitTraffic.yaml | 2 +- .../appengine/go_StandardAppVersion.yaml | 3 +- mmv1/products/apphub/Application.yaml | 2 +- mmv1/products/apphub/Service.yaml | 2 +- mmv1/products/apphub/Workload.yaml | 2 +- mmv1/products/apphub/go_Application.yaml | 2 +- mmv1/products/apphub/go_Service.yaml | 2 +- mmv1/products/apphub/go_Workload.yaml | 2 +- .../products/artifactregistry/Repository.yaml | 26 +- .../artifactregistry/go_Repository.yaml | 28 +- mmv1/products/backupdr/BackupVault.yaml | 129 +- mmv1/products/backupdr/go_BackupVault.yaml | 178 + mmv1/products/backupdr/go_product.yaml | 2 +- .../products/beyondcorp/go_AppConnection.yaml | 1 - mmv1/products/beyondcorp/go_AppConnector.yaml | 1 - mmv1/products/beyondcorp/go_AppGateway.yaml | 1 - 
mmv1/products/bigquery/go_DatasetAccess.yaml | 2 + mmv1/products/bigquery/go_Table.yaml | 8 +- .../bigqueryanalyticshub/DataExchange.yaml | 4 +- .../bigqueryanalyticshub/Listing.yaml | 6 +- .../bigqueryanalyticshub/go_DataExchange.yaml | 41 +- .../bigqueryanalyticshub/go_Listing.yaml | 32 +- .../bigqueryanalyticshub/go_product.yaml | 2 +- .../bigquerydatatransfer/go_Config.yaml | 23 + .../bigqueryreservation/go_Reservation.yaml | 5 - mmv1/products/billing/go_ProjectInfo.yaml | 2 +- .../binaryauthorization/go_Policy.yaml | 1 + .../go_BlockchainNodes.yaml | 1 - .../certificatemanager/go_Certificate.yaml | 1 - .../go_CertificateIssuanceConfig.yaml | 1 - .../certificatemanager/go_CertificateMap.yaml | 1 - .../go_CertificateMapEntry.yaml | 1 - .../go_DnsAuthorization.yaml | 1 - .../certificatemanager/go_TrustConfig.yaml | 1 - mmv1/products/clouddeploy/go_Automation.yaml | 1 - .../clouddeploy/go_CustomTargetType.yaml | 1 - .../clouddomains/go_Registration.yaml | 1 - .../cloudfunctions/go_CloudFunction.yaml | 1 - .../products/cloudfunctions2/go_Function.yaml | 2 +- .../cloudquotas/go_QuotaPreference.yaml | 1 + mmv1/products/cloudrunv2/go_Job.yaml | 42 +- mmv1/products/cloudrunv2/go_Service.yaml | 51 +- mmv1/products/cloudtasks/go_Queue.yaml | 173 + .../composer/go_UserWorkloadsConfigMap.yaml | 1 + mmv1/products/compute/go_Address.yaml | 2 +- mmv1/products/compute/go_Autoscaler.yaml | 2 +- mmv1/products/compute/go_BackendBucket.yaml | 2 +- .../compute/go_BackendBucketSignedUrlKey.yaml | 2 +- mmv1/products/compute/go_BackendService.yaml | 22 +- .../go_BackendServiceSignedUrlKey.yaml | 2 +- mmv1/products/compute/go_Disk.yaml | 2 +- .../go_DiskResourcePolicyAttachment.yaml | 2 +- .../compute/go_ExternalVpnGateway.yaml | 2 +- mmv1/products/compute/go_Firewall.yaml | 6 +- mmv1/products/compute/go_ForwardingRule.yaml | 11 +- mmv1/products/compute/go_GlobalAddress.yaml | 3 +- .../compute/go_GlobalForwardingRule.yaml | 3 +- .../compute/go_GlobalNetworkEndpoint.yaml | 2 +- 
.../go_GlobalNetworkEndpointGroup.yaml | 2 +- mmv1/products/compute/go_HaVpnGateway.yaml | 2 +- mmv1/products/compute/go_HealthCheck.yaml | 15 +- mmv1/products/compute/go_HttpHealthCheck.yaml | 2 +- .../products/compute/go_HttpsHealthCheck.yaml | 2 +- mmv1/products/compute/go_Image.yaml | 2 +- mmv1/products/compute/go_Instance.yaml | 8 +- mmv1/products/compute/go_InstanceGroup.yaml | 2 +- .../compute/go_InstanceGroupManager.yaml | 2 +- .../compute/go_InstanceGroupMembership.yaml | 2 +- .../compute/go_InstanceGroupNamedPort.yaml | 2 +- .../products/compute/go_InstanceSettings.yaml | 2 +- mmv1/products/compute/go_Interconnect.yaml | 14 +- .../compute/go_InterconnectAttachment.yaml | 2 +- mmv1/products/compute/go_MachineImage.yaml | 2 +- .../compute/go_ManagedSslCertificate.yaml | 3 +- mmv1/products/compute/go_Network.yaml | 2 +- .../go_NetworkEdgeSecurityService.yaml | 2 +- mmv1/products/compute/go_NetworkEndpoint.yaml | 2 +- .../compute/go_NetworkEndpointGroup.yaml | 2 +- .../products/compute/go_NetworkEndpoints.yaml | 4 +- .../compute/go_NetworkFirewallPolicy.yaml | 2 +- .../go_NetworkPeeringRoutesConfig.yaml | 3 +- mmv1/products/compute/go_NodeGroup.yaml | 2 +- mmv1/products/compute/go_NodeTemplate.yaml | 24 +- mmv1/products/compute/go_PacketMirroring.yaml | 2 +- .../compute/go_PerInstanceConfig.yaml | 2 +- .../compute/go_ProjectCloudArmorTier.yaml | 2 +- .../compute/go_PublicAdvertisedPrefix.yaml | 2 +- .../compute/go_PublicDelegatedPrefix.yaml | 2 +- .../products/compute/go_RegionAutoscaler.yaml | 2 +- .../compute/go_RegionBackendService.yaml | 28 +- .../products/compute/go_RegionCommitment.yaml | 2 +- mmv1/products/compute/go_RegionDisk.yaml | 2 +- ...go_RegionDiskResourcePolicyAttachment.yaml | 2 +- .../compute/go_RegionHealthCheck.yaml | 2 +- .../go_RegionInstanceGroupManager.yaml | 2 +- .../compute/go_RegionNetworkEndpoint.yaml | 2 +- .../go_RegionNetworkEndpointGroup.yaml | 2 +- .../go_RegionNetworkFirewallPolicy.yaml | 2 +- 
.../compute/go_RegionPerInstanceConfig.yaml | 2 +- .../compute/go_RegionSecurityPolicy.yaml | 2 +- .../compute/go_RegionSecurityPolicyRule.yaml | 2 +- .../compute/go_RegionSslCertificate.yaml | 10 +- mmv1/products/compute/go_RegionSslPolicy.yaml | 2 +- .../compute/go_RegionTargetHttpProxy.yaml | 2 +- .../compute/go_RegionTargetHttpsProxy.yaml | 11 +- .../compute/go_RegionTargetTcpProxy.yaml | 2 +- mmv1/products/compute/go_RegionUrlMap.yaml | 2 +- mmv1/products/compute/go_Reservation.yaml | 2 +- mmv1/products/compute/go_ResizeRequest.yaml | 2 +- mmv1/products/compute/go_ResourcePolicy.yaml | 10 +- mmv1/products/compute/go_Route.yaml | 2 +- mmv1/products/compute/go_Router.yaml | 2 +- mmv1/products/compute/go_RouterNat.yaml | 2 +- .../compute/go_RouterRoutePolicy.yaml | 2 +- .../compute/go_SecurityPolicyRule.yaml | 2 +- .../compute/go_ServiceAttachment.yaml | 2 +- mmv1/products/compute/go_SslCertificate.yaml | 10 +- mmv1/products/compute/go_SslPolicy.yaml | 2 +- mmv1/products/compute/go_Subnetwork.yaml | 55 +- mmv1/products/compute/go_TargetGrpcProxy.yaml | 2 +- mmv1/products/compute/go_TargetHttpProxy.yaml | 2 +- .../products/compute/go_TargetHttpsProxy.yaml | 12 +- mmv1/products/compute/go_TargetInstance.yaml | 2 +- mmv1/products/compute/go_TargetSslProxy.yaml | 2 +- mmv1/products/compute/go_TargetTcpProxy.yaml | 2 +- mmv1/products/compute/go_UrlMap.yaml | 2 +- mmv1/products/compute/go_VpnGateway.yaml | 2 +- mmv1/products/compute/go_VpnTunnel.yaml | 2 +- .../containerattached/go_Cluster.yaml | 3 +- .../go_ConnectionProfile.yaml | 69 +- .../go_PrivateConnection.yaml | 3 - mmv1/products/dataform/go_Repository.yaml | 11 +- mmv1/products/datafusion/go_Instance.yaml | 3 +- mmv1/products/dataplex/go_AspectType.yaml | 1 - mmv1/products/dataplex/go_Datascan.yaml | 2 +- mmv1/products/dataplex/go_EntryGroup.yaml | 1 - mmv1/products/dataplex/go_EntryType.yaml | 1 - mmv1/products/dataplex/go_Task.yaml | 2 +- mmv1/products/datastream/Stream.yaml | 4 - 
.../datastream/go_ConnectionProfile.yaml | 1 - .../datastream/go_PrivateConnection.yaml | 2 +- mmv1/products/datastream/go_Stream.yaml | 42 +- mmv1/products/dialogflowcx/go_Intent.yaml | 1 - .../discoveryengine/go_DataStore.yaml | 100 +- mmv1/products/discoveryengine/go_Schema.yaml | 103 + mmv1/products/dlp/go_DiscoveryConfig.yaml | 69 +- mmv1/products/dns/go_ManagedZone.yaml | 429 ++ mmv1/products/dns/go_Policy.yaml | 159 + mmv1/products/dns/go_ResponsePolicy.yaml | 90 + mmv1/products/dns/go_ResponsePolicyRule.yaml | 131 + mmv1/products/dns/go_product.yaml | 24 + mmv1/products/edgecontainer/go_Cluster.yaml | 2 + mmv1/products/edgenetwork/go_Network.yaml | 2 +- mmv1/products/edgenetwork/go_Subnet.yaml | 2 +- mmv1/products/firebase/go_Project.yaml | 6 +- mmv1/products/firebasehosting/go_Channel.yaml | 1 - .../firebasehosting/go_CustomDomain.yaml | 4 + mmv1/products/firebasehosting/go_Version.yaml | 5 + mmv1/products/firestore/go_Database.yaml | 3 - mmv1/products/firestore/go_Document.yaml | 3 + mmv1/products/firestore/go_Field.yaml | 3 + mmv1/products/firestore/go_Index.yaml | 3 +- mmv1/products/gkebackup/go_BackupPlan.yaml | 1 - mmv1/products/gkebackup/go_RestorePlan.yaml | 1 - mmv1/products/gkehub/go_Membership.yaml | 1 - mmv1/products/gkehub2/go_Feature.yaml | 8 +- .../gkehub2/go_MembershipBinding.yaml | 1 - mmv1/products/gkehub2/go_Namespace.yaml | 1 - mmv1/products/gkehub2/go_Scope.yaml | 1 - .../gkehub2/go_ScopeRBACRoleBinding.yaml | 1 - mmv1/products/iap/go_AppEngineService.yaml | 1 + mmv1/products/integrations/go_Client.yaml | 27 +- mmv1/products/kms/go_AutokeyConfig.yaml | 1 + mmv1/products/kms/go_EkmConnection.yaml | 7 + mmv1/products/kms/go_KeyHandle.yaml | 3 +- mmv1/products/logging/go_LogView.yaml | 1 - mmv1/products/managedkafka/go_Cluster.yaml | 10 +- .../go_ConnectivityTest.yaml | 1 - .../networksecurity/go_AddressGroup.yaml | 1 - .../go_AuthorizationPolicy.yaml | 1 - .../networksecurity/go_ClientTlsPolicy.yaml | 23 +- 
.../networksecurity/go_FirewallEndpoint.yaml | 1 - .../go_FirewallEndpointAssociation.yaml | 7 +- .../networksecurity/go_SecurityProfile.yaml | 1 - .../go_SecurityProfileGroup.yaml | 1 - .../networksecurity/go_ServerTlsPolicy.yaml | 26 - .../networkservices/go_EdgeCacheKeyset.yaml | 1 - .../networkservices/go_EdgeCacheOrigin.yaml | 1 - .../networkservices/go_EdgeCacheService.yaml | 1 - .../networkservices/go_EndpointPolicy.yaml | 1 - mmv1/products/networkservices/go_Gateway.yaml | 1 - .../networkservices/go_GrpcRoute.yaml | 1 - .../networkservices/go_HttpRoute.yaml | 1 - .../networkservices/go_LbRouteExtension.yaml | 1 - .../go_LbTrafficExtension.yaml | 1 - mmv1/products/networkservices/go_Mesh.yaml | 1 - .../networkservices/go_ServiceBinding.yaml | 1 - .../networkservices/go_ServiceLbPolicies.yaml | 1 - .../products/networkservices/go_TcpRoute.yaml | 10 +- mmv1/products/notebooks/go_Location.yaml | 1 - mmv1/products/parallelstore/Instance.yaml | 4 +- mmv1/products/parallelstore/go_Instance.yaml | 4 +- mmv1/products/privateca/go_CaPool.yaml | 1 - mmv1/products/privateca/go_Certificate.yaml | 1 - .../privateca/go_CertificateAuthority.yaml | 1 - .../privateca/go_CertificateTemplate.yaml | 1 - mmv1/products/pubsub/go_Schema.yaml | 1 + mmv1/products/pubsub/go_Subscription.yaml | 13 +- mmv1/products/pubsub/go_Topic.yaml | 3 +- mmv1/products/redis/go_Cluster.yaml | 119 + .../go_FolderNotificationConfig.yaml | 130 + .../securitycenterv2/go_FolderMuteConfig.yaml | 118 + .../go_FolderNotificationConfig.yaml | 138 + .../go_FolderSccBigQueryExports.yaml | 152 + .../go_OrganizationMuteConfig.yaml | 113 + .../go_OrganizationNotificationConfig.yaml | 134 + .../go_OrganizationSccBigQueryExports.yaml | 148 + .../go_OrganizationSource.yaml | 88 + .../go_ProjectMuteConfig.yaml | 106 + .../go_ProjectNotificationConfig.yaml | 131 + .../go_ProjectSccBigQueryExports.yaml | 143 + .../products/securitycenterv2/go_product.yaml | 23 + mmv1/products/vertexai/go_Dataset.yaml | 1 - 
mmv1/products/vertexai/go_Endpoint.yaml | 1 - mmv1/products/vertexai/go_FeatureGroup.yaml | 1 - .../vertexai/go_FeatureGroupFeature.yaml | 1 - .../vertexai/go_FeatureOnlineStore.yaml | 2 +- .../go_FeatureOnlineStoreFeatureview.yaml | 2 +- mmv1/products/vertexai/go_Featurestore.yaml | 1 - .../vertexai/go_FeaturestoreEntitytype.yaml | 1 - .../go_FeaturestoreEntitytypeFeature.yaml | 1 - mmv1/products/vertexai/go_Index.yaml | 1 - mmv1/products/vertexai/go_IndexEndpoint.yaml | 1 - .../go_IndexEndpointDeployedIndex.yaml | 297 ++ mmv1/products/vertexai/go_Tensorboard.yaml | 1 - .../vmwareengine/go_ExternalAccessRule.yaml | 2 +- .../vmwareengine/go_ExternalAddress.yaml | 3 +- .../vmwareengine/go_PrivateCloud.yaml | 1 + mmv1/products/vpcaccess/go_Connector.yaml | 12 +- mmv1/products/workflows/go_Workflow.yaml | 141 + mmv1/products/workflows/go_product.yaml | 36 + .../products/workstations/go_Workstation.yaml | 162 + .../workstations/go_WorkstationCluster.yaml | 237 + .../workstations/go_WorkstationConfig.yaml | 680 +++ mmv1/products/workstations/go_product.yaml | 22 + mmv1/provider/template_data.go | 15 +- mmv1/provider/terraform.go | 8 +- mmv1/provider/terraform.rb | 3 + .../go/bigquery_dataset_access.go.tmpl | 16 +- .../go/compute_resource_policy.go.tmpl | 5 + .../constants/go/notebooks_instance.go.tmpl | 3 +- .../terraform/constants/go/subnetwork.tmpl | 31 + .../go/vmwareengine_private_cloud.go.tmpl | 46 + .../constants/go/workbench_instance.go.tmpl | 109 +- ...udquotas_quota_preference_trace_id.go.tmpl | 3 + .../go/name_or_name_prefix.go.tmpl | 7 +- .../go/string_to_lower_case.go.tmpl | 19 + ...ctions2_function_source_generation.go.tmpl | 38 + ...discoveryengine_schema_json_schema.go.tmpl | 23 + .../go/service_directory_service.go.tmpl | 1 + ...x_ai_index_endpoint_deployed_index.go.tmpl | 17 + .../decoders/go/backend_service.go.tmpl | 12 - .../go/bigquery_data_transfer.go.tmpl | 13 + .../go/region_backend_service.go.tmpl | 13 +- 
...x_ai_index_endpoint_deployed_index.go.tmpl | 20 + .../encoders/go/backend_service.go.tmpl | 18 - .../go/bigquery_data_transfer.go.tmpl | 20 +- .../compute_region_target_https_proxy.go.tmpl | 10 +- .../go/compute_target_https_proxy.go.tmpl | 10 +- .../go/region_backend_service.go.tmpl | 17 - ...x_ai_index_endpoint_deployed_index.go.tmpl | 7 + .../terraform/encoders/go/workflow.go.tmpl | 7 +- .../examples/apphub_application_full.tf.erb | 2 +- .../examples/apphub_service_full.tf.erb | 2 +- .../examples/apphub_workload_full.tf.erb | 2 +- .../artifact_registry_repository_basic.tf.erb | 2 +- ...rtifact_registry_repository_cleanup.tf.erb | 2 +- ...artifact_registry_repository_docker.tf.erb | 2 +- ...artifact_registry_repository_remote.tf.erb | 2 +- ...fact_registry_repository_remote_apt.tf.erb | 2 +- ...tory_remote_docker_custom_with_auth.tf.erb | 2 +- ...ry_repository_remote_dockerhub_auth.tf.erb | 2 +- ...itory_remote_maven_custom_with_auth.tf.erb | 2 +- ...ository_remote_npm_custom_with_auth.tf.erb | 2 +- ...tory_remote_python_custom_with_auth.tf.erb | 2 +- ...fact_registry_repository_remote_yum.tf.erb | 2 +- ...rtifact_registry_repository_virtual.tf.erb | 6 +- ...ry_analyticshub_data_exchange_basic.tf.erb | 2 +- ...uery_analyticshub_data_exchange_dcr.tf.erb | 2 +- ...bigquery_analyticshub_listing_basic.tf.erb | 6 +- .../bigquery_analyticshub_listing_dcr.tf.erb | 6 +- ...ery_analyticshub_listing_restricted.tf.erb | 6 +- ...ce_perimeter_dry_run_egress_policy.tf.tmpl | 36 + ...e_perimeter_dry_run_ingress_policy.tf.tmpl | 39 + .../go/active_directory_domain_basic.tf.tmpl | 2 +- ...ctive_directory_domain_trust_basic.tf.tmpl | 1 + .../go/active_directory_peering_basic.tf.tmpl | 2 + .../go/alloydb_cluster_restore.tf.tmpl | 1 - .../go/alloydb_instance_basic_test.tf.tmpl | 1 - .../go/alloydb_instance_psc_test.tf.tmpl | 21 + .../alloydb_secondary_instance_basic.tf.tmpl | 2 +- .../examples/go/alloydb_user_builtin.tf.tmpl | 3 +- .../go/alloydb_user_builtin_test.tf.tmpl | 1 - 
.../examples/go/alloydb_user_iam_test.tf.tmpl | 1 - ...environment_keyvaluemaps_beta_test.tf.tmpl | 1 + ...ent_keyvaluemaps_entries_beta_test.tf.tmpl | 1 + ...ironment_keyvaluemaps_entries_test.tf.tmpl | 1 + ...igee_environment_keyvaluemaps_test.tf.tmpl | 1 + .../go/apphub_application_full.tf.tmpl | 2 +- .../examples/go/apphub_service_full.tf.tmpl | 2 +- .../examples/go/apphub_workload_full.tf.tmpl | 2 +- ...artifact_registry_repository_basic.tf.tmpl | 2 +- ...tifact_registry_repository_cleanup.tf.tmpl | 2 +- ...rtifact_registry_repository_docker.tf.tmpl | 2 +- ...rtifact_registry_repository_remote.tf.tmpl | 2 +- ...act_registry_repository_remote_apt.tf.tmpl | 2 +- ...ory_remote_docker_custom_with_auth.tf.tmpl | 2 +- ...y_repository_remote_dockerhub_auth.tf.tmpl | 2 +- ...tory_remote_maven_custom_with_auth.tf.tmpl | 2 +- ...sitory_remote_npm_custom_with_auth.tf.tmpl | 2 +- ...ory_remote_python_custom_with_auth.tf.tmpl | 2 +- ...act_registry_repository_remote_yum.tf.tmpl | 2 +- ...tifact_registry_repository_virtual.tf.tmpl | 6 +- .../go/backend_service_external_iap.tf.tmpl | 1 + ...service_traffic_director_ring_hash.tf.tmpl | 10 +- .../go/backup_dr_backup_vault_full.tf.tmpl | 18 + ...y_analyticshub_data_exchange_basic.tf.tmpl | 2 +- ...ery_analyticshub_data_exchange_dcr.tf.tmpl | 9 + ...igquery_analyticshub_listing_basic.tf.tmpl | 6 +- .../bigquery_analyticshub_listing_dcr.tf.tmpl | 60 + ...ry_analyticshub_listing_restricted.tf.tmpl | 6 +- .../go/bigquery_connection_kms.tf.tmpl | 10 + .../bigquerydatatransfer_config_cmek.tf.tmpl | 46 + ...uerydatatransfer_config_salesforce.tf.tmpl | 21 + ...loud_tasks_queue_http_target_oauth.tf.tmpl | 41 + ...cloud_tasks_queue_http_target_oidc.tf.tmpl | 41 + .../examples/go/cloudfunctions2_full.tf.tmpl | 3 +- .../examples/go/cloudrunv2_job_sql.tf.tmpl | 1 - ...e_health_check_http_source_regions.tf.tmpl | 1 - ..._health_check_https_source_regions.tf.tmpl | 1 - ...te_health_check_tcp_source_regions.tf.tmpl | 1 - 
...onnection_profile_existing_alloydb.tf.tmpl | 55 + ..._connection_profile_existing_mysql.tf.tmpl | 25 + ...nnection_profile_existing_postgres.tf.tmpl | 25 + .../examples/go/dataform_repository.tf.tmpl | 30 + ...m_repository_with_cloudsource_repo.tf.tmpl | 34 + .../go/datastream_stream_bigquery.tf.tmpl | 1 - .../discoveryengine_datastore_basic.tf.tmpl | 15 +- ..._document_processing_config_layout.tf.tmpl | 20 + .../go/discoveryengine_schema_basic.tf.tmpl | 17 + .../go/dlp_discovery_config_actions.tf.tmpl | 42 + ...iscovery_config_conditions_cadence.tf.tmpl | 3 + ...basehosting_customdomain_cloud_run.tf.tmpl | 2 + .../firebasehosting_version_cloud_run.tf.tmpl | 2 + .../go/firestore_cmek_database.tf.tmpl | 9 - ...re_cmek_database_in_datastore_mode.tf.tmpl | 9 - .../go/integrations_client_full.tf.tmpl | 2 +- .../examples/go/kms_key_handle_basic.tf.tmpl | 1 + ...ecurity_client_tls_policy_advanced.tf.tmpl | 6 - ...k_security_client_tls_policy_basic.tf.tmpl | 1 - ...irewall_endpoint_association_basic.tf.tmpl | 21 +- ...ecurity_server_tls_policy_advanced.tf.tmpl | 1 - ...k_security_server_tls_policy_basic.tf.tmpl | 11 - ...rk_security_server_tls_policy_mtls.tf.tmpl | 3 - ...rity_server_tls_policy_server_cert.tf.tmpl | 1 - ...network_services_tcp_route_actions.tf.tmpl | 1 + .../go/node_template_accelerators.tf.tmpl | 15 + .../go/parallelstore_instance_basic.tf.tmpl | 2 - ...sub_subscription_push_cloudstorage.tf.tmpl | 1 + ...ubscription_push_cloudstorage_avro.tf.tmpl | 2 + .../examples/go/redis_cluster_ha.tf.tmpl | 11 + .../go/redis_cluster_ha_single_zone.tf.tmpl | 11 + ...ion_backend_service_balancing_mode.tf.tmpl | 12 +- ...egion_backend_service_external_iap.tf.tmpl | 1 + .../go/resource_policy_hourly_format.tf.tmpl | 12 + ...c_folder_notification_config_basic.tf.tmpl | 20 + ...lder_big_query_export_config_basic.tf.tmpl | 32 + .../scc_v2_folder_mute_config_basic.tf.tmpl | 13 + ...2_folder_notification_config_basic.tf.tmpl | 20 + 
...tion_big_query_export_config_basic.tf.tmpl | 26 + .../scc_v2_organization_source_basic.tf.tmpl | 5 + ...ject_big_query_export_config_basic.tf.tmpl | 26 + .../scc_v2_project_mute_config_basic.tf.tmpl | 8 + ..._project_notification_config_basic.tf.tmpl | 15 + .../go/storage_managed_folder_basic.tf.tmpl | 5 +- ...subnetwork_reserved_internal_range.tf.tmpl | 25 + ...ubnetwork_reserved_secondary_range.tf.tmpl | 42 + .../examples/go/tpu_node_full.tf.tmpl | 1 + .../examples/go/tpu_node_full_test.tf.tmpl | 9 +- ...deployed_index_automatic_resources.tf.tmpl | 67 + ...ndex_endpoint_deployed_index_basic.tf.tmpl | 77 + ..._endpoint_deployed_index_basic_two.tf.tmpl | 80 + ...deployed_index_dedicated_resources.tf.tmpl | 69 + ...vmware_engine_network_policy_basic.tf.tmpl | 4 +- .../examples/go/vpc_access_connector.tf.tmpl | 2 + .../vpc_access_connector_shared_vpc.tf.tmpl | 2 + .../examples/redis_cluster_ha.tf.erb | 2 +- .../redis_cluster_ha_single_zone.tf.erb | 2 +- .../terraform/examples/tpu_node_full.tf.erb | 1 + .../examples/tpu_node_full_test.tf.erb | 13 +- .../go/ssl_certificate.tmpl | 6 +- .../service_management_consumer.tf.erb | 1 + .../post_delete/go/private_cloud.go.tmpl | 11 +- .../scc_v1_folder_notification_config.go.tmpl | 12 + .../scc_v2_folder_notification_config.go.tmpl | 12 + ...ganization_big_query_export_config.go.tmpl | 12 + .../post_update/go/compute_subnetwork.go.tmpl | 72 + ...s_context_manager_dry_run_resource.go.tmpl | 1 + .../go/vmwareengine_private_cloud.go.tmpl | 15 + .../go/cloudrunv2_job_deletion_policy.go.tmpl | 3 + ...cloudrunv2_service_deletion_policy.go.tmpl | 3 + .../pre_delete/go/private_connection.go.tmpl | 5 + ...x_ai_index_endpoint_deployed_index.go.tmpl | 3 + .../go/bigtable_app_profile.go.tmpl | 10 + .../pre_update/go/spanner_database.go.tmpl | 3 +- .../property_documentation.html.markdown.tmpl | 2 +- .../go/spanner_database.go.tmpl | 3 +- ...x_ai_index_endpoint_deployed_index.go.tmpl | 3 + mmv1/templates/terraform/yaml_conversion.erb | 
5 +- .../terraform/acctest/go/test_utils.go.tmpl | 5 +- mmv1/third_party/terraform/go/go.mod | 2 +- mmv1/third_party/terraform/go/main.go.tmpl | 10 - ...context_manager_access_policy_test.go.tmpl | 2 + ...p_engine_flexible_app_version_test.go.tmpl | 445 ++ ...source_backup_dr_backup_vault_test.go.tmpl | 97 + .../go/resource_bigquery_dataset_test.go.tmpl | 867 ---- .../go/resource_bigquery_table.go.tmpl | 2961 ------------ .../go/resource_bigquery_table_test.go.tmpl | 4261 ----------------- ...source_binary_authorization_policy_test.go | 8 +- ...ource_cloudfunctions_function_test.go.tmpl | 4 + ...ntity_group_transitive_memberships.go.tmpl | 179 + ...resource_cloud_identity_group_test.go.tmpl | 7 +- .../resource_cloud_run_service_test.go.tmpl | 4 + .../go/resource_cloud_run_v2_job_test.go.tmpl | 42 +- ...resource_cloud_run_v2_service_test.go.tmpl | 60 +- .../go/resource_cloud_tasks_queue_test.go | 155 + .../go/resource_composer_environment.go.tmpl | 1 - ...resource_composer_environment_test.go.tmpl | 5 + .../go/compute_instance_helpers.go.tmpl | 23 +- .../go/data_source_google_compute_instance.go | 2 +- ...ource_compute_backend_service_test.go.tmpl | 7 +- .../go/resource_compute_disk_test.go.tmpl | 4 +- ...ource_compute_firewall_policy_rule_test.go | 148 +- .../resource_compute_firewall_policy_test.go | 5 +- .../resource_compute_global_address_test.go | 84 + ...resource_compute_health_check_test.go.tmpl | 6 +- .../go/resource_compute_instance.go.tmpl | 84 +- ...ompute_instance_from_machine_image.go.tmpl | 11 - ...e_instance_from_machine_image_test.go.tmpl | 361 +- ...rce_compute_instance_from_template.go.tmpl | 11 - ...ompute_instance_from_template_test.go.tmpl | 483 +- ...resource_compute_instance_settings_test.go | 2 +- ...resource_compute_instance_template.go.tmpl | 26 +- ...rce_compute_instance_template_test.go.tmpl | 111 +- .../go/resource_compute_instance_test.go.tmpl | 634 ++- ...mpute_network_firewall_policy_rule_test.go | 2 + 
...ompute_region_backend_service_test.go.tmpl | 18 +- ...e_compute_region_instance_template.go.tmpl | 29 +- ...pute_region_instance_template_test.go.tmpl | 47 +- ...ute_region_target_https_proxy_test.go.tmpl | 992 ++++ .../resource_compute_subnetwork_test.go.tmpl | 126 +- ...ce_compute_target_https_proxy_test.go.tmpl | 240 +- .../services/container/go/node_config.go.tmpl | 88 +- .../go/resource_container_cluster.go.tmpl | 219 +- ...source_container_cluster_migratev1.go.tmpl | 7 + .../resource_container_cluster_test.go.tmpl | 778 ++- .../resource_container_node_pool_test.go.tmpl | 128 +- ...ce_dataflow_flex_template_job_test.go.tmpl | 2 +- .../resource_dataform_repository_test.go.tmpl | 2 +- .../go/resource_dataproc_cluster_test.go | 2 + .../go/resource_dialogflow_agent_test.go | 2 +- .../go/resource_dns_record_set_test.go.tmpl | 130 + ...e_firebase_android_app_config_test.go.tmpl | 2 +- ...ce_gke_hub_feature_membership_test.go.tmpl | 2 + .../go/resource_gke_hub_feature_test.go.tmpl | 83 + .../iam2/go/resource_iam_deny_policy_test.go | 2 +- .../go/resource_kms_crypto_key_test.go.tmpl | 1309 +++++ ...rk_security_client_tls_policy_test.go.tmpl | 1 + ...ce_network_services_tcp_route_test.go.tmpl | 2 + ...source_parallelstore_instance_test.go.tmpl | 23 +- .../go/resource_redis_cluster_test.go.tmpl | 55 +- ...esource_google_project_iam_binding_test.go | 5 +- ...resource_google_project_iam_member_test.go | 5 +- ...resource_google_project_iam_policy_test.go | 12 +- .../resource_google_project_service.go.tmpl | 40 +- ...source_google_project_service_test.go.tmpl | 42 +- .../go/resource_sql_database_instance.go.tmpl | 2553 ++++++++++ .../storage/go/resource_storage_bucket.go | 54 +- .../go/resource_storage_bucket_test.go | 35 +- 511 files changed, 16863 insertions(+), 9902 deletions(-) create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml create mode 100644 
mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml create mode 100644 mmv1/products/backupdr/go_BackupVault.yaml create mode 100644 mmv1/products/discoveryengine/go_Schema.yaml create mode 100644 mmv1/products/dns/go_ManagedZone.yaml create mode 100644 mmv1/products/dns/go_Policy.yaml create mode 100644 mmv1/products/dns/go_ResponsePolicy.yaml create mode 100644 mmv1/products/dns/go_ResponsePolicyRule.yaml create mode 100644 mmv1/products/dns/go_product.yaml create mode 100644 mmv1/products/securitycenter/go_FolderNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationSource.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_product.yaml create mode 100644 mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml create mode 100644 mmv1/products/workflows/go_Workflow.yaml create mode 100644 mmv1/products/workflows/go_product.yaml create mode 100644 mmv1/products/workstations/go_Workstation.yaml create mode 100644 mmv1/products/workstations/go_WorkstationCluster.yaml create mode 100644 mmv1/products/workstations/go_WorkstationConfig.yaml create mode 100644 mmv1/products/workstations/go_product.yaml create mode 100644 
mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl create mode 100644 mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl create mode 100644 mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl create mode 100644 mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquerydatatransfer_config_cmek.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquerydatatransfer_config_salesforce.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloud_tasks_queue_http_target_oauth.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloud_tasks_queue_http_target_oidc.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_alloydb.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_mysql.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_postgres.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_datastore_document_processing_config_layout.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_schema_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/node_template_accelerators.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/resource_policy_hourly_format.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_folder_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_folder_mute_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_folder_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_organization_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_organization_source_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_mute_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/subnetwork_reserved_internal_range.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/subnetwork_reserved_secondary_range.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_automatic_resources.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_basic.tf.tmpl 
create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_basic_two.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_dedicated_resources.tf.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_transitive_memberships.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 0e589f1b64cc..f45908b8da70 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -248,6 +248,9 @@ type Resource struct { StateUpgraders bool `yaml:"state_upgraders"` + // Do not apply the default attribution label + SkipAttributionLabel bool `yaml:"skip_attribution_label"` + // This block inserts the named function and its attribute into the // resource schema -- the code for the migrate_state function must // be included in the resource constants or come from tpgresource @@ -545,7 +548,11 @@ func (r *Resource) AddLabelsRelatedFields(props []*Type, parent *Type) []*Type { // def add_labels_fields(props, parent, labels) func (r *Resource) addLabelsFields(props []*Type, parent *Type, labels *Type) []*Type { if parent == nil || parent.FlattenObject { - r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiff") + if r.SkipAttributionLabel { + r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiffWithoutAttributionLabel") + } else { + r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiff") + } } else if parent.Name == "metadata" { r.CustomDiff = append(r.CustomDiff, "tpgresource.SetMetadataLabelsDiff") } @@ -832,12 +839,7 @@ func (r Resource) ClientNamePascal() string { } func (r Resource) PackageName() string { - clientName := r.ProductMetadata.ClientName - if clientName == "" { - clientName = r.ProductMetadata.Name - } - - return strings.ToLower(clientName) + return strings.ToLower(r.ProductMetadata.Name) } // In order of preference, use TF 
override, diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 1f2e4d26e489..b83028e81b5f 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -709,7 +709,7 @@ func (t Type) Deprecated() bool { } func (t *Type) GetDescription() string { - return strings.TrimRight(t.Description, "\n") + return strings.TrimSpace(strings.TrimRight(t.Description, "\n")) } // // private diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go index 1cd004b31a2d..77b72b6466f7 100644 --- a/mmv1/description-copy.go +++ b/mmv1/description-copy.go @@ -39,6 +39,9 @@ func CopyText(identifier string, last bool) { } for _, productPath := range allProductFiles { + if strings.Contains(productPath, "healthcare") { + continue + } // Gather go and ruby file pairs yamlMap := make(map[string][]string) yamlPaths, err := filepath.Glob(fmt.Sprintf("%s/*", productPath)) diff --git a/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml index aadd137e0aff..064ad347710a 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml @@ -161,167 +161,3 @@ properties: description: | Value for permission should be a valid Cloud IAM permission for the corresponding `serviceName` in `ApiOperation`. -# Copyright 2018 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
---- !ruby/object:Api::Resource -name: 'ServicePerimeterDryRunEgressPolicy' -create_url: '{{perimeter}}' -base_url: '' -self_link: '{{perimeter}}' -create_verb: :PATCH -delete_verb: :PATCH -update_mask: true -immutable: true -identity: - - egressFrom - - egressTo -nested_query: !ruby/object:Api::Resource::NestedQuery - modify_by_patch: true - is_list_of_ids: false - keys: - - spec - - egressPolicies -references: !ruby/object:Api::Resource::ReferenceLinks - api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' -description: | - Manage a single EgressPolicy in the spec (dry-run) configuration for a service perimeter. - EgressPolicies match requests based on egressFrom and egressTo stanzas. - For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. - If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter - boundary. For example, an EgressPolicy can be used to allow VMs on networks - within the ServicePerimeter to access a defined set of projects outside the - perimeter in certain contexts (e.g. to read data from a Cloud Storage bucket - or query against a BigQuery dataset). - - ~> **Note:** By default, updates to this resource will remove the EgressPolicy from the - from the perimeter and add it back in a non-atomic manner. To ensure that the new EgressPolicy - is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. 
-examples: - - !ruby/object:Provider::Terraform::Examples - name: 'access_context_manager_service_perimeter_dry_run_egress_policy' - skip_test: true -autogen_async: true -exclude_tgc: true -# Skipping the sweeper due to the non-standard base_url and because this is fine-grained under ServicePerimeter -skip_sweeper: true -exclude_import: true -id_format: '{{perimeter}}' -import_format: ['{{perimeter}}'] -mutex: '{{perimeter}}' -custom_code: !ruby/object:Provider::Terraform::CustomCode - custom_import: templates/terraform/custom_import/access_context_manager_service_perimeter_ingress_policy.go.erb - pre_update: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb - pre_create: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb - pre_delete: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb -parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'perimeter' - resource: 'ServicePerimeter' - imports: 'name' - description: | - The name of the Service Perimeter to add this resource to. - required: true - url_param_only: true -properties: - - !ruby/object:Api::Type::NestedObject - name: 'egressFrom' - description: | - Defines conditions on the source of a request causing this `EgressPolicy` to apply. - properties: - - !ruby/object:Api::Type::Enum - name: 'identityType' - description: | - Specifies the type of identities that are allowed access to outside the - perimeter. If left unspecified, then members of `identities` field will - be allowed access. - values: - - :ANY_IDENTITY - - :ANY_USER_ACCOUNT - - :ANY_SERVICE_ACCOUNT - - !ruby/object:Api::Type::Array - name: 'identities' - description: | - A list of identities that are allowed access through this `EgressPolicy`. - Should be in the format of email address. The email address should - represent individual user or service account only. 
- item_type: Api::Type::String - - !ruby/object:Api::Type::Array - name: 'sources' - description: 'Sources that this EgressPolicy authorizes access from.' - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'accessLevel' - description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' - - !ruby/object:Api::Type::Enum - name: 'sourceRestriction' - description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' - values: - - :SOURCE_RESTRICTION_UNSPECIFIED - - :SOURCE_RESTRICTION_ENABLED - - :SOURCE_RESTRICTION_DISABLED - - !ruby/object:Api::Type::NestedObject - name: 'egressTo' - description: | - Defines the conditions on the `ApiOperation` and destination resources that - cause this `EgressPolicy` to apply. - properties: - - !ruby/object:Api::Type::Array - name: 'resources' - item_type: Api::Type::String - description: | - A list of resources, currently only projects in the form - `projects/`, that match this to stanza. A request matches - if it contains a resource in this list. If * is specified for resources, - then this `EgressTo` rule will authorize access to all resources outside - the perimeter. - - !ruby/object:Api::Type::Array - name: 'externalResources' - item_type: Api::Type::String - description: | - A list of external resources that are allowed to be accessed. A request - matches if it contains an external resource in this list (Example: - s3://bucket/path). Currently '*' is not allowed. - - !ruby/object:Api::Type::Array - name: 'operations' - description: | - A list of `ApiOperations` that this egress rule applies to. A request matches - if it contains an operation/service in this list. 
- item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'serviceName' - description: | - The name of the API whose methods or permissions the `IngressPolicy` or - `EgressPolicy` want to allow. A single `ApiOperation` with serviceName - field set to `*` will allow all methods AND permissions for all services. - - !ruby/object:Api::Type::Array - name: 'methodSelectors' - description: | - API methods or permissions to allow. Method or permission must belong - to the service specified by `serviceName` field. A single MethodSelector - entry with `*` specified for the `method` field will allow all methods - AND permissions for the service specified in `serviceName`. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'method' - description: | - Value for `method` should be a valid method name for the corresponding - `serviceName` in `ApiOperation`. If `*` used as value for method, - then ALL methods and permissions are allowed. - - !ruby/object:Api::Type::String - name: 'permission' - description: | - Value for permission should be a valid Cloud IAM permission for the - corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml index 80787619400d..039589f75bb7 100644 --- a/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml +++ b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml @@ -84,7 +84,7 @@ parameters: type: String description: | The parent of this AccessPolicy in the Cloud Resource Hierarchy. - Format: organizations/{organization_id} + Format: 'organizations/{{organization_id}}' required: true immutable: true - name: 'title' @@ -96,7 +96,7 @@ parameters: type: Array description: | Folder or project on which this policy is applicable. 
- Format: folders/{{folder_id}} or projects/{{project_id}} + Format: 'folders/{{folder_id}}' or 'projects/{{project_number}}' item_type: type: String max_size: 1 @@ -104,7 +104,7 @@ properties: - name: 'name' type: String description: | - Resource name of the AccessPolicy. Format: {policy_id} + Resource name of the AccessPolicy. Format: '{{policy_id}}' output: true custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' - name: 'createTime' diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml index e85986dff856..d764a22a21b0 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml @@ -279,7 +279,10 @@ properties: description: | A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. - Currently only projects are allowed. Format `projects/{project_number}` + Currently only projects and VPCs are allowed. + Project format: `projects/{projectNumber}` + VPC network format: + `//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}`. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. `*` is not allowed, the case of allowing all Google Cloud resources only is not supported. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml new file mode 100644 index 000000000000..1803cf24bcea --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml @@ -0,0 +1,189 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterDryRunEgressPolicy' +description: | + Manage a single EgressPolicy in the spec (dry-run) configuration for a service perimeter. + EgressPolicies match requests based on egressFrom and egressTo stanzas. + For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. + If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter + boundary. For example, an EgressPolicy can be used to allow VMs on networks + within the ServicePerimeter to access a defined set of projects outside the + perimeter in certain contexts (e.g. to read data from a Cloud Storage bucket + or query against a BigQuery dataset). + + ~> **Note:** By default, updates to this resource will remove the EgressPolicy from the + perimeter and add it back in a non-atomic manner. To ensure that the new EgressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. 
+references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - egressFrom + - egressTo +nested_query: + keys: + - spec + - egressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_dry_run_egress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. 
+ properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. 
A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml new file mode 100644 index 000000000000..dbbdc68165e6 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml @@ -0,0 +1,198 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterDryRunIngressPolicy' +description: | + Manage a single IngressPolicy in the spec (dry-run) configuration for a service perimeter. + IngressPolicies match requests based on ingressFrom and ingressTo stanzas. For an ingress policy to match, + both the ingressFrom and ingressTo stanzas must be matched. If an IngressPolicy matches a request, + the request is allowed through the perimeter boundary from outside the perimeter. + For example, access from the internet can be allowed either based on an AccessLevel or, + for traffic hosted on Google Cloud, the project of the source network. + For access from private networks, using the project of the hosting network is required. + Individual ingress policies can be limited by restricting which services and/ + or actions they match using the ingressTo field. + + ~> **Note:** By default, updates to this resource will remove the IngressPolicy from the + perimeter and add it back in a non-atomic manner. To ensure that the new IngressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. 
+references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#ingresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - ingressFrom + - ingressTo +nested_query: + keys: + - spec + - ingressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_dry_run_ingress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. 
+ properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. 
+ - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. 
+ - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml index 33401434214b..ec19b9600bba 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml @@ -73,9 +73,9 @@ nested_query: is_list_of_ids: true modify_by_patch: true custom_code: - pre_create: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' - pre_update: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' - pre_delete: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl' exclude_tgc: true skip_sweeper: true diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml index 849dea461231..d6f02371e73b 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml @@ -15,6 +15,7 @@ --- name: 'ServicePerimeterEgressPolicy' description: | + Manage a single EgressPolicy in the status (enforced) configuration for a service perimeter. 
EgressPolicies match requests based on egressFrom and egressTo stanzas. For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter @@ -41,6 +42,7 @@ immutable: true mutex: '{{perimeter}}' import_format: - '{{perimeter}}' +exclude_import: true timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml index e5d15022dc31..0fbbe31b5a51 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml @@ -15,6 +15,7 @@ --- name: 'ServicePerimeterIngressPolicy' description: | + Manage a single IngressPolicy in the status (enforced) configuration for a service perimeter. IngressPolicies match requests based on ingressFrom and ingressTo stanzas. For an ingress policy to match, both the ingressFrom and ingressTo stanzas must be matched. If an IngressPolicy matches a request, the request is allowed through the perimeter boundary from outside the perimeter. @@ -42,6 +43,7 @@ immutable: true mutex: '{{perimeter}}' import_format: - '{{perimeter}}' +exclude_import: true timeouts: insert_minutes: 20 update_minutes: 20 @@ -133,7 +135,10 @@ properties: description: | A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. - Currently only projects are allowed. Format `projects/{project_number}` + Currently only projects and VPCs are allowed. + Project format: `projects/{projectNumber}` + VPC network format: + `//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}`. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. 
`*` is not allowed, the case of allowing all Google Cloud resources only is not supported. diff --git a/mmv1/products/activedirectory/go_Domain.yaml b/mmv1/products/activedirectory/go_Domain.yaml index 5e44da23b5f8..4be2e557fc74 100644 --- a/mmv1/products/activedirectory/go_Domain.yaml +++ b/mmv1/products/activedirectory/go_Domain.yaml @@ -36,6 +36,7 @@ timeouts: delete_minutes: 60 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -51,20 +52,9 @@ async: error: path: 'error' message: 'message' -virtual_fields: - - !ruby/object:Api::Type::Boolean - name: 'deletion_protection' - default_value: true - description: | - Whether Terraform will be prevented from destroying the domain. Defaults to true. - When a`terraform destroy` or `terraform apply` would delete the domain, - the command will fail if this field is not set to false in Terraform state. - When the field is set to true or unset in Terraform state, a `terraform apply` - or `terraform destroy` that would delete the domain will fail. - When the field is set to false, deleting the domain is allowed. custom_code: - custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' pre_delete: 'templates/terraform/pre_delete/go/active_directory_domain.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' error_abort_predicates: - 'transport_tpg.Is429QuotaError' @@ -74,9 +64,20 @@ examples: vars: name: 'myorg' domain_name: 'tfgen' - skip_test: true ignore_read_extra: - 'deletion_protection' + skip_test: true +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the domain. Defaults to true. + When a`terraform destroy` or `terraform apply` would delete the domain, + the command will fail if this field is not set to false in Terraform state. 
+ When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the domain will fail. + When the field is set to false, deleting the domain is allowed. + type: Boolean + default_value: true parameters: - name: 'domainName' type: String @@ -98,7 +99,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Resource labels that can contain user-provided metadata' - immutable: false - name: 'authorizedNetworks' type: Array description: | @@ -128,7 +128,7 @@ properties: The name of delegated administrator account used to perform Active Directory operations. If not specified, setupadmin will be used. immutable: true - default_value: setupadmin + default_value: "setupadmin" - name: 'fqdn' type: String description: | diff --git a/mmv1/products/activedirectory/go_DomainTrust.yaml b/mmv1/products/activedirectory/go_DomainTrust.yaml index 5540fc424a36..fc6ff2e0846e 100644 --- a/mmv1/products/activedirectory/go_DomainTrust.yaml +++ b/mmv1/products/activedirectory/go_DomainTrust.yaml @@ -37,6 +37,7 @@ timeouts: delete_minutes: 20 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/activedirectory/go_Peering.yaml b/mmv1/products/activedirectory/go_Peering.yaml index 97a81343d131..7b5d44244b55 100644 --- a/mmv1/products/activedirectory/go_Peering.yaml +++ b/mmv1/products/activedirectory/go_Peering.yaml @@ -36,6 +36,7 @@ timeouts: delete_minutes: 20 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -78,7 +79,6 @@ properties: type: KeyValueLabels description: 'Resource labels that can contain user-provided metadata' min_version: 'beta' - immutable: false - name: 'authorizedNetwork' type: String description: | diff --git a/mmv1/products/alloydb/go_Backup.yaml b/mmv1/products/alloydb/go_Backup.yaml index c5ad9794c13b..6685eade1fbc 100644 --- 
a/mmv1/products/alloydb/go_Backup.yaml +++ b/mmv1/products/alloydb/go_Backup.yaml @@ -143,7 +143,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'state' type: String description: Output only. The current state of the backup. diff --git a/mmv1/products/alloydb/go_Cluster.yaml b/mmv1/products/alloydb/go_Cluster.yaml index 7c067f2458ea..8bda78a9fd3a 100644 --- a/mmv1/products/alloydb/go_Cluster.yaml +++ b/mmv1/products/alloydb/go_Cluster.yaml @@ -145,7 +145,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'User-defined labels for the alloydb cluster.' - immutable: false - name: 'encryptionConfig' type: NestedObject description: | @@ -217,19 +216,6 @@ properties: output: true item_type: type: String - - name: 'network' - type: String - description: | - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - - "projects/{projectNumber}/global/networks/{network_id}". - default_from_api: true - exactly_one_of: - - 'network' - - 'network_config.0.network' - - 'psc_config.0.psc_enabled' - diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' - deprecation_message: '`network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration.' - name: 'networkConfig' type: NestedObject description: | @@ -242,7 +228,6 @@ properties: The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". 
exactly_one_of: - - 'network' - 'network_config.0.network' - 'psc_config.0.psc_enabled' diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' @@ -569,3 +554,33 @@ properties: type: Integer description: | Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. + - name: 'subscriptionType' + type: Enum + description: | + The subscrition type of cluster. + default_from_api: true + enum_values: + - 'TRIAL' + - 'STANDARD' + - name: 'trialMetadata' + type: NestedObject + description: | + Contains information and all metadata related to TRIAL clusters. + output: true + properties: + - name: 'startTime' + type: String + description: | + Start time of the trial cluster. + - name: 'endTime' + type: String + description: | + End time of the trial cluster. + - name: 'upgradeTime' + type: String + description: | + Upgrade time of the trial cluster to standard cluster. + - name: 'graceEndTime' + type: String + description: | + Grace end time of the trial cluster. diff --git a/mmv1/products/alloydb/go_Instance.yaml b/mmv1/products/alloydb/go_Instance.yaml index 531c73834079..a4a37eca0763 100644 --- a/mmv1/products/alloydb/go_Instance.yaml +++ b/mmv1/products/alloydb/go_Instance.yaml @@ -107,6 +107,16 @@ examples: - 'reconciling' - 'update_time' skip_docs: true + - name: 'alloydb_instance_psc_test' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true parameters: - name: 'cluster' type: ResourceRef @@ -149,7 +159,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'User-defined labels for the alloydb instance.' - immutable: false - name: 'annotations' type: KeyValueAnnotations description: @@ -238,8 +247,8 @@ properties: - name: 'observabilityConfig' type: NestedObject description: 'Configuration for enhanced query insights.' 
+ min_version: 'beta' default_from_api: true - min_version: beta properties: - name: 'enabled' type: Boolean @@ -311,6 +320,7 @@ properties: type: NestedObject description: | Configuration for Private Service Connect (PSC) for the instance. + default_from_api: true properties: - name: 'serviceAttachmentLink' type: String diff --git a/mmv1/products/apigateway/go_Api.yaml b/mmv1/products/apigateway/go_Api.yaml index 97832cc6be8d..1222360c0879 100644 --- a/mmv1/products/apigateway/go_Api.yaml +++ b/mmv1/products/apigateway/go_Api.yaml @@ -112,4 +112,3 @@ properties: description: | Resource labels to represent user-provided metadata. min_version: 'beta' - immutable: false diff --git a/mmv1/products/apigateway/go_ApiConfig.yaml b/mmv1/products/apigateway/go_ApiConfig.yaml index 353500ec8445..9feafb296052 100644 --- a/mmv1/products/apigateway/go_ApiConfig.yaml +++ b/mmv1/products/apigateway/go_ApiConfig.yaml @@ -139,7 +139,6 @@ properties: description: | Resource labels to represent user-provided metadata. min_version: 'beta' - immutable: false - name: 'gatewayConfig' type: NestedObject description: | diff --git a/mmv1/products/apigateway/go_Gateway.yaml b/mmv1/products/apigateway/go_Gateway.yaml index ad642df0cd63..db8dccc8072c 100644 --- a/mmv1/products/apigateway/go_Gateway.yaml +++ b/mmv1/products/apigateway/go_Gateway.yaml @@ -129,4 +129,3 @@ properties: description: | Resource labels to represent user-provided metadata. 
min_version: 'beta' - immutable: false diff --git a/mmv1/products/appengine/go_FlexibleAppVersion.yaml b/mmv1/products/appengine/go_FlexibleAppVersion.yaml index da141620af6c..4a4707ce7e40 100644 --- a/mmv1/products/appengine/go_FlexibleAppVersion.yaml +++ b/mmv1/products/appengine/go_FlexibleAppVersion.yaml @@ -45,7 +45,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 @@ -76,6 +76,7 @@ examples: ignore_read_extra: - 'noop_on_destroy' - 'deployment.0.zip' + skip_test: true virtual_fields: - name: 'noop_on_destroy' description: | @@ -557,6 +558,7 @@ properties: key_description: | name of file value_type: + name: files type: NestedObject properties: - name: 'sha1Sum' diff --git a/mmv1/products/appengine/go_ServiceNetworkSettings.yaml b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml index bcb415b7f01a..cb98cfd59850 100644 --- a/mmv1/products/appengine/go_ServiceNetworkSettings.yaml +++ b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/appengine/go_ServiceSplitTraffic.yaml b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml index 64267de811d0..9413ab42b709 100644 --- a/mmv1/products/appengine/go_ServiceSplitTraffic.yaml +++ b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/appengine/go_StandardAppVersion.yaml b/mmv1/products/appengine/go_StandardAppVersion.yaml index 
132fece11508..93a5f71fadb9 100644 --- a/mmv1/products/appengine/go_StandardAppVersion.yaml +++ b/mmv1/products/appengine/go_StandardAppVersion.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 @@ -289,6 +289,7 @@ properties: key_description: | name of file value_type: + name: files type: NestedObject properties: - name: 'sha1Sum' diff --git a/mmv1/products/apphub/Application.yaml b/mmv1/products/apphub/Application.yaml index 1a6d163c6e16..94f0ec009493 100644 --- a/mmv1/products/apphub/Application.yaml +++ b/mmv1/products/apphub/Application.yaml @@ -56,7 +56,7 @@ examples: vars: application_id: "example-application" display_name: "Application Full" - description: 'Application for testing' + desc: 'Application for testing' business_name: "Alice" business_email: "alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/Service.yaml b/mmv1/products/apphub/Service.yaml index 8249a4dbae52..84e3c408c1a2 100644 --- a/mmv1/products/apphub/Service.yaml +++ b/mmv1/products/apphub/Service.yaml @@ -66,7 +66,7 @@ examples: application_id: "example-application-1" service_project_attachment_id: "project-1" display_name: "Example Service Full" - description: 'Register service for testing' + desc: 'Register service for testing' business_name: "Alice" business_email: "alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/Workload.yaml b/mmv1/products/apphub/Workload.yaml index 28ee7b2cb6af..42b9a9d80d24 100644 --- a/mmv1/products/apphub/Workload.yaml +++ b/mmv1/products/apphub/Workload.yaml @@ -45,7 +45,7 @@ examples: application_id: "example-application-1" service_project_attachment_id: "project-1" display_name: "Example Service Full" - description: 'Register service for testing' + desc: 'Register service for testing' business_name: "Alice" business_email: 
"alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/go_Application.yaml b/mmv1/products/apphub/go_Application.yaml index a8536f9ad262..241e2b4cc1f5 100644 --- a/mmv1/products/apphub/go_Application.yaml +++ b/mmv1/products/apphub/go_Application.yaml @@ -56,7 +56,7 @@ examples: vars: application_id: 'example-application' display_name: 'Application Full' - description: 'Application for testing' + desc: 'Application for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/apphub/go_Service.yaml b/mmv1/products/apphub/go_Service.yaml index 4c87d342df09..c821f75d5cec 100644 --- a/mmv1/products/apphub/go_Service.yaml +++ b/mmv1/products/apphub/go_Service.yaml @@ -65,7 +65,7 @@ examples: application_id: 'example-application-1' service_project_attachment_id: 'project-1' display_name: 'Example Service Full' - description: 'Register service for testing' + desc: 'Register service for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/apphub/go_Workload.yaml b/mmv1/products/apphub/go_Workload.yaml index c7038b28cfa2..b020d2925e0c 100644 --- a/mmv1/products/apphub/go_Workload.yaml +++ b/mmv1/products/apphub/go_Workload.yaml @@ -64,7 +64,7 @@ examples: application_id: 'example-application-1' service_project_attachment_id: 'project-1' display_name: 'Example Service Full' - description: 'Register service for testing' + desc: 'Register service for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/artifactregistry/Repository.yaml b/mmv1/products/artifactregistry/Repository.yaml index 1ffd201923a8..573c53133848 100644 --- a/mmv1/products/artifactregistry/Repository.yaml +++ b/mmv1/products/artifactregistry/Repository.yaml @@ -52,13 +52,13 @@ examples: ])" vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - 
!ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_docker' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cmek' primary_resource_id: 'my-repo' @@ -72,34 +72,34 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example virtual docker repository' + desc: 'example virtual docker repository' upstream_repository_id: 'my-repository-upstream' - upstream_description: 'example docker repository (upstream source)' + upstream_desc: 'example docker repository (upstream source)' upstream_policy_id: 'my-repository-upstream' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example remote docker repository' + desc: 'example remote docker repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_apt' primary_resource_id: 'my-repo' vars: repository_id: 'debian-buster' - description: 'example remote apt repository' + desc: 'example remote apt repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_yum' primary_resource_id: 'my-repo' vars: repository_id: 'rocky-9' - description: 'example remote yum repository' + desc: 'example remote yum repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cleanup' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository with cleanup policies' + desc: 'example docker repository with cleanup policies' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_dockerhub_auth' primary_resource_id: 'my-repo' @@ -109,7 +109,7 @@ examples: - 
'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-dockerhub-remote' - description: 'example remote dockerhub repository with credentials' + desc: 'example remote dockerhub repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -123,7 +123,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-docker-custom-remote' - description: 'example remote custom docker repository with credentials' + desc: 'example remote custom docker repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -137,7 +137,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-maven-custom-remote' - description: 'example remote custom maven repository with credentials' + desc: 'example remote custom maven repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -151,7 +151,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-npm-custom-remote' - description: 'example remote custom npm repository with credentials' + desc: 'example remote custom npm repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -165,7 +165,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-python-custom-remote' - description: 'example remote custom python repository with credentials' + desc: 'example remote custom python repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' diff --git a/mmv1/products/artifactregistry/go_Repository.yaml b/mmv1/products/artifactregistry/go_Repository.yaml index 
92eb331afea0..02159eb7daae 100644 --- a/mmv1/products/artifactregistry/go_Repository.yaml +++ b/mmv1/products/artifactregistry/go_Repository.yaml @@ -67,12 +67,12 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - name: 'artifact_registry_repository_docker' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - name: 'artifact_registry_repository_cmek' primary_resource_id: 'my-repo' vars: @@ -84,35 +84,35 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example virtual docker repository' + desc: 'example virtual docker repository' upstream_repository_id: 'my-repository-upstream' - upstream_description: 'example docker repository (upstream source)' + upstream_desc: 'example docker repository (upstream source)' upstream_policy_id: 'my-repository-upstream' - name: 'artifact_registry_repository_remote' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example remote docker repository' + desc: 'example remote docker repository' - name: 'artifact_registry_repository_remote_apt' primary_resource_id: 'my-repo' vars: repository_id: 'debian-buster' - description: 'example remote apt repository' + desc: 'example remote apt repository' - name: 'artifact_registry_repository_remote_yum' primary_resource_id: 'my-repo' vars: repository_id: 'rocky-9' - description: 'example remote yum repository' + desc: 'example remote yum repository' - name: 'artifact_registry_repository_cleanup' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository with cleanup policies' + desc: 'example docker repository with cleanup policies' - name: 'artifact_registry_repository_remote_dockerhub_auth' 
primary_resource_id: 'my-repo' vars: repository_id: 'example-dockerhub-remote' - description: 'example remote dockerhub repository with credentials' + desc: 'example remote dockerhub repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -123,7 +123,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-docker-custom-remote' - description: 'example remote custom docker repository with credentials' + desc: 'example remote custom docker repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -134,7 +134,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-maven-custom-remote' - description: 'example remote custom maven repository with credentials' + desc: 'example remote custom maven repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -145,7 +145,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-npm-custom-remote' - description: 'example remote custom npm repository with credentials' + desc: 'example remote custom npm repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -156,7 +156,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-python-custom-remote' - description: 'example remote custom python repository with credentials' + desc: 'example remote custom python repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -212,7 +212,6 @@ properties: longer than 63 characters. Label keys must begin with a lowercase letter and may only contain lowercase letters, numeric characters, underscores, and dashes. 
- immutable: false - name: 'kmsKeyName' type: String description: |- @@ -314,6 +313,7 @@ properties: key_description: |- The policy ID. Must be unique within a repository. value_type: + name: cleanupPolicies type: NestedObject properties: - name: 'action' diff --git a/mmv1/products/backupdr/BackupVault.yaml b/mmv1/products/backupdr/BackupVault.yaml index 253d6a3dfc6b..b8e3df6e5c0d 100644 --- a/mmv1/products/backupdr/BackupVault.yaml +++ b/mmv1/products/backupdr/BackupVault.yaml @@ -1,3 +1,16 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + --- !ruby/object:Api::Resource base_url: projects/{{project}}/locations/{{location}}/backupVaults create_url: projects/{{project}}/locations/{{location}}/backupVaults?backupVaultId={{backup_vault_id}} @@ -10,6 +23,26 @@ import_format: name: BackupVault description: Container to store and organize immutable and indelible backups. 
autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message +update_verb: :PATCH +update_mask: true examples: - !ruby/object:Provider::Terraform::Examples min_version: beta @@ -19,6 +52,42 @@ examples: backup_vault_id: 'backup-vault-test' test_env_vars: project: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: location + description: "The GCP location for the backup vault. " + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: backupVaultId + description: "Required. ID of the requesting object." + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::Boolean + name: 'force_update' + default_value: false + url_param_only: true + description: | + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + - !ruby/object:Api::Type::Boolean + name: 'force_delete' + default_value: false + url_param_only: true + description: | + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. 
+ - !ruby/object:Api::Type::Boolean + name: 'allow_missing' + default_value: false + url_param_only: true + description: | + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. properties: - !ruby/object:Api::Type::String name: name @@ -56,8 +125,8 @@ properties: simultaneous updates from overwiting each other. " - !ruby/object:Api::Type::String name: state - description: "Output only. The BackupVault resource instance state. \n Possible - values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" + description: "Output only. The BackupVault resource instance state. \n + Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" output: true - !ruby/object:Api::Type::String name: effectiveTime @@ -85,59 +154,3 @@ properties: name: annotations description: "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data. " -parameters: - - !ruby/object:Api::Type::String - name: location - description: "The GCP location for the backup vault. " - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::String - name: backupVaultId - description: "Required. ID of the requesting object." - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::Boolean - name: 'force_update' - default_value: false - url_param_only: true - description: | - If set, allow update to extend the minimum enforced retention for backup vault. This overrides - the restriction against conflicting retention periods. This conflict may occur when the - expiration schedule defined by the associated backup plan is shorter than the minimum - retention set by the backup vault. 
- - !ruby/object:Api::Type::Boolean - name: 'force_delete' - default_value: false - url_param_only: true - description: | - If set, the following restrictions against deletion of the backup vault instance can be overridden: - * deletion of a backup vault instance containing no backups, but still containing empty datasources. - * deletion of a backup vault instance that is being referenced by an active backup plan. - - !ruby/object:Api::Type::Boolean - name: 'allow_missing' - default_value: false - url_param_only: true - description: | - Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - path: name - base_url: "{{op_id}}" - wait_ms: 1000 - timeouts: - result: !ruby/object:Api::OpAsync::Result - path: response - resource_inside_response: true - status: !ruby/object:Api::OpAsync::Status - path: done - complete: true - allowed: - - true - - false - error: !ruby/object:Api::OpAsync::Error - path: error - message: message -update_verb: :PATCH -update_mask: true diff --git a/mmv1/products/backupdr/go_BackupVault.yaml b/mmv1/products/backupdr/go_BackupVault.yaml new file mode 100644 index 000000000000..2270f1b45c9d --- /dev/null +++ b/mmv1/products/backupdr/go_BackupVault.yaml @@ -0,0 +1,178 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackupVault' +description: Container to store and organize immutable and indelible backups. +min_version: 'beta' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +base_url: 'projects/{{project}}/locations/{{location}}/backupVaults' +self_link: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/backupVaults?backupVaultId={{backup_vault_id}}' +update_url: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_update}}' +update_verb: 'PATCH' +update_mask: true +delete_url: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_delete}}&allowMissing={{allow_missing}}' +import_format: + - 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'backup_dr_backup_vault_full' + primary_resource_id: 'backup-vault-test' + min_version: 'beta' + vars: + backup_vault_id: 'backup-vault-test' + test_env_vars: + project: 'PROJECT_NAME' +parameters: + - name: 'location' + type: String + description: "The GCP location for the backup vault. " + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'backupVaultId' + type: String + description: "Required. ID of the requesting object." 
+ min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'force_update' + type: Boolean + description: | + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + min_version: 'beta' + url_param_only: true + default_value: false + - name: 'force_delete' + type: Boolean + description: | + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + min_version: 'beta' + url_param_only: true + default_value: false + - name: 'allow_missing' + type: Boolean + description: | + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + min_version: 'beta' + url_param_only: true + default_value: false +properties: + - name: 'name' + type: String + description: 'Output only. Identifier. The resource name. ' + min_version: 'beta' + output: true + - name: 'description' + type: String + description: 'Optional. The description of the BackupVault instance (2048 characters + or less). ' + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: "Optional. Resource labels to represent user provided metadata. " + min_version: 'beta' + - name: 'createTime' + type: String + description: 'Output only. The time when the instance was created. ' + min_version: 'beta' + output: true + - name: 'updateTime' + type: String + description: 'Output only. The time when the instance was updated. 
' + min_version: 'beta' + output: true + - name: 'backupMinimumEnforcedRetentionDuration' + type: String + description: "Required. The default and minimum enforced retention for each backup + within the backup vault. The enforced retention for each backup can be extended. " + min_version: 'beta' + required: true + - name: 'deletable' + type: Boolean + description: 'Output only. Set to true when there are no backups nested under this + resource. ' + min_version: 'beta' + output: true + - name: 'etag' + type: String + description: "Optional. Server specified ETag for the backup vault resource to prevent + simultaneous updates from overwiting each other. " + min_version: 'beta' + output: true + - name: 'state' + type: String + description: "Output only. The BackupVault resource instance state. \n + Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" + min_version: 'beta' + output: true + - name: 'effectiveTime' + type: String + description: 'Optional. Time after which the BackupVault resource is locked. ' + min_version: 'beta' + - name: 'backupCount' + type: String + description: 'Output only. The number of backups in this backup vault. ' + min_version: 'beta' + output: true + - name: 'serviceAccount' + type: String + description: "Output only. Service account used by the BackupVault Service for this + BackupVault. The user should grant this account permissions in their workload + project to enable the service to run backups and restores there. " + min_version: 'beta' + output: true + - name: 'totalStoredBytes' + type: String + description: 'Output only. Total size of the storage used by all backup resources. ' + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: "Output only. Output only Immutable after resource creation until + resource deletion. " + min_version: 'beta' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: "Optional. User annotations. 
See https://google.aip.dev/128#annotations\nStores + small amounts of arbitrary data. " + min_version: 'beta' diff --git a/mmv1/products/backupdr/go_product.yaml b/mmv1/products/backupdr/go_product.yaml index 5a35bae35181..c2a50b6b6705 100644 --- a/mmv1/products/backupdr/go_product.yaml +++ b/mmv1/products/backupdr/go_product.yaml @@ -14,7 +14,7 @@ # Warning: This is a temporary file, and should not be edited directly --- name: 'BackupDR' -display_name: 'Backup and DR' +display_name: 'Backup and DR Service' versions: - name: 'beta' base_url: 'https://backupdr.googleapis.com/v1/' diff --git a/mmv1/products/beyondcorp/go_AppConnection.yaml b/mmv1/products/beyondcorp/go_AppConnection.yaml index 1c244f6ece44..287bed2319fa 100644 --- a/mmv1/products/beyondcorp/go_AppConnection.yaml +++ b/mmv1/products/beyondcorp/go_AppConnection.yaml @@ -92,7 +92,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. - immutable: false - name: 'type' type: String description: | diff --git a/mmv1/products/beyondcorp/go_AppConnector.yaml b/mmv1/products/beyondcorp/go_AppConnector.yaml index 32fa325f82a0..1e322033d474 100644 --- a/mmv1/products/beyondcorp/go_AppConnector.yaml +++ b/mmv1/products/beyondcorp/go_AppConnector.yaml @@ -86,7 +86,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. - immutable: false - name: 'principalInfo' type: NestedObject description: | diff --git a/mmv1/products/beyondcorp/go_AppGateway.yaml b/mmv1/products/beyondcorp/go_AppGateway.yaml index 87c0b3a542e0..ce6b17482dd0 100644 --- a/mmv1/products/beyondcorp/go_AppGateway.yaml +++ b/mmv1/products/beyondcorp/go_AppGateway.yaml @@ -104,7 +104,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. 
- immutable: false - name: 'state' type: Enum description: | diff --git a/mmv1/products/bigquery/go_DatasetAccess.yaml b/mmv1/products/bigquery/go_DatasetAccess.yaml index b126307ef693..735c90460b51 100644 --- a/mmv1/products/bigquery/go_DatasetAccess.yaml +++ b/mmv1/products/bigquery/go_DatasetAccess.yaml @@ -130,6 +130,7 @@ properties: - 'dataset' - 'routine' diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + custom_expand: 'templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl' - name: 'groupByEmail' type: String description: An email address of a Google Group to grant access to. @@ -143,6 +144,7 @@ properties: - 'dataset' - 'routine' diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + custom_expand: 'templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl' - name: 'domain' type: String description: | diff --git a/mmv1/products/bigquery/go_Table.yaml b/mmv1/products/bigquery/go_Table.yaml index f45fc5b3e9d2..0dc0faf20302 100644 --- a/mmv1/products/bigquery/go_Table.yaml +++ b/mmv1/products/bigquery/go_Table.yaml @@ -35,7 +35,6 @@ iam_policy: fetch_iam_policy_verb: 'POST' allowed_iam_role: 'roles/bigquery.dataOwner' parent_resource_attribute: 'table_id' - iam_conditions_request_type: 'REQUEST_BODY' example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' iam_policy_version: '1' custom_code: @@ -46,12 +45,6 @@ examples: vars: dataset_id: 'dataset_id' table_id: 'table_id' -virtual_fields: - - name: 'allow_resource_tags_on_deletion' - description: | - If set to true, it allows table deletion when there are still resource tags attached. 
- type: Boolean - default_value: false parameters: - name: 'dataset' type: String @@ -333,6 +326,7 @@ properties: - 'ORC' - 'PARQUET' - 'ICEBERG' + - 'DELTA_LAKE' - name: 'sourceUris' type: Array description: | diff --git a/mmv1/products/bigqueryanalyticshub/DataExchange.yaml b/mmv1/products/bigqueryanalyticshub/DataExchange.yaml index c74a33e466e0..4727ea1df30d 100644 --- a/mmv1/products/bigqueryanalyticshub/DataExchange.yaml +++ b/mmv1/products/bigqueryanalyticshub/DataExchange.yaml @@ -48,7 +48,7 @@ examples: region_override: 'US' vars: data_exchange_id: 'my_data_exchange' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_data_exchange_dcr' primary_resource_id: 'data_exchange' @@ -58,7 +58,7 @@ examples: region_override: 'US' vars: data_exchange_id: 'dcr_data_exchange' - description: 'example dcr data exchange' + desc: 'example dcr data exchange' properties: - !ruby/object:Api::Type::String name: name diff --git a/mmv1/products/bigqueryanalyticshub/Listing.yaml b/mmv1/products/bigqueryanalyticshub/Listing.yaml index 03b0206f834a..b5342de37dc7 100644 --- a/mmv1/products/bigqueryanalyticshub/Listing.yaml +++ b/mmv1/products/bigqueryanalyticshub/Listing.yaml @@ -52,7 +52,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_listing_restricted' primary_resource_id: 'listing' @@ -65,7 +65,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_listing_dcr' primary_resource_id: 'listing' @@ -78,7 +78,7 @@ examples: vars: data_exchange_id: 'dcr_data_exchange' listing_id: 'dcr_listing' - description: 'example dcr data exchange' + desc: 
'example dcr data exchange' properties: - !ruby/object:Api::Type::String name: name diff --git a/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml b/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml index 0a1a3312ab59..207306765337 100644 --- a/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml @@ -49,7 +49,14 @@ examples: region_override: 'US' vars: data_exchange_id: 'my_data_exchange' - description: 'example data exchange' + desc: 'example data exchange' + - name: 'bigquery_analyticshub_data_exchange_dcr' + primary_resource_id: 'data_exchange' + primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"])' + region_override: 'US' + vars: + data_exchange_id: 'dcr_data_exchange' + desc: 'example dcr data exchange' parameters: properties: - name: 'name' @@ -98,3 +105,35 @@ properties: type: String description: |- Base64 encoded image representing the data exchange. + - name: 'sharingEnvironmentConfig' + type: NestedObject + description: | + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + immutable: true + default_from_api: true + properties: + - name: 'defaultExchangeConfig' + type: NestedObject + description: | + Default Analytics Hub data exchange, used for secured data sharing. + immutable: true + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'sharing_environment_config.0.default_exchange_config' + - 'sharing_environment_config.0.dcr_exchange_config' + properties: + [] + - name: 'dcrExchangeConfig' + type: NestedObject + description: | + Data Clean Room (DCR), used for privacy-safe and secured data sharing. 
+ immutable: true + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'sharing_environment_config.0.default_exchange_config' + - 'sharing_environment_config.0.dcr_exchange_config' + properties: + [] diff --git a/mmv1/products/bigqueryanalyticshub/go_Listing.yaml b/mmv1/products/bigqueryanalyticshub/go_Listing.yaml index 47161cd592f2..a88cb74158f5 100644 --- a/mmv1/products/bigqueryanalyticshub/go_Listing.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_Listing.yaml @@ -50,7 +50,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - name: 'bigquery_analyticshub_listing_restricted' primary_resource_id: 'listing' primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"]), fmt.Sprintf("tf_test_my_listing%s", context["random_suffix"])' @@ -58,7 +58,15 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' + - name: 'bigquery_analyticshub_listing_dcr' + primary_resource_id: 'listing' + primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"]), fmt.Sprintf("tf_test_my_listing%s", context["random_suffix"])' + region_override: 'US' + vars: + data_exchange_id: 'dcr_data_exchange' + listing_id: 'dcr_listing' + desc: 'example dcr data exchange' parameters: properties: - name: 'name' @@ -146,6 +154,7 @@ properties: type: NestedObject description: Shared dataset i.e. BigQuery dataset source. required: true + immutable: true properties: - name: 'dataset' type: String @@ -153,7 +162,21 @@ properties: Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 required: true + immutable: true diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'selectedResources' + type: Array + description: Resource in this dataset that is selectively shared. 
This field is required for data clean room exchanges. + immutable: true + item_type: + type: NestedObject + properties: + - name: 'table' + type: String + description: | + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' - name: 'restrictedExportConfig' type: NestedObject description: If set, restricted export configuration will be propagated and enforced on the linked dataset. @@ -162,6 +185,11 @@ properties: type: Boolean description: If true, enable restricted export. + - name: 'restrictDirectTableAccess' + type: Boolean + description: + If true, restrict direct table access(read api/tabledata.list) on linked table. + output: true - name: 'restrictQueryResult' type: Boolean description: diff --git a/mmv1/products/bigqueryanalyticshub/go_product.yaml b/mmv1/products/bigqueryanalyticshub/go_product.yaml index 3873b7aa7338..c8f3297c4891 100644 --- a/mmv1/products/bigqueryanalyticshub/go_product.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_product.yaml @@ -17,7 +17,7 @@ name: 'BigqueryAnalyticsHub' display_name: 'Bigquery Analytics Hub' versions: - name: 'beta' - base_url: 'https://analyticshub.googleapis.com/v1beta1/' + base_url: 'https://analyticshub.googleapis.com/v1/' - name: 'ga' base_url: 'https://analyticshub.googleapis.com/v1/' scopes: diff --git a/mmv1/products/bigquerydatatransfer/go_Config.yaml b/mmv1/products/bigquerydatatransfer/go_Config.yaml index 0e086afe91b4..c66fafb6ccd8 100644 --- a/mmv1/products/bigquerydatatransfer/go_Config.yaml +++ b/mmv1/products/bigquerydatatransfer/go_Config.yaml @@ -53,6 +53,19 @@ examples: display_name: 'my-query' dataset_id: 'my_dataset' skip_test: true + - name: 'bigquerydatatransfer_config_cmek' + primary_resource_id: 'query_config_cmek' + vars: + dataset_id: 'example_dataset' + key_name: 'example-key' + keyring_name: 'example-keyring' + 
skip_test: true + - name: 'bigquerydatatransfer_config_salesforce' + primary_resource_id: 'salesforce_config' + vars: + display_name: 'my-salesforce-config' + dataset_id: 'my_dataset' + skip_test: true parameters: - name: 'location' type: String @@ -172,6 +185,16 @@ properties: reingests data for [today-10, today-1], rather than ingesting data for just [today-1]. Only valid if the data source supports the feature. Set the value to 0 to use the default value. + - name: 'encryptionConfiguration' + type: NestedObject + description: | + Represents the encryption configuration for a transfer. + properties: + - name: 'kmsKeyName' + type: String + description: | + The name of the KMS key used for encrypting BigQuery data. + required: true - name: 'disabled' type: Boolean description: | diff --git a/mmv1/products/bigqueryreservation/go_Reservation.yaml b/mmv1/products/bigqueryreservation/go_Reservation.yaml index ba0d8f27d598..ea773b411be5 100644 --- a/mmv1/products/bigqueryreservation/go_Reservation.yaml +++ b/mmv1/products/bigqueryreservation/go_Reservation.yaml @@ -71,11 +71,6 @@ properties: description: | Maximum number of queries that are allowed to run concurrently in this reservation. This is a soft limit due to asynchronous nature of the system and various optimizations for small queries. Default value is 0 which means that concurrency will be automatically set based on the reservation size. default_value: 0 - - name: 'multiRegionAuxiliary' - type: Boolean - description: | - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. 
- name: 'edition' type: String description: | diff --git a/mmv1/products/billing/go_ProjectInfo.yaml b/mmv1/products/billing/go_ProjectInfo.yaml index e837e0beb82e..9741a10ec7c6 100644 --- a/mmv1/products/billing/go_ProjectInfo.yaml +++ b/mmv1/products/billing/go_ProjectInfo.yaml @@ -21,7 +21,7 @@ references: 'Enable, disable, or change billing for a project': 'https://cloud.google.com/billing/docs/how-to/modify-project' api: 'https://cloud.google.com/billing/docs/reference/rest/v1/projects' docs: -id_format: 'projects/{{project}}/billingInfo' +id_format: 'projects/{{project}}' base_url: 'projects/{{project}}/billingInfo' create_verb: 'PUT' delete_verb: 'PUT' diff --git a/mmv1/products/binaryauthorization/go_Policy.yaml b/mmv1/products/binaryauthorization/go_Policy.yaml index 4a3aa59f4f5f..c8a9530d02cd 100644 --- a/mmv1/products/binaryauthorization/go_Policy.yaml +++ b/mmv1/products/binaryauthorization/go_Policy.yaml @@ -120,6 +120,7 @@ properties: } key_name: 'cluster' value_type: + name: clusterAdmissionRule type: NestedObject properties: - name: 'evaluationMode' diff --git a/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml b/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml index e7708589cd91..52a335f99759 100644 --- a/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml +++ b/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml @@ -88,7 +88,6 @@ properties: type: KeyValueLabels description: | User-provided key-value pairs - immutable: false - name: 'connectionInfo' type: NestedObject description: | diff --git a/mmv1/products/certificatemanager/go_Certificate.yaml b/mmv1/products/certificatemanager/go_Certificate.yaml index 708c706fb4ca..8450def41b28 100644 --- a/mmv1/products/certificatemanager/go_Certificate.yaml +++ b/mmv1/products/certificatemanager/go_Certificate.yaml @@ -117,7 +117,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the Certificate resource.' 
- immutable: false - name: 'scope' type: String description: | diff --git a/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml b/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml index 83105994c483..db1ec306dab8 100644 --- a/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml +++ b/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml @@ -115,7 +115,6 @@ properties: description: | 'Set of label tags associated with the CertificateIssuanceConfig resource. An object containing a list of "key": value pairs. Example: { "name": "wrench", "count": "3" }. - immutable: false - name: 'certificateAuthorityConfig' type: NestedObject description: | diff --git a/mmv1/products/certificatemanager/go_CertificateMap.yaml b/mmv1/products/certificatemanager/go_CertificateMap.yaml index 0d5419e4040a..48d4ac7d859b 100644 --- a/mmv1/products/certificatemanager/go_CertificateMap.yaml +++ b/mmv1/products/certificatemanager/go_CertificateMap.yaml @@ -81,7 +81,6 @@ properties: type: KeyValueLabels description: | Set of labels associated with a Certificate Map resource. - immutable: false - name: 'gclbTargets' type: Array description: | diff --git a/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml b/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml index 87075965dd9a..94217838ffd0 100644 --- a/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml +++ b/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml @@ -98,7 +98,6 @@ properties: Set of labels associated with a Certificate Map Entry. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
- immutable: false - name: 'certificates' type: Array description: | diff --git a/mmv1/products/certificatemanager/go_DnsAuthorization.yaml b/mmv1/products/certificatemanager/go_DnsAuthorization.yaml index 8c17d205e5b3..8923a5e2ed55 100644 --- a/mmv1/products/certificatemanager/go_DnsAuthorization.yaml +++ b/mmv1/products/certificatemanager/go_DnsAuthorization.yaml @@ -84,7 +84,6 @@ properties: type: KeyValueLabels description: 'Set of label tags associated with the DNS Authorization resource.' - immutable: false - name: 'domain' type: String description: | diff --git a/mmv1/products/certificatemanager/go_TrustConfig.yaml b/mmv1/products/certificatemanager/go_TrustConfig.yaml index 5357f70d30bf..f811963ba005 100644 --- a/mmv1/products/certificatemanager/go_TrustConfig.yaml +++ b/mmv1/products/certificatemanager/go_TrustConfig.yaml @@ -91,7 +91,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the trust config.' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/clouddeploy/go_Automation.yaml b/mmv1/products/clouddeploy/go_Automation.yaml index c32ee0bc28d9..d7dadd6404b5 100644 --- a/mmv1/products/clouddeploy/go_Automation.yaml +++ b/mmv1/products/clouddeploy/go_Automation.yaml @@ -104,7 +104,6 @@ properties: - name: 'labels' type: KeyValueLabels description: "Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 63 characters." - immutable: false - name: 'etag' type: String description: "Optional. 
The weak etag of the `Automation` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding." diff --git a/mmv1/products/clouddeploy/go_CustomTargetType.yaml b/mmv1/products/clouddeploy/go_CustomTargetType.yaml index 183e1e1bbc12..33e61223005b 100644 --- a/mmv1/products/clouddeploy/go_CustomTargetType.yaml +++ b/mmv1/products/clouddeploy/go_CustomTargetType.yaml @@ -109,7 +109,6 @@ properties: - name: 'labels' type: KeyValueLabels description: "Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes." - immutable: false - name: 'createTime' type: String description: "Time at which the `CustomTargetType` was created." diff --git a/mmv1/products/clouddomains/go_Registration.yaml b/mmv1/products/clouddomains/go_Registration.yaml index 611824e3c475..d74db92ecbbb 100644 --- a/mmv1/products/clouddomains/go_Registration.yaml +++ b/mmv1/products/clouddomains/go_Registration.yaml @@ -95,7 +95,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Set of labels associated with the Registration. - immutable: false - name: 'domainNotices' type: Array description: The list of domain notices that you acknowledge. 
Possible value is HSTS_PRELOADED diff --git a/mmv1/products/cloudfunctions/go_CloudFunction.yaml b/mmv1/products/cloudfunctions/go_CloudFunction.yaml index e63aab6647ef..15d0e8a3df5a 100644 --- a/mmv1/products/cloudfunctions/go_CloudFunction.yaml +++ b/mmv1/products/cloudfunctions/go_CloudFunction.yaml @@ -135,7 +135,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs associated with this Cloud Function. - immutable: false - name: 'environmentVariables' type: KeyValuePairs description: | diff --git a/mmv1/products/cloudfunctions2/go_Function.yaml b/mmv1/products/cloudfunctions2/go_Function.yaml index de2ae6bae4b6..002d0be758d7 100644 --- a/mmv1/products/cloudfunctions2/go_Function.yaml +++ b/mmv1/products/cloudfunctions2/go_Function.yaml @@ -376,6 +376,7 @@ properties: Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl' - name: 'repoSource' type: NestedObject description: @@ -709,7 +710,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs associated with this Cloud Function. - immutable: false - name: 'kmsKeyName' type: String description: | diff --git a/mmv1/products/cloudquotas/go_QuotaPreference.yaml b/mmv1/products/cloudquotas/go_QuotaPreference.yaml index 2e6b5d5fe675..2a48249d5ca1 100644 --- a/mmv1/products/cloudquotas/go_QuotaPreference.yaml +++ b/mmv1/products/cloudquotas/go_QuotaPreference.yaml @@ -106,6 +106,7 @@ properties: description: | The trace id that the Google Cloud uses to provision the requested quota. This trace id may be used by the client to contact Cloud support to track the state of a quota preference request. The trace id is only produced for increase requests and is unique for each request. The quota decrease requests do not have a trace id. 
output: true + custom_expand: 'templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl' - name: 'annotations' type: KeyValuePairs description: |- diff --git a/mmv1/products/cloudrunv2/go_Job.yaml b/mmv1/products/cloudrunv2/go_Job.yaml index ed2b78d7a96f..77160e111161 100644 --- a/mmv1/products/cloudrunv2/go_Job.yaml +++ b/mmv1/products/cloudrunv2/go_Job.yaml @@ -55,6 +55,7 @@ iam_policy: - 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' - '{{name}}' custom_code: + pre_delete: 'templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl' taint_resource_on_failed_create: true examples: - name: 'cloudrunv2_job_basic' @@ -62,10 +63,14 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_limits' primary_resource_id: 'default' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_sql' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -76,6 +81,8 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_vpcaccess' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -84,29 +91,50 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_directvpc' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_secret' primary_resource_id: 'default' 
 primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' secret_id: 'secret' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_emptydir' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_run_job' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the job. Defaults to true. + When a `terraform destroy` or `terraform apply` would delete the job, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the job will fail. + When the field is set to false, deleting the job is allowed. + type: Boolean + default_value: true parameters: - name: 'location' type: String @@ -143,7 +171,6 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Job. - immutable: false - name: 'annotations' type: KeyValueAnnotations description: |- @@ -220,6 +247,14 @@ properties: type: Boolean description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + conflicts: + - policy + - name: 'policy' + type: String + description: | + The path to a binary authorization policy. 
Format: projects/{project}/platforms/cloudRun/{policy-name} + conflicts: + - use_default - name: 'startExecutionToken' type: String description: |- @@ -302,13 +337,14 @@ properties: - name: 'args' type: Array description: |- - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. item_type: type: String - name: 'env' type: Array description: |- List of environment variables to set in the container. + is_set: true item_type: type: NestedObject properties: @@ -320,7 +356,7 @@ properties: - name: 'value' type: String description: |- - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes + Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. 
# exactly_one_of: # - template.0.template.0.containers.0.env.0.value # - template.0.template.0.containers.0.env.0.valueSource diff --git a/mmv1/products/cloudrunv2/go_Service.yaml b/mmv1/products/cloudrunv2/go_Service.yaml index 84222388d0b0..c33947cb0a89 100644 --- a/mmv1/products/cloudrunv2/go_Service.yaml +++ b/mmv1/products/cloudrunv2/go_Service.yaml @@ -55,6 +55,7 @@ iam_policy: - 'projects/{{project}}/locations/{{location}}/services/{{name}}' - '{{name}}' custom_code: + pre_delete: 'templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl' taint_resource_on_failed_create: true examples: - name: 'cloudrunv2_service_basic' @@ -62,10 +63,14 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_limits' primary_resource_id: 'default' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_sql' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -76,6 +81,8 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_vpcaccess' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -84,38 +91,65 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_directvpc' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_probes' 
 primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_secret' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' secret_id: 'secret-1' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_multicontainer' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_mount_gcs' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' + skip_vcr: true - name: 'cloudrunv2_service_mount_nfs' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' + skip_vcr: true +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the service. Defaults to true. + When a `terraform destroy` or `terraform apply` would delete the service, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the service will fail. + When the field is set to false, deleting the service is allowed. 
+ type: Boolean + default_value: true parameters: - name: 'location' type: String @@ -156,7 +190,6 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. - immutable: false - name: 'annotations' type: KeyValueAnnotations description: |- @@ -242,6 +275,14 @@ properties: type: Boolean description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + conflicts: + - policy + - name: 'policy' + type: String + description: | + The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} + conflicts: + - use_default - name: 'customAudiences' type: Array description: | @@ -388,25 +429,26 @@ properties: - name: 'args' type: Array description: |- - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. item_type: type: String - name: 'env' type: Array description: |- List of environment variables to set in the container. + is_set: true item_type: type: NestedObject properties: - name: 'name' type: String description: |- - Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters. 
+ Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters. required: true - name: 'value' type: String description: |- - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes + Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. # exactly_one_of: # - template.0.containers.0.env.0.value # - template.0.containers.0.env.0.valueSource @@ -497,7 +539,6 @@ properties: type: NestedObject description: |- Periodic probe of container liveness. Container will be restarted if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_from_api: true properties: - name: 'initialDelaySeconds' type: Integer diff --git a/mmv1/products/cloudtasks/go_Queue.yaml b/mmv1/products/cloudtasks/go_Queue.yaml index 2b2fde3f626e..870d58be1384 100644 --- a/mmv1/products/cloudtasks/go_Queue.yaml +++ b/mmv1/products/cloudtasks/go_Queue.yaml @@ -49,6 +49,14 @@ examples: - 'app_engine_routing_override.0.service' - 'app_engine_routing_override.0.version' - 'app_engine_routing_override.0.instance' + - name: 'cloud_tasks_queue_http_target_oidc' + primary_resource_id: 'http_target_oidc' + vars: + name: 'cloud-tasks-queue-http-target-oidc' + - name: 'cloud_tasks_queue_http_target_oauth' + primary_resource_id: 'http_target_oauth' + vars: + name: 'cloud-tasks-queue-http-target-oauth' parameters: - name: 'location' type: String @@ -199,3 +207,168 @@ properties: This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the default and means that no operations are logged. required: true + - name: 'httpTarget' + type: NestedObject + description: Modifies HTTP target for HTTP tasks. + properties: + - name: 'httpMethod' + type: Enum + description: | + The HTTP method to use for the request. + + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + default_from_api: true + enum_values: + - 'HTTP_METHOD_UNSPECIFIED' + - 'POST' + - 'GET' + - 'HEAD' + - 'PUT' + - 'DELETE' + - 'PATCH' + - 'OPTIONS' + - name: 'uriOverride' + type: NestedObject + description: | + URI override. + + When specified, overrides the execution URI for all the tasks in the queue. + properties: + - name: 'scheme' + type: Enum + description: | + Scheme override. + + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). 
+ default_from_api: true + enum_values: + - 'HTTP' + - 'HTTPS' + - name: 'host' + type: String + description: | + Host override. + + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + - name: 'port' + type: String + description: | + Port override. + + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + - name: 'pathOverride' + type: NestedObject + description: | + URI path. + + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + properties: + - name: 'path' + type: String + description: The URI path (e.g., /users/1234). Default is an empty string. + default_from_api: true + - name: 'queryOverride' + type: NestedObject + description: | + URI query. + + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + properties: + - name: 'queryParams' + type: String + description: The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + default_from_api: true + - name: 'uriOverrideEnforceMode' + type: Enum + description: | + URI Override Enforce Mode + + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + default_from_api: true + enum_values: + - 'ALWAYS' + - 'IF_NOT_EXISTS' + - name: 'headerOverrides' + type: Array + description: | + HTTP target headers. + + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. 
+ + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + item_type: + type: NestedObject + properties: + - name: 'header' + type: NestedObject + description: | + Header embodying a key and a value. + required: true + properties: + - name: 'key' + type: String + description: The Key of the header. + required: true + - name: 'value' + type: String + description: The Value of the header. + required: true + - name: 'oauthToken' + type: NestedObject + description: | + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + conflicts: + - oidcToken + properties: + - name: 'serviceAccountEmail' + type: String + description: | + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + required: true + - name: 'scope' + type: String + description: | + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + default_from_api: true + - name: 'oidcToken' + type: NestedObject + description: | + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. 
+ + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + conflicts: + - oauthToken + properties: + - name: 'serviceAccountEmail' + type: String + description: | + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + required: true + - name: 'audience' + type: String + description: | + Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + default_from_api: true diff --git a/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml b/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml index 3d1da0e594ff..3fbf966eb003 100644 --- a/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml +++ b/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml @@ -28,6 +28,7 @@ timeouts: insert_minutes: 1 update_minutes: 1 delete_minutes: 1 +custom_code: examples: - name: 'composer_user_workloads_config_map_basic' primary_resource_id: 'config_map' diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml index 644e8c27d57d..7c55a613f836 100644 --- a/mmv1/products/compute/go_Address.yaml +++ b/mmv1/products/compute/go_Address.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml index 58af3f1aae8b..d34abc20405f 100644 --- a/mmv1/products/compute/go_Autoscaler.yaml +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -37,7 +37,7 @@ 
async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendBucket.yaml b/mmv1/products/compute/go_BackendBucket.yaml index 2cd35c63275c..d65a522c5c3c 100644 --- a/mmv1/products/compute/go_BackendBucket.yaml +++ b/mmv1/products/compute/go_BackendBucket.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml index 3c98d9c4729e..f7a24f705d2d 100644 --- a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml index 0ff6dc96a582..7bc25a46fb42 100644 --- a/mmv1/products/compute/go_BackendService.yaml +++ b/mmv1/products/compute/go_BackendService.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -150,8 +150,6 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. - - From version 6.0.0 default value will be UTILIZATION to match default GCP value. 
default_value: "UTILIZATION" enum_values: - 'UTILIZATION' @@ -746,16 +744,18 @@ properties: description: Settings for enabling Cloud Identity Aware Proxy send_empty_value: true properties: + - name: 'enabled' + type: Boolean + description: Whether the serving infrastructure will authenticate and authorize all incoming requests. + required: true - name: 'oauth2ClientId' type: String description: | OAuth2 Client ID for IAP - required: true - name: 'oauth2ClientSecret' type: String description: | OAuth2 Client Secret for IAP - required: true ignore_read: true sensitive: true send_empty_value: true @@ -960,9 +960,6 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. 
properties: - name: 'baseEjectionTime' type: NestedObject @@ -1013,7 +1010,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'consecutiveGatewayFailure' type: Integer description: | @@ -1032,7 +1028,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'enforcingConsecutiveErrors' type: Integer description: | @@ -1051,7 +1046,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'enforcingConsecutiveGatewayFailure' type: Integer description: | @@ -1070,7 +1064,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 0 - name: 'enforcingSuccessRate' type: Integer description: | @@ -1089,7 +1082,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'interval' type: NestedObject description: | @@ -1137,7 +1129,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 10 - name: 'successRateMinimumHosts' type: Integer description: | @@ -1157,7 +1148,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'successRateRequestVolume' type: Integer description: | @@ -1178,7 +1168,6 @@ properties: - 
'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'successRateStdevFactor' type: Integer description: | @@ -1201,7 +1190,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 1900 - name: 'portName' type: String description: | diff --git a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml index 6878e0000f40..5b6ed7d594b0 100644 --- a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 0c05ca647a61..dbd62234755c 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml index b5e5fa203277..744b21f80c6b 100644 --- a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' 
wait_ms: 1000 diff --git a/mmv1/products/compute/go_ExternalVpnGateway.yaml b/mmv1/products/compute/go_ExternalVpnGateway.yaml index c27de4033e84..763cc50878f6 100644 --- a/mmv1/products/compute/go_ExternalVpnGateway.yaml +++ b/mmv1/products/compute/go_ExternalVpnGateway.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml index 6deaee15a1ae..0b36c5309d57 100644 --- a/mmv1/products/compute/go_Firewall.yaml +++ b/mmv1/products/compute/go_Firewall.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -113,7 +113,7 @@ properties: either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. item_type: type: String @@ -153,7 +153,7 @@ properties: either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. item_type: type: String diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index c1976d6981b4..e3a13970e4c2 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -19,6 +19,7 @@ description: | A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple. 
+skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules' @@ -35,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -47,8 +48,8 @@ async: message: 'message' collection_url_key: 'items' custom_code: - pre_create: 'templates/terraform/pre_create/go/compute_forwarding_rule.go.tmpl' constants: 'templates/terraform/constants/go/compute_forwarding_rule.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/compute_forwarding_rule.go.tmpl' post_create: 'templates/terraform/post_create/go/labels.tmpl' custom_diff: - 'forwardingRuleCustomizeDiff' @@ -243,6 +244,12 @@ properties: This can only be set to true for load balancers that have their `loadBalancingScheme` set to `INTERNAL`. + - name: 'forwardingRuleId' + type: Integer + description: | + The unique identifier number for the resource. This identifier is defined by the server. + api_name: id + output: true - name: 'pscConnectionId' type: String description: 'The PSC connection id of the PSC Forwarding Rule.' diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml index aa0d9774991b..056511a5f12d 100644 --- a/mmv1/products/compute/go_GlobalAddress.yaml +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -99,7 +99,6 @@ properties: description: | The fingerprint used for optimistic locking of this resource. Used internally during updates. 
- min_version: 'beta' output: true update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' update_verb: 'POST' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index 26e3be4ee14f..708e9064d70e 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -22,6 +22,7 @@ description: | balancing. For more information, see https://cloud.google.com/compute/docs/load-balancing/http/ +skip_attribution_label: true docs: base_url: 'projects/{{project}}/global/forwardingRules' has_self_link: true @@ -34,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml index f6b78196a286..6971b7160623 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml index cbcc8744fea8..fe76b39a76f3 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HaVpnGateway.yaml 
b/mmv1/products/compute/go_HaVpnGateway.yaml index 3cb96b3356cd..ec6ec91a0fa3 100644 --- a/mmv1/products/compute/go_HaVpnGateway.yaml +++ b/mmv1/products/compute/go_HaVpnGateway.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml index f2ea88f02cba..43ef731d0ada 100644 --- a/mmv1/products/compute/go_HealthCheck.yaml +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -116,6 +116,18 @@ examples: min_version: 'beta' vars: health_check_name: 'tcp-health-check' + - name: 'compute_health_check_http_source_regions' + primary_resource_id: 'http-health-check-with-source-regions' + vars: + health_check_name: 'http-health-check' + - name: 'compute_health_check_https_source_regions' + primary_resource_id: 'https-health-check-with-source-regions' + vars: + health_check_name: 'https-health-check' + - name: 'compute_health_check_tcp_source_regions' + primary_resource_id: 'tcp-health-check-with-source-regions' + vars: + health_check_name: 'tcp-health-check' parameters: properties: - name: 'checkIntervalSec' @@ -179,7 +191,6 @@ properties: * The health check cannot be used with BackendService nor with managed instance group auto-healing. 
- min_version: 'beta' item_type: type: String min_size: 3 diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml index 7ee7bc77a0d7..e88b8fa0d9a8 100644 --- a/mmv1/products/compute/go_HttpHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml index 34adda55888c..26ae15e47605 100644 --- a/mmv1/products/compute/go_HttpsHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Image.yaml b/mmv1/products/compute/go_Image.yaml index 86a701c6a04b..a69df4bbe8b3 100644 --- a/mmv1/products/compute/go_Image.yaml +++ b/mmv1/products/compute/go_Image.yaml @@ -48,7 +48,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml index ddaf6f6021bc..5d2eba3bf9d1 100644 --- a/mmv1/products/compute/go_Instance.yaml +++ b/mmv1/products/compute/go_Instance.yaml @@ -30,7 +30,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -608,14 +608,16 @@ properties: type: Enum description: | The 
confidential computing technology the instance uses. - SEV is an AMD feature. One of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required. TDX is only available in beta. at_least_one_of: - 'confidential_instance_config.0.enable_confidential_compute' - 'confidential_instance_config.0.confidential_instance_type' enum_values: - 'SEV' - 'SEV_SNP' + - 'TDX' - name: 'status' type: Enum description: | diff --git a/mmv1/products/compute/go_InstanceGroup.yaml b/mmv1/products/compute/go_InstanceGroup.yaml index 48e0fa36a110..f647746e873c 100644 --- a/mmv1/products/compute/go_InstanceGroup.yaml +++ b/mmv1/products/compute/go_InstanceGroup.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupManager.yaml b/mmv1/products/compute/go_InstanceGroupManager.yaml index 3a52e01f41f3..395d3d79857c 100644 --- a/mmv1/products/compute/go_InstanceGroupManager.yaml +++ b/mmv1/products/compute/go_InstanceGroupManager.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupMembership.yaml b/mmv1/products/compute/go_InstanceGroupMembership.yaml index fd910f82c8d2..8b9c31172f23 100644 --- a/mmv1/products/compute/go_InstanceGroupMembership.yaml +++ b/mmv1/products/compute/go_InstanceGroupMembership.yaml @@ -50,7 +50,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - 
base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml index fd1d5514a0e6..b5f75c41bb05 100644 --- a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml +++ b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml @@ -42,7 +42,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceSettings.yaml b/mmv1/products/compute/go_InstanceSettings.yaml index 3069656e3b43..a249f423c9ad 100644 --- a/mmv1/products/compute/go_InstanceSettings.yaml +++ b/mmv1/products/compute/go_InstanceSettings.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Interconnect.yaml b/mmv1/products/compute/go_Interconnect.yaml index abc890c5a1bf..a43e4f6f9a88 100644 --- a/mmv1/products/compute/go_Interconnect.yaml +++ b/mmv1/products/compute/go_Interconnect.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 10000 @@ -379,16 +379,18 @@ properties: - name: 'requestedFeatures' type: Array description: | - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. 
Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. item_type: type: Enum description: | interconnects.list of features requested for this Interconnect connection enum_values: - 'MACSEC' + - 'IF_MACSEC' - name: 'availableFeatures' type: Array description: | @@ -398,8 +400,4 @@ properties: ports and MACsec isn't supported and enabling MACsec fails). output: true item_type: - type: Enum - description: | - interconnects.list of features available for this Interconnect connection, - enum_values: - - 'MACSEC' + type: String diff --git a/mmv1/products/compute/go_InterconnectAttachment.yaml b/mmv1/products/compute/go_InterconnectAttachment.yaml index 8ec6cc6738ee..34e9be6dac7a 100644 --- a/mmv1/products/compute/go_InterconnectAttachment.yaml +++ b/mmv1/products/compute/go_InterconnectAttachment.yaml @@ -30,7 +30,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_MachineImage.yaml b/mmv1/products/compute/go_MachineImage.yaml index 5f9a605e6787..0eaa8c794e23 100644 --- a/mmv1/products/compute/go_MachineImage.yaml +++ b/mmv1/products/compute/go_MachineImage.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml index 
a83f40296587..18c4f8028e3e 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -51,7 +51,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -96,7 +96,6 @@ properties: type: Integer description: 'The unique identifier for the resource.' api_name: id - default_from_api: true output: true - name: 'name' type: String diff --git a/mmv1/products/compute/go_Network.yaml b/mmv1/products/compute/go_Network.yaml index cec97ad33d59..690cdd17dc0c 100644 --- a/mmv1/products/compute/go_Network.yaml +++ b/mmv1/products/compute/go_Network.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml index 2a79092c8698..f186199aba04 100644 --- a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml +++ b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpoint.yaml b/mmv1/products/compute/go_NetworkEndpoint.yaml index abe4429c67f6..861d6acbb722 100644 --- a/mmv1/products/compute/go_NetworkEndpoint.yaml +++ b/mmv1/products/compute/go_NetworkEndpoint.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 
'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml index c5d3c85558e8..12a7f6adfbb3 100644 --- a/mmv1/products/compute/go_NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpoints.yaml b/mmv1/products/compute/go_NetworkEndpoints.yaml index 7242e983788f..0320bffeef75 100644 --- a/mmv1/products/compute/go_NetworkEndpoints.yaml +++ b/mmv1/products/compute/go_NetworkEndpoints.yaml @@ -33,7 +33,7 @@ references: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' docs: -id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints' +id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}' base_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' self_link: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints' create_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints' @@ -53,7 +53,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml index 6324ccdf3966..feb6e205653a 100644 --- a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml +++ 
b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml @@ -28,7 +28,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml index bfc331e0f42d..588b1897eea6 100644 --- a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml +++ b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -79,6 +79,7 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + skip_vcr: true parameters: - name: 'network' type: ResourceRef diff --git a/mmv1/products/compute/go_NodeGroup.yaml b/mmv1/products/compute/go_NodeGroup.yaml index 16df58318d41..c16e81b24363 100644 --- a/mmv1/products/compute/go_NodeGroup.yaml +++ b/mmv1/products/compute/go_NodeGroup.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NodeTemplate.yaml b/mmv1/products/compute/go_NodeTemplate.yaml index ae6d1d82a6ad..d6a061d91ec1 100644 --- a/mmv1/products/compute/go_NodeTemplate.yaml +++ b/mmv1/products/compute/go_NodeTemplate.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -56,6 +56,10 @@ examples: primary_resource_id: 'template' 
vars: template_name: 'soletenant-with-licenses' + - name: 'node_template_accelerators' + primary_resource_id: 'template' + vars: + template_name: 'soletenant-with-accelerators' parameters: - name: 'region' type: ResourceRef @@ -146,6 +150,24 @@ properties: enum_values: - 'RESTART_NODE_ON_ANY_SERVER' - 'RESTART_NODE_ON_MINIMAL_SERVERS' + - name: 'accelerators' + type: Array + description: | + List of the type and count of accelerator cards attached to the + node template + item_type: + type: NestedObject + properties: + - name: 'acceleratorCount' + type: Integer + description: | + The number of the guest accelerator cards exposed to this + node template. + - name: 'acceleratorType' + type: String + description: | + Full or partial URL of the accelerator type resource to expose + to this node template. - name: 'cpuOvercommitType' type: Enum description: | diff --git a/mmv1/products/compute/go_PacketMirroring.yaml b/mmv1/products/compute/go_PacketMirroring.yaml index 99ff103ff7f7..63a6871bda00 100644 --- a/mmv1/products/compute/go_PacketMirroring.yaml +++ b/mmv1/products/compute/go_PacketMirroring.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml index be33eac134ed..724932323c04 100644 --- a/mmv1/products/compute/go_PerInstanceConfig.yaml +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml index 
9b086511eed3..cb4daf09f4b2 100644 --- a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml +++ b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml index 42558bd8a439..5cd3db293ebc 100644 --- a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml +++ b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml index 5144500c2ac1..8fbf38ddac9a 100644 --- a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml +++ b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml index 494945ff6369..da1746780c73 100644 --- a/mmv1/products/compute/go_RegionAutoscaler.yaml +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionBackendService.yaml 
b/mmv1/products/compute/go_RegionBackendService.yaml index f8b2e6a04482..33dc130adcd5 100644 --- a/mmv1/products/compute/go_RegionBackendService.yaml +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -149,9 +149,7 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. - - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - default_value: "CONNECTION" + default_value: "UTILIZATION" enum_values: - 'UTILIZATION' - 'RATE' @@ -651,11 +649,9 @@ properties: description: | Time for which instance will be drained (not accept new connections, but still work to finish started). - - From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. api_name: drainingTimeoutSec send_empty_value: true - default_value: 0 + default_value: 300 - name: 'creationTimestamp' type: Time description: | @@ -752,16 +748,18 @@ properties: description: Settings for enabling Cloud Identity Aware Proxy send_empty_value: true properties: + - name: 'enabled' + type: Boolean + description: Whether the serving infrastructure will authenticate and authorize all incoming requests. + required: true - name: 'oauth2ClientId' type: String description: | OAuth2 Client ID for IAP - required: true - name: 'oauth2ClientSecret' type: String description: | OAuth2 Client Secret for IAP - required: true ignore_read: true sensitive: true send_empty_value: true @@ -869,9 +867,6 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. 
This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. - - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. properties: - name: 'baseEjectionTime' type: NestedObject @@ -922,7 +917,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'consecutiveGatewayFailure' type: Integer description: | @@ -941,7 +935,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'enforcingConsecutiveErrors' type: Integer description: | @@ -960,7 +953,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'enforcingConsecutiveGatewayFailure' type: Integer description: | @@ -979,7 +971,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 0 - name: 'enforcingSuccessRate' type: Integer description: | @@ -998,7 +989,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'interval' type: NestedObject description: | @@ -1046,7 +1036,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 10 - name: 'successRateMinimumHosts' type: Integer description: | @@ -1066,7 +1055,6 
@@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'successRateRequestVolume' type: Integer description: | @@ -1087,7 +1075,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'successRateStdevFactor' type: Integer description: | @@ -1109,7 +1096,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 1900 - name: 'portName' type: String description: | diff --git a/mmv1/products/compute/go_RegionCommitment.yaml b/mmv1/products/compute/go_RegionCommitment.yaml index f01b2ff61484..083d0215efeb 100644 --- a/mmv1/products/compute/go_RegionCommitment.yaml +++ b/mmv1/products/compute/go_RegionCommitment.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index db2bc7196d50..7fac3d3a0c88 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml index 1c41e05ebb19..f70f7da5fd89 100644 --- 
a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionHealthCheck.yaml b/mmv1/products/compute/go_RegionHealthCheck.yaml index 62dcea119fdc..c53f7405f68e 100644 --- a/mmv1/products/compute/go_RegionHealthCheck.yaml +++ b/mmv1/products/compute/go_RegionHealthCheck.yaml @@ -42,7 +42,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml index 46c46ed67bca..ef3fb3c81954 100644 --- a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml +++ b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml index 347a086a82bb..b8b956c01adf 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git 
a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml index 6a57b76776d1..916e7fb82e2c 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml index e172f020417b..f727fdbc6f72 100644 --- a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml +++ b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml @@ -28,7 +28,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml index e0e532997721..2da626974fda 100644 --- a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSecurityPolicy.yaml b/mmv1/products/compute/go_RegionSecurityPolicy.yaml index f6473c18f264..65d078bd284d 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicy.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicy.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 
'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml index 370fb57bf869..a8ca5e9bfdbd 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index f3c5a78db137..6893009d4216 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -26,7 +26,13 @@ references: docs: optional_properties: | * `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. + specified prefix. Conflicts with `name`. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. 
+ Resulting name for a `name_prefix` <= 37 characters: + `name_prefix` + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a `name_prefix` 38 - 54 characters: + `name_prefix` + YYmmdd + 3 digit incremental counter base_url: 'projects/{{project}}/regions/{{region}}/sslCertificates' has_self_link: true immutable: true @@ -38,7 +44,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSslPolicy.yaml b/mmv1/products/compute/go_RegionSslPolicy.yaml index 25f9dabca134..baab545b92e2 100644 --- a/mmv1/products/compute/go_RegionSslPolicy.yaml +++ b/mmv1/products/compute/go_RegionSslPolicy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml index e82dbaa539fe..d0c2c2295b89 100644 --- a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml index f3e5b50a1ee3..3a870fb4eb03 100644 --- a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 
'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -180,5 +180,14 @@ properties: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}' + update_verb: 'PATCH' + update_id: 'serverTlsPolicy' + fingerprint_name: 'fingerprint' resource: 'SslPolicy' imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml index b787d8fd3ae8..66f45937d3fd 100644 --- a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionUrlMap.yaml b/mmv1/products/compute/go_RegionUrlMap.yaml index afa052ed8e67..8804464c6851 100644 --- a/mmv1/products/compute/go_RegionUrlMap.yaml +++ b/mmv1/products/compute/go_RegionUrlMap.yaml @@ -29,7 +29,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml index b24fab3c88b6..30401dcdd6c1 100644 --- 
a/mmv1/products/compute/go_Reservation.yaml +++ b/mmv1/products/compute/go_Reservation.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ResizeRequest.yaml b/mmv1/products/compute/go_ResizeRequest.yaml index 6a1c965eef58..0ef3a61c7849 100644 --- a/mmv1/products/compute/go_ResizeRequest.yaml +++ b/mmv1/products/compute/go_ResizeRequest.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index 38a7877c8d83..035ab0cc7644 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -45,11 +45,17 @@ async: message: 'message' collection_url_key: 'items' custom_code: + constants: 'templates/terraform/constants/go/compute_resource_policy.go.tmpl' examples: - name: 'resource_policy_basic' primary_resource_id: 'foo' vars: name: 'gce-policy' + - name: 'resource_policy_hourly_format' + primary_resource_id: 'foo' + vars: + name: 'gce-policy' + skip_docs: true - name: 'resource_policy_full' primary_resource_id: 'bar' vars: @@ -138,6 +144,7 @@ properties: It must be in an hourly format "HH:MM", where HH : [00-23] and MM : [00] GMT. 
eg: 21:00 required: true + diff_suppress_func: 'HourlyFormatSuppressDiff' validation: function: 'verify.ValidateHourlyOnly' - name: 'dailySchedule' @@ -161,6 +168,7 @@ properties: 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. required: true + diff_suppress_func: 'HourlyFormatSuppressDiff' validation: function: 'verify.ValidateHourlyOnly' - name: 'weeklySchedule' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index af854f00bb37..089cc6ff29b9 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -61,7 +61,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml index 530a6a8142c9..a444f3212099 100644 --- a/mmv1/products/compute/go_Router.yaml +++ b/mmv1/products/compute/go_Router.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index bd91cff799c6..fc97f73023bb 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RouterRoutePolicy.yaml b/mmv1/products/compute/go_RouterRoutePolicy.yaml index afa5e8f73af3..67d22276db4d 100644 --- a/mmv1/products/compute/go_RouterRoutePolicy.yaml +++ 
b/mmv1/products/compute/go_RouterRoutePolicy.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SecurityPolicyRule.yaml b/mmv1/products/compute/go_SecurityPolicyRule.yaml index 38faecb63b84..cb2fd3bce77a 100644 --- a/mmv1/products/compute/go_SecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_SecurityPolicyRule.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ServiceAttachment.yaml b/mmv1/products/compute/go_ServiceAttachment.yaml index f35f6bb89b94..b10914454e7f 100644 --- a/mmv1/products/compute/go_ServiceAttachment.yaml +++ b/mmv1/products/compute/go_ServiceAttachment.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml index 0c12f39c56b0..913043468170 100644 --- a/mmv1/products/compute/go_SslCertificate.yaml +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -26,7 +26,13 @@ references: docs: optional_properties: | * `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. + specified prefix. Conflicts with `name`. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. 
+ Resulting name for a `name_prefix` <= 37 characters: + `name_prefix` + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a `name_prefix` 38 - 54 characters: + `name_prefix` + YYmmdd + 3 digit incremental counter base_url: 'projects/{{project}}/global/sslCertificates' has_self_link: true immutable: true @@ -38,7 +44,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SslPolicy.yaml b/mmv1/products/compute/go_SslPolicy.yaml index fedccef00273..7ee4898da65e 100644 --- a/mmv1/products/compute/go_SslPolicy.yaml +++ b/mmv1/products/compute/go_SslPolicy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml index 288bb3b3429e..ed9e1d601931 100644 --- a/mmv1/products/compute/go_Subnetwork.yaml +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -55,7 +55,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -74,8 +74,10 @@ iam_policy: custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/go/subnetwork.tmpl' constants: 'templates/terraform/constants/go/subnetwork.tmpl' + post_update: 'templates/terraform/post_update/go/compute_subnetwork.go.tmpl' custom_diff: - 'customdiff.ForceNewIfChange("ip_cidr_range", IsShrinkageIpCidr)' + - 'sendSecondaryIpRangeIfEmptyDiff' examples: - name: 'subnetwork_basic' primary_resource_id: 'network-with-private-secondary-ip-ranges' @@ -116,6 +118,28 @@ 
examples: vars: subnetwork_name: 'subnet-cidr-overlap' network_name: 'net-cidr-overlap' + - name: 'subnetwork_reserved_internal_range' + primary_resource_id: 'subnetwork-reserved-internal-range' + min_version: 'beta' + vars: + subnetwork_name: 'subnetwork-reserved-internal-range' + network_name: 'network-reserved-internal-range' + - name: 'subnetwork_reserved_secondary_range' + primary_resource_id: 'subnetwork-reserved-secondary-range' + min_version: 'beta' + vars: + subnetwork_name: 'subnetwork-reserved-secondary-range' + network_name: 'network-reserved-secondary-range' +virtual_fields: + - name: 'send_secondary_ip_range_if_empty' + description: | + Controls the removal behavior of secondary_ip_range. + When false, removing secondary_ip_range from config will not produce a diff as + the provider will default to the API's value. + When true, the provider will treat removing secondary_ip_range as sending an + empty list of secondary IP ranges to the API. + Defaults to false. + type: Boolean parameters: properties: - name: 'creationTimestamp' @@ -141,11 +165,20 @@ properties: Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. - required: true + Field is optional when `reserved_internal_range` is defined, otherwise required. + required: false + default_from_api: true update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange' update_verb: 'POST' validation: function: 'verify.ValidateIpCidrRange' + - name: 'reservedInternalRange' + type: ResourceRef + description: | + The ID of the reserved internal range. Must be prefixed with `networkconnectivity.googleapis.com` + E.g. 
`networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}` + resource: 'InternalRange' + imports: 'selfLink' - name: 'name' type: String description: | @@ -205,13 +238,10 @@ properties: to either primary or secondary ranges. **Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - `example=[]` - For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value). + breaking users during the 0.12 upgrade. To explicitly send a list of zero objects, + set `send_secondary_ip_range_if_empty = true` api_name: secondaryIpRanges unordered_list: true - schema_config_mode_attr: true default_from_api: true send_empty_value: true update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' @@ -238,9 +268,18 @@ properties: range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. - required: true + Field is optional when `reserved_internal_range` is defined, otherwise required. + required: false + default_from_api: true validation: function: 'verify.ValidateIpCidrRange' + - name: 'reservedInternalRange' + type: ResourceRef + description: | + The ID of the reserved internal range. Must be prefixed with `networkconnectivity.googleapis.com` + E.g. 
`networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}` + resource: 'InternalRange' + imports: 'selfLink' - name: 'privateIpGoogleAccess' type: Boolean description: | diff --git a/mmv1/products/compute/go_TargetGrpcProxy.yaml b/mmv1/products/compute/go_TargetGrpcProxy.yaml index 74b096d7d4b5..d4f48f713af9 100644 --- a/mmv1/products/compute/go_TargetGrpcProxy.yaml +++ b/mmv1/products/compute/go_TargetGrpcProxy.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetHttpProxy.yaml b/mmv1/products/compute/go_TargetHttpProxy.yaml index aceddd3b42da..6291862a1be6 100644 --- a/mmv1/products/compute/go_TargetHttpProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetHttpsProxy.yaml b/mmv1/products/compute/go_TargetHttpsProxy.yaml index f37b0918cc8e..c22e7eed42a8 100644 --- a/mmv1/products/compute/go_TargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -229,5 +229,13 @@ properties: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. 
- resource: 'SslPolicy' + + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. + update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' + resource: 'ServerTlsPolicy' imports: 'selfLink' diff --git a/mmv1/products/compute/go_TargetInstance.yaml b/mmv1/products/compute/go_TargetInstance.yaml index 3f9b985e344b..085880499322 100644 --- a/mmv1/products/compute/go_TargetInstance.yaml +++ b/mmv1/products/compute/go_TargetInstance.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetSslProxy.yaml b/mmv1/products/compute/go_TargetSslProxy.yaml index f77aac55132b..c18954fcb63e 100644 --- a/mmv1/products/compute/go_TargetSslProxy.yaml +++ b/mmv1/products/compute/go_TargetSslProxy.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetTcpProxy.yaml b/mmv1/products/compute/go_TargetTcpProxy.yaml index e135ab5eed35..88ca21a43799 100644 --- a/mmv1/products/compute/go_TargetTcpProxy.yaml +++ b/mmv1/products/compute/go_TargetTcpProxy.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_UrlMap.yaml 
b/mmv1/products/compute/go_UrlMap.yaml index c6febafa9843..49ab1fa5a2c1 100644 --- a/mmv1/products/compute/go_UrlMap.yaml +++ b/mmv1/products/compute/go_UrlMap.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_VpnGateway.yaml b/mmv1/products/compute/go_VpnGateway.yaml index 54202d714178..24be342df8cc 100644 --- a/mmv1/products/compute/go_VpnGateway.yaml +++ b/mmv1/products/compute/go_VpnGateway.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_VpnTunnel.yaml b/mmv1/products/compute/go_VpnTunnel.yaml index 56bb530a4375..116133853a5f 100644 --- a/mmv1/products/compute/go_VpnTunnel.yaml +++ b/mmv1/products/compute/go_VpnTunnel.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/containerattached/go_Cluster.yaml b/mmv1/products/containerattached/go_Cluster.yaml index 239916e6afab..75869c34066d 100644 --- a/mmv1/products/containerattached/go_Cluster.yaml +++ b/mmv1/products/containerattached/go_Cluster.yaml @@ -133,7 +133,8 @@ properties: type: String description: | The Kubernetes distribution of the underlying attached cluster. Supported values: - "eks", "aks". + "eks", "aks", "generic". The generic distribution provides the ability to register + or migrate any CNCF conformant cluster. 
required: true immutable: true - name: 'clusterRegion' diff --git a/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml b/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml index be3934a600a5..8e8d67cc9efc 100644 --- a/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml +++ b/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml @@ -94,6 +94,21 @@ examples: ignore_read_extra: - 'alloydb.0.settings.0.initial_user.0.password' skip_test: true + - name: 'database_migration_service_connection_profile_existing_mysql' + primary_resource_id: 'existing-mysql' + vars: + destination_csql: 'destination-csql' + destination_cp: 'destination-cp' + - name: 'database_migration_service_connection_profile_existing_postgres' + primary_resource_id: 'existing-psql' + vars: + destination_csql: 'destination-csql' + destination_cp: 'destination-cp' + - name: 'database_migration_service_connection_profile_existing_alloydb' + primary_resource_id: 'existing-alloydb' + vars: + destination_alloydb: 'destination-alloydb' + destination_cp: 'destination-cp' parameters: - name: 'connectionProfileId' type: String @@ -127,7 +142,6 @@ properties: type: KeyValueLabels description: | The resource labels for connection profile to use to annotate any related underlying resources such as Compute Engine VMs. - immutable: false - name: 'state' type: Enum description: | @@ -185,24 +199,29 @@ properties: - name: 'host' type: String description: | - Required. The IP or hostname of the source MySQL database. - required: true + The IP or hostname of the source MySQL database. + required_with: + - 'mysql.0.port' + - 'mysql.0.username' - name: 'port' type: Integer description: | - Required. The network port of the source MySQL database. - required: true + The network port of the source MySQL database. + required_with: + - 'mysql.0.host' + - 'mysql.0.username' - name: 'username' type: String description: | - Required. 
The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - required: true + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + required_with: + - 'mysql.0.host' + - 'mysql.0.port' - name: 'password' type: String description: | - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - required: true immutable: true sensitive: true custom_flatten: 'templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl' @@ -267,26 +286,38 @@ properties: - name: 'host' type: String description: | - Required. The IP or hostname of the source MySQL database. - required: true + The IP or hostname of the source MySQL database. + required_with: + - 'postgresql.0.port' + - 'postgresql.0.username' + - 'postgresql.0.password' - name: 'port' type: Integer description: | - Required. The network port of the source MySQL database. - required: true + The network port of the source MySQL database. + required_with: + - 'postgresql.0.host' + - 'postgresql.0.username' + - 'postgresql.0.password' - name: 'username' type: String description: | - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - required: true + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
+ required_with: + - 'postgresql.0.host' + - 'postgresql.0.port' + - 'postgresql.0.password' - name: 'password' type: String description: | - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - required: true immutable: true sensitive: true + required_with: + - 'postgresql.0.host' + - 'postgresql.0.port' + - 'postgresql.0.username' custom_flatten: 'templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl' - name: 'passwordSet' type: Boolean @@ -339,6 +370,10 @@ properties: type: String description: | If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + - name: 'alloydbClusterId' + type: String + description: | + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. - name: 'networkArchitecture' type: Enum description: | diff --git a/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml b/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml index aab6d6f0caa8..b14a6ca551b6 100644 --- a/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml +++ b/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml @@ -49,8 +49,6 @@ examples: vars: private_connection_id: 'my-connection' network_name: 'my-network' - test_vars_overrides: - 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "dbms-privateconnection")' parameters: - name: 'privateConnectionId' type: String @@ -74,7 +72,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. 
diff --git a/mmv1/products/dataform/go_Repository.yaml b/mmv1/products/dataform/go_Repository.yaml index 9525a76fc97c..fa9cb9abfa1e 100644 --- a/mmv1/products/dataform/go_Repository.yaml +++ b/mmv1/products/dataform/go_Repository.yaml @@ -47,6 +47,8 @@ examples: dataform_repository_name: 'dataform_repository' data: 'secret-data' secret_name: 'my-secret' + key_ring_name: 'example-key-ring' + crypto_key_name: 'example-crypto-key-name' skip_test: true - name: 'dataform_repository_with_cloudsource_repo' primary_resource_id: 'dataform_repository' @@ -57,6 +59,8 @@ examples: dataform_repository_name: 'dataform_repository' data: 'secret-data' secret_name: 'my-secret' + key_ring_name: 'example-key-ring' + crypto_key_name: 'example-crypto-key-name' skip_docs: true - name: 'dataform_repository_with_cloudsource_repo_and_ssh' primary_resource_id: 'dataform_repository' @@ -161,10 +165,15 @@ properties: type: String description: Optional. The repository's user-friendly name. min_version: 'beta' + - name: 'kmsKeyName' + type: String + description: | + Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. + It is not possible to add or update the encryption key after the repository is created. Example projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key] + min_version: 'beta' - name: 'labels' type: KeyValueLabels description: | Optional. Repository user labels. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
min_version: 'beta' - immutable: false diff --git a/mmv1/products/datafusion/go_Instance.yaml b/mmv1/products/datafusion/go_Instance.yaml index a4db1f7fef0f..8b442a23bb01 100644 --- a/mmv1/products/datafusion/go_Instance.yaml +++ b/mmv1/products/datafusion/go_Instance.yaml @@ -30,6 +30,7 @@ timeouts: delete_minutes: 50 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -44,6 +45,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'name' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 'projects/{{project}}/locations/{{location}}/instances/{{name}}' - '{{name}}' @@ -157,7 +159,6 @@ properties: description: | The resource labels for instance to use to annotate any related underlying resources, such as Compute Engine VMs. - immutable: false - name: 'options' type: KeyValuePairs description: | diff --git a/mmv1/products/dataplex/go_AspectType.yaml b/mmv1/products/dataplex/go_AspectType.yaml index a5f93176cba3..942121ef6264 100644 --- a/mmv1/products/dataplex/go_AspectType.yaml +++ b/mmv1/products/dataplex/go_AspectType.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the AspectType. - immutable: false - name: 'metadataTemplate' type: String description: | diff --git a/mmv1/products/dataplex/go_Datascan.yaml b/mmv1/products/dataplex/go_Datascan.yaml index 04e2f29b6bc9..2277fd3499b5 100644 --- a/mmv1/products/dataplex/go_Datascan.yaml +++ b/mmv1/products/dataplex/go_Datascan.yaml @@ -16,6 +16,7 @@ name: 'Datascan' description: | Represents a user-visible job which provides the insights for the related data source. +skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/dataplex/docs' @@ -126,7 +127,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the scan. A list of key->value pairs. 
- immutable: false - name: 'state' type: Enum description: | diff --git a/mmv1/products/dataplex/go_EntryGroup.yaml b/mmv1/products/dataplex/go_EntryGroup.yaml index 277c3c61a44c..9669bafe26af 100644 --- a/mmv1/products/dataplex/go_EntryGroup.yaml +++ b/mmv1/products/dataplex/go_EntryGroup.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the EntryGroup. - immutable: false - name: 'transferStatus' type: Enum description: | diff --git a/mmv1/products/dataplex/go_EntryType.yaml b/mmv1/products/dataplex/go_EntryType.yaml index ec0e7fdec87d..5af9b29a45e6 100644 --- a/mmv1/products/dataplex/go_EntryType.yaml +++ b/mmv1/products/dataplex/go_EntryType.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the EntryType. - immutable: false - name: 'typeAliases' type: Array description: | diff --git a/mmv1/products/dataplex/go_Task.yaml b/mmv1/products/dataplex/go_Task.yaml index 31b40da3cf09..f45b8d9f4cb4 100644 --- a/mmv1/products/dataplex/go_Task.yaml +++ b/mmv1/products/dataplex/go_Task.yaml @@ -16,6 +16,7 @@ name: 'Task' description: | A Dataplex task represents the work that you want Dataplex to do on a schedule. It encapsulates code, parameters, and the schedule. +skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/dataplex/docs' @@ -139,7 +140,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the task. 
- immutable: false - name: 'triggerSpec' type: NestedObject description: | diff --git a/mmv1/products/datastream/Stream.yaml b/mmv1/products/datastream/Stream.yaml index 28bac8b38ec5..419080af8ed7 100644 --- a/mmv1/products/datastream/Stream.yaml +++ b/mmv1/products/datastream/Stream.yaml @@ -1020,8 +1020,6 @@ properties: name: 'transactionLogs' allow_empty_object: true send_empty_value: true - conflicts: - - source_config.0.sql_server_source_config.change_tables description: | CDC reader reads from transaction logs. properties: [] @@ -1029,8 +1027,6 @@ properties: name: 'changeTables' allow_empty_object: true send_empty_value: true - conflicts: - - source_config.0.sql_server_source_config.transaction_logs description: | CDC reader reads from change tables. properties: [] diff --git a/mmv1/products/datastream/go_ConnectionProfile.yaml b/mmv1/products/datastream/go_ConnectionProfile.yaml index 20ac6ee464ea..1a750190635f 100644 --- a/mmv1/products/datastream/go_ConnectionProfile.yaml +++ b/mmv1/products/datastream/go_ConnectionProfile.yaml @@ -123,7 +123,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. diff --git a/mmv1/products/datastream/go_PrivateConnection.yaml b/mmv1/products/datastream/go_PrivateConnection.yaml index af4658f81f78..7410dc1d50f2 100644 --- a/mmv1/products/datastream/go_PrivateConnection.yaml +++ b/mmv1/products/datastream/go_PrivateConnection.yaml @@ -48,6 +48,7 @@ async: custom_code: constants: 'templates/terraform/constants/go/private_connection.go.tmpl' post_create: 'templates/terraform/post_create/go/private_connection.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/private_connection.go.tmpl' post_import: 'templates/terraform/post_import/go/private_connection.go.tmpl' skip_sweeper: true schema_version: 1 @@ -89,7 +90,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. 
- immutable: false - name: 'displayName' type: String description: Display name. diff --git a/mmv1/products/datastream/go_Stream.yaml b/mmv1/products/datastream/go_Stream.yaml index 94c13a5cd173..addcf7da8bcb 100644 --- a/mmv1/products/datastream/go_Stream.yaml +++ b/mmv1/products/datastream/go_Stream.yaml @@ -118,6 +118,21 @@ examples: test_vars_overrides: 'deletion_protection': 'false' skip_test: true + - name: 'datastream_stream_sql_server_change_tables' + primary_resource_id: 'default' + vars: + database_name: 'db' + database_password: 'password' + database_user: 'user' + deletion_protection: 'true' + destination_connection_profile_id: 'destination-profile' + source_connection_profile_id: 'source-profile' + sql_server_name: 'sql-server' + sql_server_root_password: 'root-password' + stream_id: 'stream' + test_vars_overrides: + 'deletion_protection': 'false' + skip_test: true - name: 'datastream_stream_postgresql_bigquery_dataset_id' primary_resource_id: 'default' vars: @@ -197,7 +212,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. @@ -1018,6 +1032,22 @@ properties: send_empty_value: true validation: function: 'validation.IntAtLeast(0)' + - name: 'transactionLogs' + type: NestedObject + description: | + CDC reader reads from transaction logs. + send_empty_value: true + allow_empty_object: true + properties: + [] + - name: 'changeTables' + type: NestedObject + description: | + CDC reader reads from change tables. 
+ send_empty_value: true + allow_empty_object: true + properties: + [] - name: 'destinationConfig' type: NestedObject description: | @@ -1160,9 +1190,8 @@ properties: immutable: true send_empty_value: true allow_empty_object: true - exactly_one_of: - - 'destination_config.0.bigquery_destination_config.0.merge' - - 'destination_config.0.bigquery_destination_config.0.append_only' + conflicts: + - destination_config.0.bigquery_destination_config.0.append_only properties: [] - name: 'appendOnly' @@ -1174,9 +1203,8 @@ properties: immutable: true send_empty_value: true allow_empty_object: true - exactly_one_of: - - 'destination_config.0.bigquery_destination_config.0.merge' - - 'destination_config.0.bigquery_destination_config.0.append_only' + conflicts: + - destination_config.0.bigquery_destination_config.0.merge properties: [] - name: 'state' diff --git a/mmv1/products/dialogflowcx/go_Intent.yaml b/mmv1/products/dialogflowcx/go_Intent.yaml index 7d027647d9e3..0d8b1dc7314f 100644 --- a/mmv1/products/dialogflowcx/go_Intent.yaml +++ b/mmv1/products/dialogflowcx/go_Intent.yaml @@ -184,7 +184,6 @@ properties: The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
- immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/discoveryengine/go_DataStore.yaml b/mmv1/products/discoveryengine/go_DataStore.yaml index a999bd7f9f55..bfa83611338e 100644 --- a/mmv1/products/discoveryengine/go_DataStore.yaml +++ b/mmv1/products/discoveryengine/go_DataStore.yaml @@ -25,34 +25,24 @@ references: docs: base_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores' self_link: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' -create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}' +create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}&skipDefaultSchemaCreation={{skip_default_schema_creation}}' update_verb: 'PATCH' update_mask: true delete_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' import_format: - 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' timeouts: - insert_minutes: 60 - update_minutes: 60 - delete_minutes: 60 + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 autogen_async: true async: - actions: ['create', 'delete', 'update'] + actions: ['create', 'delete'] type: 'OpAsync' operation: base_url: '{{op_id}}' - path: 'name' - wait_ms: 1000 - timeouts: - insert_minutes: 60 - update_minutes: 60 - delete_minutes: 60 result: - path: 'response' resource_inside_response: false - error: - path: 'error' - message: 'message' custom_code: examples: - name: 'discoveryengine_datastore_basic' @@ -71,6 +61,12 @@ examples: vars: data_store_id: 'data-store-id' skip_docs: true + - name: 
'discoveryengine_datastore_document_processing_config_layout' + primary_resource_id: 'document_processing_config_layout' + primary_resource_name: 'fmt.Sprintf("tf_test_data_store%s", context["random_suffix"])' + vars: + data_store_id: 'data-store-id' + skip_docs: true parameters: - name: 'location' type: String @@ -93,6 +89,18 @@ parameters: If true, an advanced data store for site search will be created. If the data store is not configured as site search (GENERIC vertical and PUBLIC_WEBSITE contentConfig), this flag will be ignored. + url_param_only: true + default_value: false + - name: 'skipDefaultSchemaCreation' + type: Boolean + description: | + A boolean flag indicating whether to skip the default schema creation for + the data store. Only enable this flag if you are certain that the default + schema is incompatible with your use case. + If set to true, you must manually create a schema for the data store + before any documents can be ingested. + This flag cannot be specified if `data_store.starting_schema` is + specified. url_param_only: true default_value: false @@ -120,6 +128,7 @@ properties: enum_values: - 'GENERIC' - 'MEDIA' + - 'HEALTHCARE_FHIR' - name: 'solutionTypes' type: Array description: | @@ -133,6 +142,7 @@ properties: - 'SOLUTION_TYPE_RECOMMENDATION' - 'SOLUTION_TYPE_SEARCH' - 'SOLUTION_TYPE_CHAT' + - 'SOLUTION_TYPE_GENERATIVE_CHAT' - name: 'defaultSchemaId' type: String description: | @@ -162,6 +172,33 @@ properties: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. required: false output: true + - name: 'chunkingConfig' + type: NestedObject + description: | + Whether chunking mode is enabled. + required: false + properties: + - name: 'layoutBasedChunkingConfig' + type: NestedObject + description: | + Configuration for the layout based chunking. 
+ required: false + send_empty_value: true + allow_empty_object: true + properties: + - name: 'chunkSize' + type: Integer + description: | + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + required: false + - name: 'includeAncestorHeadings' + type: Boolean + description: | + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. + + required: false - name: 'defaultParsingConfig' type: NestedObject description: | @@ -180,6 +217,7 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: [] - name: 'ocrParsingConfig' @@ -190,13 +228,26 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: - name: 'useNativeText' type: Boolean description: | If true, will use native text instead of OCR text on pages containing native text. - required: false + - name: 'layoutParsingConfig' + type: NestedObject + description: | + Configurations applied to layout parser. + required: false + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'default_parsing_config.0.digital_parsing_config' + - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' + properties: + [] - name: 'parsingConfigOverrides' type: Map description: | @@ -206,6 +257,7 @@ properties: * `docx`: Override parsing config for DOCX files, only digital parsing and or layout parsing are supported. 
key_name: 'file_type' value_type: + name: parsingConfigOverrides type: NestedObject properties: - name: 'digitalParsingConfig' @@ -218,6 +270,7 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: [] - name: 'ocrParsingConfig' @@ -228,13 +281,26 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: - name: 'useNativeText' type: Boolean description: | If true, will use native text instead of OCR text on pages containing native text. - required: false + - name: 'layoutParsingConfig' + type: NestedObject + description: | + Configurations applied to layout parser. + required: false + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'default_parsing_config.0.digital_parsing_config' + - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' + properties: + [] - name: 'createTime' type: Time description: | diff --git a/mmv1/products/discoveryengine/go_Schema.yaml b/mmv1/products/discoveryengine/go_Schema.yaml new file mode 100644 index 000000000000..3ec0b6d8e311 --- /dev/null +++ b/mmv1/products/discoveryengine/go_Schema.yaml @@ -0,0 +1,103 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Schema' +description: | + Schema defines the structure and layout of a type of document data. +references: + guides: + 'Provide a schema for your data store': 'https://cloud.google.com/generative-ai-app-builder/docs/provide-schema' + api: 'https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1/projects.locations.collections.dataStores.schemas' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas' +self_link: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas?schemaId={{schema_id}}' +delete_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +immutable: true +import_format: + - 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'discoveryengine_schema_basic' + primary_resource_id: 'basic' + vars: + data_store_id: 'data-store-id' + schema_id: 'schema-id' +parameters: + - name: 'location' + type: String + description: | + The geographic location where the data store should reside. The value can + only be one of "global", "us" and "eu". 
+ url_param_only: true + required: true + immutable: true + - name: 'dataStoreId' + type: String + description: | + The unique id of the data store. + url_param_only: true + required: true + immutable: true + - name: 'schemaId' + type: String + description: | + The unique id of the schema. + + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique full resource name of the schema. Values are of the format + `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/schemas/{schema_id}`. + This field must be a UTF-8 encoded string with a length limit of 1024 + characters. + output: true + - name: 'jsonSchema' + type: String + description: | + The JSON representation of the schema. + immutable: true + exactly_one_of: + - 'json_schema' + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl' + validation: + function: 'validation.StringIsJSON' diff --git a/mmv1/products/dlp/go_DiscoveryConfig.yaml b/mmv1/products/dlp/go_DiscoveryConfig.yaml index ff7fd3f887e3..a4be9b81b1f1 100644 --- a/mmv1/products/dlp/go_DiscoveryConfig.yaml +++ b/mmv1/products/dlp/go_DiscoveryConfig.yaml @@ -32,12 +32,12 @@ timeouts: insert_minutes: 20 update_minutes: 20 delete_minutes: 20 -skip_sweeper: true custom_code: encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' update_encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' decoder: 'templates/terraform/decoders/go/unwrap_resource.go.tmpl' custom_import: 'templates/terraform/custom_import/go/dlp_import.go.tmpl' +skip_sweeper: true examples: - name: 'dlp_discovery_config_basic' primary_resource_id: 'basic' @@ -205,6 +205,50 @@ properties: enum_values: - 'TABLE_PROFILE' - 'RESOURCE_NAME' + - name: 'tagResources' + type: NestedObject + description: Tag the profiled
resources with the specified tag conditions. + properties: + - name: 'tagConditions' + type: Array + description: The tags to associate with different conditions. + item_type: + type: NestedObject + properties: + - name: 'tag' + type: NestedObject + description: The tag value to attach to resources. + properties: + - name: 'namespacedValue' + type: String + description: The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + - name: 'sensitivityScore' + type: NestedObject + description: Conditions attaching the tag to a resource on its profile having this sensitivity score. + properties: + - name: 'score' + type: Enum + description: | + The sensitivity score applied to the resource. + required: true + enum_values: + - 'SENSITIVITY_LOW' + - 'SENSITIVITY_MODERATE' + - 'SENSITIVITY_HIGH' + - name: 'profileGenerationsToTag' + type: Array + description: The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'PROFILE_GENERATION_NEW' + - 'PROFILE_GENERATION_UPDATE' + - name: 'lowerDataRiskToLow' + type: Boolean + description: Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource.
This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. - name: 'targets' type: Array description: Target to match against for determining what to scan and how frequently @@ -346,6 +390,17 @@ properties: - 'UPDATE_FREQUENCY_NEVER' - 'UPDATE_FREQUENCY_DAILY' - 'UPDATE_FREQUENCY_MONTHLY' + - name: 'inspectTemplateModifiedCadence' + type: NestedObject + description: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + properties: + - name: 'frequency' + type: Enum + description: How frequently data profiles can be updated when the template is modified. Defaults to never. + enum_values: + - 'UPDATE_FREQUENCY_NEVER' + - 'UPDATE_FREQUENCY_DAILY' + - 'UPDATE_FREQUENCY_MONTHLY' - name: 'disabled' type: NestedObject description: 'Tables that match this filter will not have profiles created.' @@ -475,6 +530,18 @@ properties: - 'UPDATE_FREQUENCY_NEVER' - 'UPDATE_FREQUENCY_DAILY' - 'UPDATE_FREQUENCY_MONTHLY' + - name: 'inspectTemplateModifiedCadence' + type: NestedObject + description: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + properties: + - name: 'frequency' + type: Enum + description: How frequently data profiles can be updated when the template is modified. Defaults to never. + required: true + enum_values: + - 'UPDATE_FREQUENCY_NEVER' + - 'UPDATE_FREQUENCY_DAILY' + - 'UPDATE_FREQUENCY_MONTHLY' - name: 'disabled' type: NestedObject description: 'Disable profiling for database resources that match this filter.' 
diff --git a/mmv1/products/dns/go_ManagedZone.yaml b/mmv1/products/dns/go_ManagedZone.yaml new file mode 100644 index 000000000000..57f23525eb92 --- /dev/null +++ b/mmv1/products/dns/go_ManagedZone.yaml @@ -0,0 +1,429 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ManagedZone' +kind: 'dns#managedZone' +description: | + A zone is a subtree of the DNS namespace under one administrative + responsibility. A ManagedZone is a resource that represents a DNS zone + hosted by the Cloud DNS service. 
+references: + guides: + 'Managing Zones': 'https://cloud.google.com/dns/zones/' + api: 'https://cloud.google.com/dns/api/v1/managedZones' +docs: +id_format: 'projects/{{project}}/managedZones/{{name}}' +base_url: 'projects/{{project}}/managedZones' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + fetch_iam_policy_verb: 'POST' + parent_resource_attribute: 'managed_zone' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/managedZones/{{managed_zone}}' + - '{{project}}/{{managed_zone}}' +custom_code: + update_encoder: 'templates/terraform/update_encoder/go/managed_dns_zone.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/managed_dns_zone.go.tmpl' +examples: + - name: 'dns_managed_zone_quickstart' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-example-zone-googlecloudexample%s", context["random_suffix"])' + vars: + dns_compute_instance: 'dns-compute-instance' + allow_http_traffic: 'allow-http-traffic' + example_zone_googlecloudexample: 'example-zone-googlecloudexample' + dns_name: 'googlecloudexample.net.' 
+ test_vars_overrides: + 'dns_name': '"m-z.gcp.tfacc.hashicorptest.com."' + ignore_read_extra: + - 'force_destroy' + skip_docs: true + - name: 'dns_record_set_basic' + primary_resource_id: 'parent-zone' + vars: + sample_zone: 'sample-zone' + skip_docs: true + - name: 'dns_managed_zone_basic' + primary_resource_id: 'example-zone' + external_providers: ["random", "time"] + skip_vcr: true + - name: 'dns_managed_zone_private' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + - name: 'dns_managed_zone_private_multiproject' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + project_1_name: 'project-1' + project_2_name: 'project-2' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + - name: 'dns_managed_zone_private_forwarding' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + skip_test: true + - name: 'dns_managed_zone_private_gke' + primary_resource_id: 'private-zone-gke' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + cluster_1_name: 'cluster-1' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + - name: 'dns_managed_zone_private_peering' + primary_resource_id: 'peering-zone' + vars: + zone_name: 'peering-zone' + network_source_name: 'network-source' + network_target_name: 'network-target' + - name: 'dns_managed_zone_service_directory' + primary_resource_id: 'sd-zone' + min_version: 'beta' + vars: + zone_name: 'peering-zone' + network_name: 'network' + - name: 'dns_managed_zone_cloud_logging' + primary_resource_id: 'cloud-logging-enabled-zone' + vars: + zone_name: 'cloud-logging-enabled-zone' +virtual_fields: + - name: 'force_destroy' + description: 'Set this true to delete all records in the zone.' 
+ type: Boolean + default_value: false +parameters: +properties: + - name: 'description' + type: String + description: | + A textual description field. Defaults to 'Managed by Terraform'. + required: false + validation: + function: 'validation.StringIsNotEmpty' + default_value: "Managed by Terraform" + - name: 'dnsName' + type: String + description: | + The DNS name of this managed zone, for instance "example.com.". + required: true + immutable: true + - name: 'dnssecConfig' + type: NestedObject + description: DNSSEC configuration + properties: + - name: 'kind' + type: String + description: Identifies what kind of resource this is + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + default_value: "dns#managedZoneDnsSecConfig" + - name: 'nonExistence' + type: Enum + description: | + Specifies the mechanism used to provide authenticated denial-of-existence responses. + non_existence can only be updated when the state is `off`. + default_from_api: true + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + enum_values: + - 'nsec' + - 'nsec3' + - name: 'state' + type: Enum + description: Specifies whether DNSSEC is enabled, and what mode it is in + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + enum_values: + - 'off' + - 'on' + - 'transfer' + - name: 'defaultKeySpecs' + type: Array + description: | + Specifies parameters that will be used for generating initial DnsKeys + for this ManagedZone. If you provide a spec for keySigning or zoneSigning, + you must also provide one for the other. + default_key_specs can only be updated when the state is `off`. 
+ default_from_api: true + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + item_type: + type: NestedObject + properties: + - name: 'algorithm' + type: Enum + description: + String mnemonic specifying the DNSSEC algorithm of this key + enum_values: + - 'ecdsap256sha256' + - 'ecdsap384sha384' + - 'rsasha1' + - 'rsasha256' + - 'rsasha512' + - name: 'keyLength' + type: Integer + description: Length of the keys in bits + - name: 'keyType' + type: Enum + description: | + Specifies whether this is a key signing key (KSK) or a zone + signing key (ZSK). Key signing keys have the Secure Entry + Point flag set and, when active, will only be used to sign + resource record sets of type DNSKEY. Zone signing keys do + not have the Secure Entry Point flag set and will be used + to sign all other types of resource record sets. + enum_values: + - 'keySigning' + - 'zoneSigning' + - name: 'kind' + type: String + description: 'Identifies what kind of resource this is' + default_value: "dns#dnsKeySpec" + - name: 'managedZoneID' + type: Integer + description: Unique identifier for the resource; defined by the server. + api_name: id + output: true + - name: 'name' + type: String + description: | + User assigned name for this resource. + Must be unique within the project. + required: true + immutable: true + - name: 'nameServers' + type: Array + description: | + Delegate your managed_zone to these virtual name servers; + defined by the server + output: true + item_type: + type: String + - name: 'creationTime' + type: Time + description: | + The time that this resource was created on the server. + This is in RFC3339 text format. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + A set of key/value label pairs to assign to this ManagedZone. 
+ - name: 'visibility' + type: Enum + description: | + The zone's visibility: public zones are exposed to the Internet, + while private zones are visible only to Virtual Private Cloud resources. + immutable: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "public" + enum_values: + - 'private' + - 'public' + - name: 'privateVisibilityConfig' + type: NestedObject + description: | + For privately visible zones, the set of Virtual Private Cloud + resources that the zone is visible from. At least one of `gke_clusters` or `networks` must be specified. + send_empty_value: true + at_least_one_of: + - 'gke_clusters' + - 'networks' + custom_expand: 'templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl' + properties: + - name: 'gkeClusters' + type: Array + description: + 'The list of Google Kubernetes Engine clusters that can see this zone.' + item_type: + type: NestedObject + properties: + - name: 'gkeClusterName' + type: String + description: | + The resource name of the cluster to bind this ManagedZone to. + This should be specified in the format like + `projects/*/locations/*/clusters/*` + required: true + - name: 'networks' + type: Array + description: | + The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you + may experience issues with this resource while updating. If you've defined a `networks` block and + add another `networks` block while keeping the old block, Terraform will see an incorrect diff + and apply an incorrect update to the resource. If you encounter this issue, remove all `networks` + blocks in an update and then apply another update adding all of them back simultaneously. 
+ is_set: true + set_hash_func: |- + func(v interface{}) int { + if v == nil { + return 0 + } + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkRelativePathHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to bind to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'forwardingConfig' + type: NestedObject + description: | + The presence for this field indicates that outbound forwarding is enabled + for this zone. The value of this field contains the set of destinations + to forward to. + properties: + - name: 'targetNameServers' + type: Array + description: | + List of target name servers to forward to. Cloud DNS will + select the best available name server if more than + one target is given. + is_set: true + required: true + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'ipv4Address' + type: String + description: 'IPv4 address of a target name server.' + required: true + - name: 'forwardingPath' + type: Enum + description: | + Forwarding path for this TargetNameServer. If unset or `default` Cloud DNS will make forwarding + decision based on address ranges, i.e. 
RFC1918 addresses go to the VPC, Non-RFC1918 addresses go + to the Internet. When set to `private`, Cloud DNS will always send queries through VPC for this target + enum_values: + - 'default' + - 'private' + - name: 'peeringConfig' + type: NestedObject + description: | + The presence of this field indicates that DNS Peering is enabled for this + zone. The value of this field contains the network to peer with. + properties: + - name: 'targetNetwork' + type: NestedObject + description: 'The network with which to peer.' + required: true + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to forward queries to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' + - name: 'reverseLookup' + type: Boolean + description: | + Specifies if this is a managed reverse lookup zone. If true, Cloud DNS will resolve reverse + lookup queries using automatically configured records for VPC resources. This only applies + to networks listed under `private_visibility_config`. + api_name: reverseLookupConfig + min_version: 'beta' + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/object_to_bool.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bool_to_object.go.tmpl' + - name: 'serviceDirectoryConfig' + type: NestedObject + description: + The presence of this field indicates that this zone is backed by Service + Directory. The value of this field contains information related to the + namespace associated with the zone. + min_version: 'beta' + immutable: true + properties: + - name: 'namespace' + type: NestedObject + description: 'The namespace associated with the zone.' 
+ required: true + properties: + - name: 'namespaceUrl' + type: String + description: | + The fully qualified or partial URL of the service directory namespace that should be + associated with the zone. This should be formatted like + `https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace_id}` + or simply `projects/{project}/locations/{location}/namespaces/{namespace_id}` + Ignored for `public` visibility zones. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/full_to_relative_path.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/sd_full_url.tmpl' + - name: 'cloudLoggingConfig' + type: NestedObject + description: 'Cloud logging configuration' + default_from_api: true + properties: + - name: 'enableLogging' + type: Boolean + description: + 'If set, enable query logging for this ManagedZone. False by default, + making logging opt-in.' + required: true diff --git a/mmv1/products/dns/go_Policy.yaml b/mmv1/products/dns/go_Policy.yaml new file mode 100644 index 000000000000..54263175e605 --- /dev/null +++ b/mmv1/products/dns/go_Policy.yaml @@ -0,0 +1,159 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Policy' +kind: 'dns#policy' +description: | + A policy is a collection of DNS rules applied to one or more Virtual + Private Cloud resources. 
+references: + guides: + 'Using DNS server policies': 'https://cloud.google.com/dns/zones/#using-dns-server-policies' + api: 'https://cloud.google.com/dns/docs/reference/v1beta2/policies' +docs: +id_format: 'projects/{{project}}/policies/{{name}}' +base_url: 'projects/{{project}}/policies' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + pre_delete: 'templates/terraform/pre_delete/go/detach_network.tmpl' +examples: + - name: 'dns_policy_basic' + primary_resource_id: 'example-policy' + vars: + policy_name: 'example-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + - name: 'dns_policy_multiproject' + primary_resource_id: 'example-policy-multiproject' + vars: + policy_name: 'example-policy-multiproject' + network_1_name: 'network-1' + network_2_name: 'network-2' + project_1_name: 'project-1' + project_2_name: 'project-2' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: +properties: + - name: 'alternativeNameServerConfig' + type: NestedObject + description: | + Sets an alternative name server for the associated networks. + When specified, all DNS queries are forwarded to a name server that you choose. + Names such as .internal are not available when an alternative name server is specified. + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + properties: + - name: 'targetNameServers' + type: Array + description: | + Sets an alternative name server for the associated networks. When specified, + all DNS queries are forwarded to a name server that you choose. Names such as .internal + are not available when an alternative name server is specified. 
+ is_set: true + required: true + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'ipv4Address' + type: String + description: 'IPv4 address to forward to.' + required: true + - name: 'forwardingPath' + type: Enum + description: | + Forwarding path for this TargetNameServer. If unset or `default` Cloud DNS will make forwarding + decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go + to the Internet. When set to `private`, Cloud DNS will always send queries through VPC for this target + enum_values: + - 'default' + - 'private' + - name: 'description' + type: String + description: | + A textual description field. Defaults to 'Managed by Terraform'. + required: false + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + default_value: "Managed by Terraform" + - name: 'enableInboundForwarding' + type: Boolean + description: | + Allows networks bound to this policy to receive DNS queries sent + by VMs or applications over VPN connections. When enabled, a + virtual IP address will be allocated from each of the sub-networks + that are bound to this policy. + send_empty_value: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + - name: 'enableLogging' + type: Boolean + description: | + Controls whether logging is enabled for the networks bound to this policy. + Defaults to no logging if not set. + send_empty_value: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + - name: 'name' + type: String + description: | + User assigned name for this policy. 
+ required: true + - name: 'networks' + type: Array + description: + 'List of network names specifying networks to which this policy is + applied.' + is_set: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkRelativePathHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to forward queries to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' diff --git a/mmv1/products/dns/go_ResponsePolicy.yaml b/mmv1/products/dns/go_ResponsePolicy.yaml new file mode 100644 index 000000000000..c9ad2091c2d3 --- /dev/null +++ b/mmv1/products/dns/go_ResponsePolicy.yaml @@ -0,0 +1,90 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResponsePolicy' +kind: 'dns#responsePolicy' +description: | + A Response Policy is a collection of selectors that apply to queries + made against one or more Virtual Private Cloud networks. +docs: +base_url: 'projects/{{project}}/responsePolicies' +self_link: 'projects/{{project}}/responsePolicies/{{response_policy_name}}' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - responsePolicyName +custom_code: + pre_delete: 'templates/terraform/pre_delete/go/response_policy_detach_network_gke.tmpl' +examples: + - name: 'dns_response_policy_basic' + primary_resource_id: 'example-response-policy' + vars: + response_policy_name: 'example-response-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + cluster_1_name: 'cluster-1' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' +parameters: +properties: + - name: 'responsePolicyName' + type: String + description: + The user assigned name for this Response Policy, such as + `myresponsepolicy`. + required: true + immutable: true + - name: 'description' + type: String + description: | + The description of the response policy, such as `My new response policy`. + required: false + default_value: "Managed by Terraform" + - name: 'networks' + type: Array + description: + 'The list of network names specifying networks to which this policy is + applied.' + send_empty_value: true + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The fully qualified URL of the VPC network to bind to. 
+ This should be formatted like + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' + - name: 'gkeClusters' + type: Array + description: + 'The list of Google Kubernetes Engine clusters that can see this zone.' + item_type: + type: NestedObject + properties: + - name: 'gkeClusterName' + type: String + description: | + The resource name of the cluster to bind this ManagedZone to. + This should be specified in the format like + `projects/*/locations/*/clusters/*` + required: true diff --git a/mmv1/products/dns/go_ResponsePolicyRule.yaml b/mmv1/products/dns/go_ResponsePolicyRule.yaml new file mode 100644 index 000000000000..dcfa029cabba --- /dev/null +++ b/mmv1/products/dns/go_ResponsePolicyRule.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResponsePolicyRule' +kind: 'dns#responsePolicyRule' +description: | + A Response Policy Rule is a selector that applies its behavior to queries that match the selector. + Selectors are DNS names, which may be wildcards or exact matches. + Each DNS query subject to a Response Policy matches at most one ResponsePolicyRule, + as identified by the dns_name field with the longest matching suffix. 
+docs: +id_format: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +base_url: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules' +self_link: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +update_verb: 'PATCH' +import_format: + - 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - ruleName +custom_code: +examples: + - name: 'dns_response_policy_rule_basic' + primary_resource_id: 'example-response-policy-rule' + vars: + response_policy_name: 'example-response-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + cluster_1_name: 'cluster-1' + response_policy_rule_name: 'example-rule' +parameters: + - name: 'response_policy' + type: ResourceRef + description: | + Identifies the response policy addressed by this request. + url_param_only: true + required: true + resource: 'ResponsePolicy' + imports: 'responsePolicyName' +properties: + - name: 'ruleName' + type: String + description: + An identifier for this rule. Must be unique with the ResponsePolicy. + required: true + immutable: true + - name: 'dnsName' + type: String + description: + The DNS name (wildcard or exact) to apply this rule to. Must be unique + within the Response Policy Rule. + required: true + - name: 'localData' + type: NestedObject + description: | + Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; + in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed. + conflicts: + - behavior + properties: + - name: 'localDatas' + type: Array + description: + All resource record sets for this selector, one per resource record + type. The name must match the dns_name. 
+ required: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: For example, www.example.com. + required: true + - name: 'type' + type: Enum + description: One of valid DNS resource types. + required: true + enum_values: + - 'A' + - 'AAAA' + - 'CAA' + - 'CNAME' + - 'DNSKEY' + - 'DS' + - 'HTTPS' + - 'IPSECVPNKEY' + - 'MX' + - 'NAPTR' + - 'NS' + - 'PTR' + - 'SOA' + - 'SPF' + - 'SRV' + - 'SSHFP' + - 'SVCB' + - 'TLSA' + - 'TXT' + - name: 'ttl' + type: Integer + description: | + Number of seconds that this ResourceRecordSet can be cached by + resolvers. + - name: 'rrdatas' + type: Array + description: | + As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + item_type: + type: String + - name: 'behavior' + type: String + description: + Answer this query with a behavior rather than DNS data. Acceptable values + are 'behaviorUnspecified', and 'bypassResponsePolicy' + min_version: 'beta' + conflicts: + - local_data diff --git a/mmv1/products/dns/go_product.yaml b/mmv1/products/dns/go_product.yaml new file mode 100644 index 000000000000..602011cf721c --- /dev/null +++ b/mmv1/products/dns/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DNS' +display_name: 'Cloud DNS' +versions: + - name: 'ga' + base_url: 'https://dns.googleapis.com/dns/v1/' + - name: 'beta' + base_url: 'https://dns.googleapis.com/dns/v1beta2/' +scopes: + - 'https://www.googleapis.com/auth/ndev.clouddns.readwrite' diff --git a/mmv1/products/edgecontainer/go_Cluster.yaml b/mmv1/products/edgecontainer/go_Cluster.yaml index 590ccb156e0c..a1eb9b80b36e 100644 --- a/mmv1/products/edgecontainer/go_Cluster.yaml +++ b/mmv1/products/edgecontainer/go_Cluster.yaml @@ -57,10 +57,12 @@ examples: primary_resource_id: 'default' vars: edgecontainer_cluster_name: 'basic-cluster' + skip_vcr: true - name: 'edgecontainer_cluster_with_maintenance_window' primary_resource_id: 'default' vars: edgecontainer_cluster_name: 'cluster-with-maintenance' + skip_vcr: true - name: 'edgecontainer_local_control_plane_cluster' primary_resource_id: 'default' vars: diff --git a/mmv1/products/edgenetwork/go_Network.yaml b/mmv1/products/edgenetwork/go_Network.yaml index 56a1c60b7d5e..f3bf944ddcb1 100644 --- a/mmv1/products/edgenetwork/go_Network.yaml +++ b/mmv1/products/edgenetwork/go_Network.yaml @@ -86,7 +86,7 @@ properties: `projects/{{project}}/locations/{{location}}/zones/{{zone}}/networks/{{network_id}}` output: true - name: 'labels' - type: KeyValuePairs + type: KeyValueLabels description: | Labels associated with this resource. required: false diff --git a/mmv1/products/edgenetwork/go_Subnet.yaml b/mmv1/products/edgenetwork/go_Subnet.yaml index f59a6862db55..68e4c79f534b 100644 --- a/mmv1/products/edgenetwork/go_Subnet.yaml +++ b/mmv1/products/edgenetwork/go_Subnet.yaml @@ -96,7 +96,7 @@ properties: `projects/{{project}}/locations/{{location}}/zones/{{zone}}/subnets/{{subnet_id}}` output: true - name: 'labels' - type: KeyValuePairs + type: KeyValueLabels description: | Labels associated with this resource. 
required: false diff --git a/mmv1/products/firebase/go_Project.yaml b/mmv1/products/firebase/go_Project.yaml index 4b6205e60221..34f4ad712209 100644 --- a/mmv1/products/firebase/go_Project.yaml +++ b/mmv1/products/firebase/go_Project.yaml @@ -22,9 +22,13 @@ description: | min_version: 'beta' references: guides: - 'Official Documentation': 'https://firebase.google.com/' + 'Official Documentation': 'https://firebase.google.com/docs/projects/terraform/get-started' api: 'https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects' docs: + note: | + This resource should usually be used with a provider configuration + with `user_project_override = true` unless you wish for your quota + project to be different from the Firebase project. base_url: 'projects/{{project}}' self_link: 'projects/{{project}}' create_url: 'projects/{{project}}:addFirebase' diff --git a/mmv1/products/firebasehosting/go_Channel.yaml b/mmv1/products/firebasehosting/go_Channel.yaml index f169e29959f3..76c697913574 100644 --- a/mmv1/products/firebasehosting/go_Channel.yaml +++ b/mmv1/products/firebasehosting/go_Channel.yaml @@ -89,7 +89,6 @@ properties: type: KeyValueLabels description: Text labels used for extra metadata and/or filtering min_version: 'beta' - immutable: false - name: 'expireTime' type: Time description: | diff --git a/mmv1/products/firebasehosting/go_CustomDomain.yaml b/mmv1/products/firebasehosting/go_CustomDomain.yaml index e956016663f5..bd23cd30be48 100644 --- a/mmv1/products/firebasehosting/go_CustomDomain.yaml +++ b/mmv1/products/firebasehosting/go_CustomDomain.yaml @@ -86,10 +86,14 @@ examples: site_id: 'site-id' custom_domain: 'run.custom.domain.com' cloud_run_service_id: 'cloud-run-service-via-hosting' + deletion_protection: 'true' test_env_vars: project_id: 'PROJECT_NAME' test_vars_overrides: 'custom_domain': '"run.custom.domain.com"' + 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' virtual_fields: - name: 
'wait_dns_verification' description: | diff --git a/mmv1/products/firebasehosting/go_Version.yaml b/mmv1/products/firebasehosting/go_Version.yaml index a0395f425153..b7896b85c415 100644 --- a/mmv1/products/firebasehosting/go_Version.yaml +++ b/mmv1/products/firebasehosting/go_Version.yaml @@ -59,8 +59,13 @@ examples: vars: site_id: 'site-id' cloud_run_service_id: 'cloud-run-service-via-hosting' + deletion_protection: 'true' test_env_vars: project_id: 'PROJECT_NAME' + test_vars_overrides: + 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'firebasehosting_version_cloud_functions' primary_resource_id: 'default' min_version: 'beta' diff --git a/mmv1/products/firestore/go_Database.yaml b/mmv1/products/firestore/go_Database.yaml index 1ee622691214..7929d3514053 100644 --- a/mmv1/products/firestore/go_Database.yaml +++ b/mmv1/products/firestore/go_Database.yaml @@ -81,7 +81,6 @@ examples: - 'deletion_policy' - name: 'firestore_cmek_database' primary_resource_id: 'database' - min_version: 'beta' vars: database_id: 'cmek-database-id' delete_protection_state: 'DELETE_PROTECTION_ENABLED' @@ -119,7 +118,6 @@ examples: - 'deletion_policy' - name: 'firestore_cmek_database_in_datastore_mode' primary_resource_id: 'database' - min_version: 'beta' vars: database_id: 'cmek-database-id' delete_protection_state: 'DELETE_PROTECTION_ENABLED' @@ -267,7 +265,6 @@ properties: The CMEK (Customer Managed Encryption Key) configuration for a Firestore database. If not present, the database is secured by the default Google encryption key. - min_version: 'beta' immutable: true properties: - name: 'kmsKeyName' diff --git a/mmv1/products/firestore/go_Document.yaml b/mmv1/products/firestore/go_Document.yaml index ab9348f8cd15..e5cb276ef27d 100644 --- a/mmv1/products/firestore/go_Document.yaml +++ b/mmv1/products/firestore/go_Document.yaml @@ -68,6 +68,7 @@ parameters: description: | The Firestore database id. Defaults to `"(default)"`. 
url_param_only: true + immutable: true default_value: "(default)" - name: 'collection' type: String @@ -75,12 +76,14 @@ parameters: The collection ID, relative to database. For example: chatrooms or chatrooms/my-document/private-messages. url_param_only: true required: true + immutable: true - name: 'documentId' type: String description: | The client-assigned document ID to use for this document during creation. url_param_only: true required: true + immutable: true properties: - name: 'name' type: String diff --git a/mmv1/products/firestore/go_Field.yaml b/mmv1/products/firestore/go_Field.yaml index c88dc793135b..171480940b36 100644 --- a/mmv1/products/firestore/go_Field.yaml +++ b/mmv1/products/firestore/go_Field.yaml @@ -99,6 +99,7 @@ properties: description: | The Firestore database id. Defaults to `"(default)"`. url_param_only: true + immutable: true default_value: "(default)" - name: 'collection' type: String @@ -106,12 +107,14 @@ properties: The id of the collection group to configure. url_param_only: true required: true + immutable: true - name: 'field' type: String description: | The id of the field to configure. url_param_only: true required: true + immutable: true - name: 'name' type: String description: | diff --git a/mmv1/products/firestore/go_Index.yaml b/mmv1/products/firestore/go_Index.yaml index 23b81b12157a..917271fadca3 100644 --- a/mmv1/products/firestore/go_Index.yaml +++ b/mmv1/products/firestore/go_Index.yaml @@ -16,8 +16,9 @@ name: 'Index' description: | Cloud Firestore indexes enable simple and complex queries against documents in a database. - This resource manages composite indexes and not single field indexes. Both Firestore Native and Datastore Mode indexes are supported. + This resource manages composite indexes and not single field indexes. + To manage single field indexes, use the `google_firestore_field` resource instead. 
references: guides: 'Official Documentation': 'https://cloud.google.com/firestore/docs/query-data/indexing' diff --git a/mmv1/products/gkebackup/go_BackupPlan.yaml b/mmv1/products/gkebackup/go_BackupPlan.yaml index 7152caa839ba..7c1680696e21 100644 --- a/mmv1/products/gkebackup/go_BackupPlan.yaml +++ b/mmv1/products/gkebackup/go_BackupPlan.yaml @@ -227,7 +227,6 @@ properties: Description: A set of custom labels supplied by the user. A list of key->value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'backupSchedule' type: NestedObject description: diff --git a/mmv1/products/gkebackup/go_RestorePlan.yaml b/mmv1/products/gkebackup/go_RestorePlan.yaml index 0543e1e1e780..e48312f48c16 100644 --- a/mmv1/products/gkebackup/go_RestorePlan.yaml +++ b/mmv1/products/gkebackup/go_RestorePlan.yaml @@ -203,7 +203,6 @@ properties: Description: A set of custom labels supplied by the user. A list of key->value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'backupPlan' type: String description: | diff --git a/mmv1/products/gkehub/go_Membership.yaml b/mmv1/products/gkehub/go_Membership.yaml index 2bf2f8bf08b0..e3a922145bd0 100644 --- a/mmv1/products/gkehub/go_Membership.yaml +++ b/mmv1/products/gkehub/go_Membership.yaml @@ -135,7 +135,6 @@ properties: type: KeyValueLabels description: | Labels to apply to this membership. - immutable: false - name: 'endpoint' type: NestedObject description: | diff --git a/mmv1/products/gkehub2/go_Feature.yaml b/mmv1/products/gkehub2/go_Feature.yaml index d0b5e2442fe2..cbeb756f835b 100644 --- a/mmv1/products/gkehub2/go_Feature.yaml +++ b/mmv1/products/gkehub2/go_Feature.yaml @@ -127,7 +127,6 @@ properties: - name: 'labels' type: KeyValueLabels description: GCP labels for this Feature. - immutable: false - name: 'resourceState' type: NestedObject description: State of the Feature resource itself. 
@@ -271,6 +270,13 @@ properties: - name: 'version' type: String description: 'Version of ACM installed' + - name: 'management' + type: Enum + description: 'Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.' + enum_values: + - 'MANAGEMENT_UNSPECIFIED' + - 'MANAGEMENT_AUTOMATIC' + - 'MANAGEMENT_MANUAL' - name: 'configSync' type: NestedObject description: 'ConfigSync configuration for the cluster' diff --git a/mmv1/products/gkehub2/go_MembershipBinding.yaml b/mmv1/products/gkehub2/go_MembershipBinding.yaml index fd8357ff67b8..8e40fba367fb 100644 --- a/mmv1/products/gkehub2/go_MembershipBinding.yaml +++ b/mmv1/products/gkehub2/go_MembershipBinding.yaml @@ -145,4 +145,3 @@ properties: type: KeyValueLabels description: | Labels for this Membership binding. - immutable: false diff --git a/mmv1/products/gkehub2/go_Namespace.yaml b/mmv1/products/gkehub2/go_Namespace.yaml index a5dbd58f4695..b17ceca262b9 100644 --- a/mmv1/products/gkehub2/go_Namespace.yaml +++ b/mmv1/products/gkehub2/go_Namespace.yaml @@ -138,4 +138,3 @@ properties: type: KeyValueLabels description: | Labels for this Namespace. - immutable: false diff --git a/mmv1/products/gkehub2/go_Scope.yaml b/mmv1/products/gkehub2/go_Scope.yaml index e273cf9f2e13..163310c36ac6 100644 --- a/mmv1/products/gkehub2/go_Scope.yaml +++ b/mmv1/products/gkehub2/go_Scope.yaml @@ -129,4 +129,3 @@ properties: type: KeyValueLabels description: | Labels for this Scope. - immutable: false diff --git a/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml b/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml index a3a64ce804b1..f58282dea62c 100644 --- a/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml +++ b/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml @@ -153,4 +153,3 @@ properties: type: KeyValueLabels description: | Labels for this ScopeRBACRoleBinding. 
- immutable: false diff --git a/mmv1/products/iap/go_AppEngineService.yaml b/mmv1/products/iap/go_AppEngineService.yaml index c02f70a3bf34..07dcbd26c22c 100644 --- a/mmv1/products/iap/go_AppEngineService.yaml +++ b/mmv1/products/iap/go_AppEngineService.yaml @@ -46,6 +46,7 @@ examples: test_env_vars: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' + skip_test: true parameters: properties: - name: 'appId' diff --git a/mmv1/products/integrations/go_Client.yaml b/mmv1/products/integrations/go_Client.yaml index 8adff75c092c..b926660903b7 100644 --- a/mmv1/products/integrations/go_Client.yaml +++ b/mmv1/products/integrations/go_Client.yaml @@ -45,11 +45,8 @@ examples: primary_resource_id: 'example' vars: key_ring_name: 'my-keyring' - service_account_id: my-service-acc + service_account_id: 'service-acc' skip_vcr: true - - name: 'integrations_client_deprecated_fields' - primary_resource_id: 'example' - skip_docs: true parameters: - name: 'location' type: String @@ -65,8 +62,6 @@ properties: Cloud KMS config for AuthModule to encrypt/decrypt credentials. immutable: true ignore_read: true - conflicts: - - provision_gmek properties: - name: 'kmsLocation' type: String @@ -101,32 +96,12 @@ properties: the kms key is stored at the same project as customer's project and ecrypted with CMEK, otherwise, the kms key is stored in the tenant project and encrypted with GMEK. - - name: 'createSampleWorkflows' - type: Boolean - description: | - Indicates if sample workflow should be created along with provisioning. - immutable: true - ignore_read: true - conflicts: - - create_sample_integrations - deprecation_message: '`create_sample_workflows` is deprecated and will be removed in a future major release. Use `create_sample_integrations` instead.' - name: 'createSampleIntegrations' type: Boolean description: | Indicates if sample integrations should be created along with provisioning. 
immutable: true ignore_read: true - conflicts: - - create_sample_workflows - - name: 'provisionGmek' - type: Boolean - description: | - Indicates provision with GMEK or CMEK. - immutable: true - ignore_read: true - conflicts: - - cloud_kms_config - deprecation_message: '`provision_gmek` is deprecated and will be removed in a future major release. Client would be provisioned as gmek if `cloud_kms_config` is not given.' - name: 'runAsServiceAccount' type: String description: | diff --git a/mmv1/products/kms/go_AutokeyConfig.yaml b/mmv1/products/kms/go_AutokeyConfig.yaml index bc47bd075d71..a7445c3ef986 100644 --- a/mmv1/products/kms/go_AutokeyConfig.yaml +++ b/mmv1/products/kms/go_AutokeyConfig.yaml @@ -56,6 +56,7 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' external_providers: ["random", "time"] + skip_vcr: true parameters: - name: 'folder' type: String diff --git a/mmv1/products/kms/go_EkmConnection.yaml b/mmv1/products/kms/go_EkmConnection.yaml index 560cfd6dad5e..49959e3e0779 100644 --- a/mmv1/products/kms/go_EkmConnection.yaml +++ b/mmv1/products/kms/go_EkmConnection.yaml @@ -38,6 +38,13 @@ timeouts: insert_minutes: 20 update_minutes: 20 delete_minutes: 20 +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'name' + iam_conditions_request_type: 'QUERY_PARAM_NESTED' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}' custom_code: examples: - name: 'kms_ekm_connection_basic' diff --git a/mmv1/products/kms/go_KeyHandle.yaml b/mmv1/products/kms/go_KeyHandle.yaml index 087ce9e1c5f3..3b1a8b113735 100644 --- a/mmv1/products/kms/go_KeyHandle.yaml +++ b/mmv1/products/kms/go_KeyHandle.yaml @@ -60,6 +60,7 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' external_providers: ["random", "time"] + skip_vcr: true parameters: - name: 'location' type: String @@ -89,7 +90,7 @@ properties: type: String description: | 
Selector of the resource type where we want to protect resources. - For example, `storage.googleapis.com/Bucket OR compute.googleapis.com/*` + For example, `storage.googleapis.com/Bucket`. min_version: 'beta' required: true immutable: true diff --git a/mmv1/products/logging/go_LogView.yaml b/mmv1/products/logging/go_LogView.yaml index 46e491e1f8ff..6d78e73c57fe 100644 --- a/mmv1/products/logging/go_LogView.yaml +++ b/mmv1/products/logging/go_LogView.yaml @@ -42,7 +42,6 @@ iam_policy: import_format: - '{{%parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}' - '{{name}}' - min_version: 'beta' custom_code: encoder: 'templates/terraform/encoders/go/logging_log_view.go.tmpl' pre_read: 'templates/terraform/pre_read/go/logging_log_view.go.tmpl' diff --git a/mmv1/products/managedkafka/go_Cluster.yaml b/mmv1/products/managedkafka/go_Cluster.yaml index 06921d6ac7e3..2b9840f4124a 100644 --- a/mmv1/products/managedkafka/go_Cluster.yaml +++ b/mmv1/products/managedkafka/go_Cluster.yaml @@ -95,9 +95,10 @@ properties: properties: - name: 'networkConfigs' type: Array - description: "Virtual Private Cloud (VPC) networks that must be granted - direct access to the Kafka cluster. Minimum of 1 network is required. Maximum - of 10 networks can be specified." + description: "Virtual Private Cloud (VPC) subnets where IP addresses for the Kafka + cluster are allocated. To make the cluster available in a VPC, you must specify at least + one subnet per network. You must specify between 1 and 10 subnets. + Additional subnets may be specified with additional `network_configs` blocks." min_version: 'beta' required: true item_type: @@ -108,8 +109,7 @@ properties: description: "Name of the VPC subnet from which the cluster is accessible. Both broker and bootstrap server IP addresses and DNS entries are automatically created in the subnet. The subnet must be located in the same region as the - cluster. The project may differ. A minimum of 1 subnet is required. 
- A maximum of 10 subnets can be specified. The name of the subnet must be + cluster. The project may differ. The name of the subnet must be in the format `projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET`." min_version: 'beta' required: true diff --git a/mmv1/products/networkmanagement/go_ConnectivityTest.yaml b/mmv1/products/networkmanagement/go_ConnectivityTest.yaml index 0ba2624ae497..e5829588efe3 100644 --- a/mmv1/products/networkmanagement/go_ConnectivityTest.yaml +++ b/mmv1/products/networkmanagement/go_ConnectivityTest.yaml @@ -228,4 +228,3 @@ properties: type: KeyValueLabels description: | Resource labels to represent user-provided metadata. - immutable: false diff --git a/mmv1/products/networksecurity/go_AddressGroup.yaml b/mmv1/products/networksecurity/go_AddressGroup.yaml index 4de5daa3b6fb..0b9066b233d0 100644 --- a/mmv1/products/networksecurity/go_AddressGroup.yaml +++ b/mmv1/products/networksecurity/go_AddressGroup.yaml @@ -122,7 +122,6 @@ properties: description: | Set of label tags associated with the AddressGroup resource. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'type' type: Enum description: | diff --git a/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml b/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml index cb1ab4cb991e..c1d962dd3f91 100644 --- a/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml +++ b/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml @@ -96,7 +96,6 @@ properties: description: Set of label tags associated with the AuthorizationPolicy resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml b/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml index d35529ee780f..5166caf08361 100644 --- a/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml +++ b/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml @@ -16,7 +16,6 @@ name: 'ClientTlsPolicy' description: | ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. -min_version: 'beta' references: guides: 'Service Security': 'https://cloud.google.com/traffic-director/docs/security-use-cases' @@ -54,20 +53,19 @@ custom_code: examples: - name: 'network_security_client_tls_policy_basic' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-client-tls-policy' + skip_vcr: true - name: 'network_security_client_tls_policy_advanced' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-client-tls-policy' + skip_vcr: true parameters: - name: 'name' type: String description: | Name of the ClientTlsPolicy resource. - min_version: 'beta' url_param_only: true required: true immutable: true @@ -76,7 +74,6 @@ parameters: description: | The location of the client tls policy. The default value is `global`. - min_version: 'beta' url_param_only: true default_value: "global" properties: @@ -84,40 +81,32 @@ properties: type: Time description: | Time the ClientTlsPolicy was created in UTC. - min_version: 'beta' output: true - name: 'updateTime' type: Time description: | Time the ClientTlsPolicy was updated in UTC. - min_version: 'beta' output: true - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the ClientTlsPolicy resource. 
- min_version: 'beta' - immutable: false - name: 'description' type: String description: | A free-text description of the resource. Max length 1024 characters. - min_version: 'beta' - name: 'sni' type: String description: | Server Name Indication string to present to the server during TLS handshake. E.g: "secure.example.com". - min_version: 'beta' - name: 'clientCertificate' type: NestedObject description: | Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. - min_version: 'beta' properties: - name: 'grpcEndpoint' type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -126,13 +115,11 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -141,13 +128,11 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true - name: 'serverValidationCa' type: Array description: | Defines the mechanism to obtain the Certificate Authority certificate to validate the server certificate. If empty, client does not validate the server certificate. 
- min_version: 'beta' item_type: type: NestedObject properties: @@ -155,7 +140,6 @@ properties: type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -164,13 +148,11 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -179,5 +161,4 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true diff --git a/mmv1/products/networksecurity/go_FirewallEndpoint.yaml b/mmv1/products/networksecurity/go_FirewallEndpoint.yaml index b69011e44eaf..b6373f851347 100644 --- a/mmv1/products/networksecurity/go_FirewallEndpoint.yaml +++ b/mmv1/products/networksecurity/go_FirewallEndpoint.yaml @@ -88,7 +88,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. 
- immutable: false - name: 'selfLink' type: String description: | diff --git a/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml b/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml index efac035cab7d..6d1a4651ad0a 100644 --- a/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml +++ b/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml @@ -25,6 +25,12 @@ references: 'Create and associate firewall endpoints': 'https://cloud.google.com/firewall/docs/configure-firewall-endpoints' api: 'https://cloud.google.com/firewall/docs/reference/network-security/rest/v1/projects.locations.firewallEndpointAssociations#FirewallEndpointAssociation' docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project_id` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project_id` you defined. base_url: '{{parent}}/locations/{{location}}/firewallEndpointAssociations' self_link: '{{parent}}/locations/{{location}}/firewallEndpointAssociations/{{name}}' create_url: '{{parent}}/locations/{{location}}/firewallEndpointAssociations?firewallEndpointAssociationId={{name}}' @@ -95,7 +101,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. - immutable: false - name: 'disabled' type: Boolean description: | diff --git a/mmv1/products/networksecurity/go_SecurityProfile.yaml b/mmv1/products/networksecurity/go_SecurityProfile.yaml index 83ccbe758f7f..3d619d1cac58 100644 --- a/mmv1/products/networksecurity/go_SecurityProfile.yaml +++ b/mmv1/products/networksecurity/go_SecurityProfile.yaml @@ -106,7 +106,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. 
- immutable: false - name: 'threatPreventionProfile' type: NestedObject description: The threat prevention configuration for the security profile. diff --git a/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml b/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml index 1f82e44de541..0c8f14064c81 100644 --- a/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml +++ b/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml @@ -97,7 +97,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. - immutable: false - name: 'threatPreventionProfile' type: String description: | diff --git a/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml b/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml index bb132e3ff597..c3e48c7bd62b 100644 --- a/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml +++ b/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml @@ -16,7 +16,6 @@ name: 'ServerTlsPolicy' description: | ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
-min_version: 'beta' references: guides: api: 'https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies' @@ -53,22 +52,18 @@ custom_code: examples: - name: 'network_security_server_tls_policy_basic' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_advanced' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_server_cert' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_mtls' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' trust_config_name: 'my-trust-config' @@ -77,7 +72,6 @@ parameters: type: String description: | Name of the ServerTlsPolicy resource. - min_version: 'beta' url_param_only: true required: true immutable: true @@ -86,7 +80,6 @@ parameters: description: | The location of the server tls policy. The default value is `global`. - min_version: 'beta' url_param_only: true default_value: "global" properties: @@ -94,42 +87,34 @@ properties: type: Time description: | Time the ServerTlsPolicy was created in UTC. - min_version: 'beta' output: true - name: 'updateTime' type: Time description: | Time the ServerTlsPolicy was updated in UTC. - min_version: 'beta' output: true - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the ServerTlsPolicy resource. - min_version: 'beta' - immutable: false - name: 'description' type: String description: | A free-text description of the resource. Max length 1024 characters. - min_version: 'beta' - name: 'allowOpen' type: Boolean description: | This field applies only for Traffic Director policies. It is must be set to false for external HTTPS load balancer policies. 
Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility. Consider using it if you wish to upgrade in place your deployment to TLS while having mixed TLS and non-TLS traffic reaching port :80. - min_version: 'beta' - name: 'serverCertificate' type: NestedObject description: | Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. - min_version: 'beta' properties: - name: 'grpcEndpoint' type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -138,14 +123,12 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -154,21 +137,18 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. 
- min_version: 'beta' required: true - name: 'mtlsPolicy' type: NestedObject description: | This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director. Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. - min_version: 'beta' properties: - name: 'clientValidationMode' type: Enum description: | When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. - min_version: 'beta' immutable: true enum_values: - 'CLIENT_VALIDATION_MODE_UNSPECIFIED' @@ -180,14 +160,12 @@ properties: Reference to the TrustConfig from certificatemanager.googleapis.com namespace. If specified, the chain validation will be performed against certificates configured in the given TrustConfig. Allowed only if the policy is to be used with external HTTPS load balancers. - min_version: 'beta' immutable: true - name: 'clientValidationCa' type: Array description: | Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate. - min_version: 'beta' item_type: type: NestedObject properties: @@ -195,7 +173,6 @@ properties: type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. 
- min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -204,14 +181,12 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -220,5 +195,4 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true diff --git a/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml b/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml index c93282014844..0c1b45847550 100644 --- a/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml @@ -81,7 +81,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' - immutable: false - name: 'public_key' type: Array description: | diff --git a/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml b/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml index 51ce9f3f1c64..2a759fa8057f 100644 --- a/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml @@ -89,7 +89,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' 
- immutable: false - name: 'originAddress' type: String description: | diff --git a/mmv1/products/networkservices/go_EdgeCacheService.yaml b/mmv1/products/networkservices/go_EdgeCacheService.yaml index f0a34752a429..86ccaf52a816 100644 --- a/mmv1/products/networkservices/go_EdgeCacheService.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheService.yaml @@ -88,7 +88,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' - immutable: false - name: 'disableQuic' type: Boolean description: | diff --git a/mmv1/products/networkservices/go_EndpointPolicy.yaml b/mmv1/products/networkservices/go_EndpointPolicy.yaml index 76ad3250c77f..e0ade9e8be66 100644 --- a/mmv1/products/networkservices/go_EndpointPolicy.yaml +++ b/mmv1/products/networkservices/go_EndpointPolicy.yaml @@ -86,7 +86,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the TcpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_Gateway.yaml b/mmv1/products/networkservices/go_Gateway.yaml index 674909a93385..c87d7974085f 100644 --- a/mmv1/products/networkservices/go_Gateway.yaml +++ b/mmv1/products/networkservices/go_Gateway.yaml @@ -133,7 +133,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the Gateway resource. - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_GrpcRoute.yaml b/mmv1/products/networkservices/go_GrpcRoute.yaml index 51fba4ae1b0f..30d530c95de3 100644 --- a/mmv1/products/networkservices/go_GrpcRoute.yaml +++ b/mmv1/products/networkservices/go_GrpcRoute.yaml @@ -99,7 +99,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the GrpcRoute resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_HttpRoute.yaml b/mmv1/products/networkservices/go_HttpRoute.yaml index 545e3ae54f1e..410ab0b85aa8 100644 --- a/mmv1/products/networkservices/go_HttpRoute.yaml +++ b/mmv1/products/networkservices/go_HttpRoute.yaml @@ -106,7 +106,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the HttpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_LbRouteExtension.yaml b/mmv1/products/networkservices/go_LbRouteExtension.yaml index 3a2b46bde77f..294e3736de7c 100644 --- a/mmv1/products/networkservices/go_LbRouteExtension.yaml +++ b/mmv1/products/networkservices/go_LbRouteExtension.yaml @@ -87,7 +87,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of labels associated with the LbRouteExtension resource.' - immutable: false - name: 'forwardingRules' type: Array description: | diff --git a/mmv1/products/networkservices/go_LbTrafficExtension.yaml b/mmv1/products/networkservices/go_LbTrafficExtension.yaml index ebb94b424d09..99901353dc7f 100644 --- a/mmv1/products/networkservices/go_LbTrafficExtension.yaml +++ b/mmv1/products/networkservices/go_LbTrafficExtension.yaml @@ -84,7 +84,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of labels associated with the LbTrafficExtension resource.' - immutable: false - name: 'forwardingRules' type: Array description: | diff --git a/mmv1/products/networkservices/go_Mesh.yaml b/mmv1/products/networkservices/go_Mesh.yaml index 123bb9579487..d6780f3dabdc 100644 --- a/mmv1/products/networkservices/go_Mesh.yaml +++ b/mmv1/products/networkservices/go_Mesh.yaml @@ -95,7 +95,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the Mesh resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_ServiceBinding.yaml b/mmv1/products/networkservices/go_ServiceBinding.yaml index 04b7668a5e1b..b492fc1d6de4 100644 --- a/mmv1/products/networkservices/go_ServiceBinding.yaml +++ b/mmv1/products/networkservices/go_ServiceBinding.yaml @@ -85,7 +85,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the ServiceBinding resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_ServiceLbPolicies.yaml b/mmv1/products/networkservices/go_ServiceLbPolicies.yaml index 4612b13b81bd..c60a511386f5 100644 --- a/mmv1/products/networkservices/go_ServiceLbPolicies.yaml +++ b/mmv1/products/networkservices/go_ServiceLbPolicies.yaml @@ -93,7 +93,6 @@ properties: type: KeyValueLabels description: 'Set of label tags associated with the ServiceLbPolicy resource.' min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_TcpRoute.yaml b/mmv1/products/networkservices/go_TcpRoute.yaml index 9367d8556315..32dd2ae2d0a3 100644 --- a/mmv1/products/networkservices/go_TcpRoute.yaml +++ b/mmv1/products/networkservices/go_TcpRoute.yaml @@ -113,7 +113,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the TcpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | @@ -205,3 +204,12 @@ properties: description: | If true, Router will use the destination IP and port of the original connection as the destination of the request. min_version: 'beta' + - name: 'idleTimeout' + type: String + description: | + Specifies the idle timeout for the selected route. The idle timeout is defined as the period in which there are no bytes sent or received on either the upstream or downstream connection. 
If not set, the default idle timeout is 30 seconds. If set to 0s, the timeout will be disabled. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + validation: + regex: '^(0|[1-9][0-9]*)(\.[0-9]{1,9})?s$' diff --git a/mmv1/products/notebooks/go_Location.yaml b/mmv1/products/notebooks/go_Location.yaml index b99bc2d342ce..6513176f1f50 100644 --- a/mmv1/products/notebooks/go_Location.yaml +++ b/mmv1/products/notebooks/go_Location.yaml @@ -44,5 +44,4 @@ properties: - name: 'name' type: String description: 'Name of the Location resource.' - custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' diff --git a/mmv1/products/parallelstore/Instance.yaml b/mmv1/products/parallelstore/Instance.yaml index 881bb5b5790a..9ed5aa2d1cdc 100644 --- a/mmv1/products/parallelstore/Instance.yaml +++ b/mmv1/products/parallelstore/Instance.yaml @@ -127,8 +127,8 @@ properties: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: `name + \"_\" + value` would prove problematic if we were to - allow `\"_\"` in a future release. " + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " - !ruby/object:Api::Type::String name: capacityGib description: | diff --git a/mmv1/products/parallelstore/go_Instance.yaml b/mmv1/products/parallelstore/go_Instance.yaml index d066bb7131d0..fa39d556b128 100644 --- a/mmv1/products/parallelstore/go_Instance.yaml +++ b/mmv1/products/parallelstore/go_Instance.yaml @@ -134,8 +134,8 @@ properties: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. 
For example, representing labels - as the string: `name + \"_\" + value` would prove problematic if we were to - allow `\"_\"` in a future release. " + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " min_version: 'beta' - name: 'capacityGib' type: String diff --git a/mmv1/products/privateca/go_CaPool.yaml b/mmv1/products/privateca/go_CaPool.yaml index 2d0292a2dcb0..a257a9e7bfac 100644 --- a/mmv1/products/privateca/go_CaPool.yaml +++ b/mmv1/products/privateca/go_CaPool.yaml @@ -502,4 +502,3 @@ properties: An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false diff --git a/mmv1/products/privateca/go_Certificate.yaml b/mmv1/products/privateca/go_Certificate.yaml index d7d449927a45..bc46f211cc2f 100644 --- a/mmv1/products/privateca/go_Certificate.yaml +++ b/mmv1/products/privateca/go_Certificate.yaml @@ -672,7 +672,6 @@ properties: type: KeyValueLabels description: | Labels with user-defined metadata to apply to this resource. - immutable: false - name: 'pemCsr' type: String description: | diff --git a/mmv1/products/privateca/go_CertificateAuthority.yaml b/mmv1/products/privateca/go_CertificateAuthority.yaml index 18efff479357..8bd6c9a68729 100644 --- a/mmv1/products/privateca/go_CertificateAuthority.yaml +++ b/mmv1/products/privateca/go_CertificateAuthority.yaml @@ -783,4 +783,3 @@ properties: An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false diff --git a/mmv1/products/privateca/go_CertificateTemplate.yaml b/mmv1/products/privateca/go_CertificateTemplate.yaml index b5a0a7022949..c54e6dab962c 100644 --- a/mmv1/products/privateca/go_CertificateTemplate.yaml +++ b/mmv1/products/privateca/go_CertificateTemplate.yaml @@ -303,4 +303,3 @@ properties: - name: 'labels' type: KeyValueLabels description: Optional. Labels with user-defined metadata. 
- immutable: false diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index c16b3ab2f43b..068a568afaa2 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -41,6 +41,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'schema' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: update_encoder: 'templates/terraform/update_encoder/go/pubsub_schema.tmpl' examples: diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 260e1a733403..92bc7d775487 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -127,7 +127,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Subscription. - immutable: false - name: 'bigqueryConfig' type: NestedObject description: | @@ -213,6 +212,10 @@ properties: description: | The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. The maxBytes limit may be exceeded in cases where messages are larger than the limit. + - name: 'maxMessages' + type: Integer + description: | + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. - name: 'state' type: Enum description: | @@ -231,6 +234,10 @@ properties: type: Boolean description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + - name: 'useTopicSchema' + type: Boolean + description: | + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
- name: 'serviceAccountEmail' type: String description: | @@ -387,8 +394,6 @@ properties: diff_suppress_func: 'comparePubsubSubscriptionExpirationPolicy' - name: 'filter' type: String - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateRegexp(`^.{1,256}$`)' description: | The subscription only delivers the messages that match the filter. Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages @@ -396,6 +401,8 @@ properties: you can't modify the filter. required: false immutable: true + validation: + regex: '^.{0,256}$' - name: 'deadLetterPolicy' type: NestedObject description: | diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index 807462055e78..87b6d457b0a8 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -49,6 +49,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'topic' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: encoder: 'templates/terraform/encoders/go/no_send_name.go.tmpl' update_encoder: 'templates/terraform/update_encoder/go/pubsub_topic.tmpl' @@ -105,7 +106,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Topic. - immutable: false - name: 'messageStoragePolicy' type: NestedObject description: | @@ -130,7 +130,6 @@ properties: type: NestedObject description: | Settings for validating messages published against a schema. - default_from_api: true properties: - name: 'schema' type: String diff --git a/mmv1/products/redis/go_Cluster.yaml b/mmv1/products/redis/go_Cluster.yaml index 5e643219507a..f9bb64b701a8 100644 --- a/mmv1/products/redis/go_Cluster.yaml +++ b/mmv1/products/redis/go_Cluster.yaml @@ -283,3 +283,122 @@ properties: Configure Redis Cluster behavior using a subset of native Redis configuration parameters. 
Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/cluster/supported-instance-configurations + - name: 'maintenancePolicy' + type: NestedObject + description: Maintenance policy for a cluster + properties: + - name: 'createTime' + type: String + description: | + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'weeklyMaintenanceWindow' + type: Array + description: | + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + item_type: + type: NestedObject + properties: + - name: 'day' + type: Enum + description: | + Required. The day of week that maintenance updates occur. + + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + required: true + enum_values: + - 'DAY_OF_WEEK_UNSPECIFIED' + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'duration' + type: String + description: | + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + output: true + - name: 'startTime' + type: NestedObject + description: | + Required. Start time of the window in UTC time. 
+ required: true + send_empty_value: true + allow_empty_object: true + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + validation: + function: 'validation.IntBetween(0,23)' + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Must be from 0 to 59. + validation: + function: 'validation.IntBetween(0,59)' + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + validation: + function: 'validation.IntBetween(0,60)' + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + validation: + function: 'validation.IntBetween(0,999999999)' + - name: 'maintenanceSchedule' + type: NestedObject + description: Upcoming maintenance schedule. + output: true + properties: + - name: 'startTime' + type: String + description: | + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'endTime' + type: String + description: | + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'scheduleDeadlineTime' + type: String + description: | + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. 
+ output: true diff --git a/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml b/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml new file mode 100644 index 000000000000..c5eed19c8747 --- /dev/null +++ b/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml @@ -0,0 +1,130 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v1/folders.notificationConfigs' +docs: +base_url: 'folders/{{folder}}/notificationConfigs' +self_link: 'folders/{{folder}}/notificationConfigs/{{config_id}}' +create_url: 'folders/{{folder}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl' +examples: + - name: 'scc_folder_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + folder_display_name: 'folder-name' + config_id: 'my-config' + topic_name: 'my-topic' + test_env_vars: + org_id: 'ORG_ID' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + Numerical ID of the parent folder. + url_param_only: true + required: true + immutable: true + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `folders/{{folder}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". 
+ required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml b/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml new file mode 100644 index 000000000000..aab5912d34a3 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml @@ -0,0 +1,118 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. +references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.muteConfigs' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/muteConfigs' +self_link: 'folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_folder_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + folder_display_name: 'folder-name' + test_env_vars: + org_id: 'ORG_ID' + test_vars_overrides: + 'sleep': 'true' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + The folder whose Cloud Security Command Center the Mute + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by folder. 
If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + organizations/{organization}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or projects/{project}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. + required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. 
+ required: true diff --git a/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml new file mode 100644 index 000000000000..5ec56fd5828d --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml @@ -0,0 +1,138 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.notificationConfigs' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/notificationConfigs' +self_link: 'folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl' +examples: + - name: 'scc_v2_folder_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + folder_display_name: 'folder-name' + config_id: 'my-config' + topic_name: 'my-topic' + test_env_vars: + org_id: 'ORG_ID' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + Numerical ID of the parent folder. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + Location ID of the parent organization. If not provided, 'global' will be used as the default location. + url_param_only: true + required: false + immutable: true + default_value: "global" + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}`. 
+ output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. 
+ required: true diff --git a/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml new file mode 100644 index 000000000000..1c3263f4b062 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml @@ -0,0 +1,152 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/bigQueryExports' +self_link: 'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_folder_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + dataset_id: 'my_dataset_id' + name: 'my-export' + folder_display_name: 'folder-name' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + The folder where Cloud Security Command Center Big Query Export + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The BigQuery export configuration is stored in this location. If not provided, Use global as default. 
+ url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. 
+ output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml b/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml new file mode 100644 index 000000000000..6449a6e49b9a --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml @@ -0,0 +1,113 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. +references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.muteConfigs' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/muteConfigs' +self_link: 'organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_organization_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Mute + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + organizations/{organization}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or projects/{project}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. + required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. + required: true diff --git a/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml new file mode 100644 index 000000000000..9ba89f6b3fd8 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml @@ -0,0 +1,134 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.notificationConfigs' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/notificationConfigs' +self_link: '{{name}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl' +examples: + - name: 'scc_v2_organization_notification_config_basic' + primary_resource_id: 'custom_organization_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Notification + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `organizations/{{organization}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). 
+ validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml new file mode 100644 index 000000000000..b2f070bb4512 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml @@ -0,0 +1,148 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.bigQueryExports' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports' +self_link: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl' +examples: + - name: 'scc_v2_organization_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + name: 'my-export' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Big Query Export + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. 
+ url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. 
+ output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_OrganizationSource.yaml b/mmv1/products/securitycenterv2/go_OrganizationSource.yaml new file mode 100644 index 000000000000..e02d659237fd --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationSource.yaml @@ -0,0 +1,88 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSource' +description: | + A Cloud Security Command Center's (Cloud SCC) finding source. A finding + source is an entity or a mechanism that can produce a finding. A source is + like a container of findings that come from the same scanner, logger, + monitor, etc. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.sources' +docs: +base_url: 'organizations/{{organization}}/sources' +self_link: '{{name}}' +update_verb: 'PATCH' +update_mask: true +skip_delete: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + fetch_iam_policy_verb: 'POST' + parent_resource_attribute: 'source' + base_url: 'organizations/{{organization}}/sources/{{source}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'organizations/{{organization}}/sources/{{source}}' + - '{{source}}' +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/scc_source_self_link_as_name_set_organization.go.tmpl' +examples: + - name: 'scc_source_basic' + primary_resource_id: 'custom_source' + vars: + source_display_name: 'My Source' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Source + lives in. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this source, in the format + `organizations/{{organization}}/sources/{{source}}`. 
+ output: true
+ - name: 'description'
+ type: String
+ description: |
+ The description of the source (max of 1024 characters).
+ validation:
+ function: 'validation.StringLenBetween(0, 1024)'
+ - name: 'displayName'
+ type: String
+ description: |
+ The source’s display name. A source’s display name must be unique
+ amongst its siblings, for example, two sources with the same parent
+ can't share the same display name. The display name must start and end
+ with a letter or digit, may contain letters, digits, spaces, hyphens,
+ and underscores, and can be no longer than 32 characters.
+ required: true
+ validation:
+ regex: '[\p{L}\p{N}]([\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?' diff --git a/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml b/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml new file mode 100644 index 000000000000..2fd80176f7f0 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml @@ -0,0 +1,106 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. 
+references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.muteConfigs' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/muteConfigs' +self_link: 'projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_project_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + test_env_vars: + project_id: 'PROJECT_NAME' + skip_test: true +parameters: + - name: 'location' + type: String + description: | + location Id is provided by project. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + projects/{project}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or organizations/{organization}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. 
+ required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. + required: true diff --git a/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml new file mode 100644 index 000000000000..62f92ffa8f3c --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. 
A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets, etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.notificationConfigs' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/notificationConfigs' +self_link: '{{name}}' +create_url: 'projects/{{project}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name_set_project.go.tmpl' +examples: + - name: 'scc_v2_project_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'location' + - 'project' +parameters: + - name: 'configId' + type: String + description: | + This must be unique within the project. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + Location ID of the parent project. Only global is supported at the moment. 
+ url_param_only: true + required: false + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `projects/{{projectId}}/locations/{{location}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. 
+ + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml new file mode 100644 index 000000000000..73c06de1c11b --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml @@ -0,0 +1,143 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/bigQueryExports' +self_link: 'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_project_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + dataset_id: 'my_dataset_id' + name: 'my-export' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + ignore_read_extra: + - 'project' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + Location ID is provided by the organization. If not provided, global is used as the default. + url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + output: true + - name: 'description' + type: String + description: | + The description of the BigQuery export (max of 1024 characters). 
+ validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. 
The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_product.yaml b/mmv1/products/securitycenterv2/go_product.yaml new file mode 100644 index 000000000000..b01078c6e2c9 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecurityCenterV2' +legacy_name: 'scc_v2' +display_name: 'Security Command Center (SCC)v2 API' +versions: + - name: 'ga' + base_url: 'https://securitycenter.googleapis.com/v2/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/vertexai/go_Dataset.yaml b/mmv1/products/vertexai/go_Dataset.yaml index ead71c55abb7..c6c09f09bbd8 100644 --- a/mmv1/products/vertexai/go_Dataset.yaml +++ b/mmv1/products/vertexai/go_Dataset.yaml @@ -82,7 +82,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Workflow. - immutable: false - name: 'encryptionSpec' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_Endpoint.yaml b/mmv1/products/vertexai/go_Endpoint.yaml index 21d6def117b1..12c85f5c593b 100644 --- a/mmv1/products/vertexai/go_Endpoint.yaml +++ b/mmv1/products/vertexai/go_Endpoint.yaml @@ -373,7 +373,6 @@ properties: can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. - immutable: false - name: 'createTime' type: String description: Output only. Timestamp when this Endpoint was created. diff --git a/mmv1/products/vertexai/go_FeatureGroup.yaml b/mmv1/products/vertexai/go_FeatureGroup.yaml index 976e31428985..241a5d566c46 100644 --- a/mmv1/products/vertexai/go_FeatureGroup.yaml +++ b/mmv1/products/vertexai/go_FeatureGroup.yaml @@ -78,7 +78,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your FeatureGroup. - immutable: false - name: 'description' type: String description: The description of the FeatureGroup. 
diff --git a/mmv1/products/vertexai/go_FeatureGroupFeature.yaml b/mmv1/products/vertexai/go_FeatureGroupFeature.yaml index 40905b1e27c3..d41e7d49fafb 100644 --- a/mmv1/products/vertexai/go_FeatureGroupFeature.yaml +++ b/mmv1/products/vertexai/go_FeatureGroupFeature.yaml @@ -90,7 +90,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your FeatureGroup. - immutable: false - name: 'description' type: String description: The description of the FeatureGroup. diff --git a/mmv1/products/vertexai/go_FeatureOnlineStore.yaml b/mmv1/products/vertexai/go_FeatureOnlineStore.yaml index 607c366c9419..9838eac120ae 100644 --- a/mmv1/products/vertexai/go_FeatureOnlineStore.yaml +++ b/mmv1/products/vertexai/go_FeatureOnlineStore.yaml @@ -64,6 +64,7 @@ examples: name: 'example_feature_online_store_beta_bigtable' ignore_read_extra: - 'force_destroy' + skip_vcr: true virtual_fields: - name: 'force_destroy' description: @@ -102,7 +103,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your feature online stores. - immutable: false - name: 'state' type: String description: The state of the Feature Online Store. See the possible states in [this link](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featureOnlineStores#state). diff --git a/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml b/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml index 708d52d55ffa..6c87c539f517 100644 --- a/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml +++ b/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml @@ -70,6 +70,7 @@ examples: min_version: 'beta' vars: name: 'example_feature_view_vector_search' + skip_vcr: true parameters: - name: 'featureOnlineStore' type: String @@ -106,7 +107,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this FeatureView. 
- immutable: false - name: 'syncConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_Featurestore.yaml b/mmv1/products/vertexai/go_Featurestore.yaml index 1b7761c299fb..943750f30f24 100644 --- a/mmv1/products/vertexai/go_Featurestore.yaml +++ b/mmv1/products/vertexai/go_Featurestore.yaml @@ -140,7 +140,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Featurestore. - immutable: false - name: 'onlineServingConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml b/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml index 64d75b1c724d..6099e1f17af2 100644 --- a/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml +++ b/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml @@ -128,7 +128,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this EntityType. - immutable: false - name: 'monitoringConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml b/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml index 6e905f971af4..54ff7b3d9d87 100644 --- a/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml +++ b/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml @@ -105,7 +105,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to the feature. - immutable: false - name: 'description' type: String description: Description of the feature. diff --git a/mmv1/products/vertexai/go_Index.yaml b/mmv1/products/vertexai/go_Index.yaml index 87eec4dd6cd7..af0331caf14e 100644 --- a/mmv1/products/vertexai/go_Index.yaml +++ b/mmv1/products/vertexai/go_Index.yaml @@ -223,7 +223,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your Indexes. 
- immutable: false - name: 'createTime' type: String description: diff --git a/mmv1/products/vertexai/go_IndexEndpoint.yaml b/mmv1/products/vertexai/go_IndexEndpoint.yaml index 8aa6b8c557fe..dedbabb025b2 100644 --- a/mmv1/products/vertexai/go_IndexEndpoint.yaml +++ b/mmv1/products/vertexai/go_IndexEndpoint.yaml @@ -94,7 +94,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your Indexes. - immutable: false - name: 'createTime' type: String description: The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. diff --git a/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml b/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml new file mode 100644 index 000000000000..4708d17edef2 --- /dev/null +++ b/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml @@ -0,0 +1,297 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'IndexEndpointDeployedIndex' +description: |- + An endpoint indexes are deployed into. An index endpoint can have multiple deployed indexes. 
+references: + guides: + api: 'https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex' +docs: +id_format: '{{index_endpoint}}/deployedIndex/{{deployed_index_id}}' +base_url: '{{index_endpoint}}' +self_link: '{{index_endpoint}}' +create_url: '{{index_endpoint}}:deployIndex' +update_url: '{{index_endpoint}}:mutateDeployedIndex' +update_verb: 'POST' +delete_url: '{{index_endpoint}}:undeployIndex' +delete_verb: 'POST' +import_format: + - 'projects/{{project}}/locations/{{region}}/indexEndpoints/{{index_endpoint}}/deployedIndex/{{deployed_index_id}}' +timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 20 +async: + actions: ['create', 'update', 'delete'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 20 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' + include_project: true +custom_code: + encoder: 'templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + decoder: 'templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' +examples: + - name: 'vertex_ai_index_endpoint_deployed_index_basic' + primary_resource_id: 'basic_deployed_index' + vars: + endpoint_name: 'endpoint-name' + network_name: 'network-name' + deployed_index_id: 'deployed_index_id' + display_name: 'vertex-deployed-index' + display_name_index: 'test-index' + bucket_name: 'bucket-name' + service_account_id: 'vertex-sa' + address_name: 'vertex-ai-range' + test_vars_overrides: + 'network_name': 
'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + 'address_name': 'acctest.BootstrapSharedTestGlobalAddress(t, "vpc-network-1", acctest.AddressWithPrefixLength(8))' + - name: 'vertex_ai_index_endpoint_deployed_index_basic_two' + primary_resource_id: 'basic_deployed_index' + vars: + endpoint_name: 'endpoint-name' + network_name: 'network-name' + deployed_index_id: 'deployed_index_id' + display_name: 'vertex-deployed-index' + display_name_index: 'test-index' + bucket_name: 'bucket-name' + service_account_id: 'vertex-sa' + address_name: 'vertex-ai-range' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + 'address_name': 'acctest.BootstrapSharedTestGlobalAddress(t, "vpc-network-1", acctest.AddressWithPrefixLength(8))' + - name: 'vertex_ai_index_endpoint_deployed_index_dedicated_resources' + primary_resource_id: 'dedicated_resources' + vars: + display_name_index: 'test-index' + bucket_name: 'bucket-name' + display_name: 'vertex-deployed-index' + deployed_index_id: 'deployed_index_id' + skip_docs: true + - name: 'vertex_ai_index_endpoint_deployed_index_automatic_resources' + primary_resource_id: 'automatic_resources' + vars: + display_name_index: 'test-index' + bucket_name: 'bucket-name' + display_name: 'vertex-deployed-index' + deployed_index_id: 'deployed_index_id' + skip_docs: true +parameters: + - name: 'indexEndpoint' + type: ResourceRef + description: | + Identifies the index endpoint. Must be in the format + 'projects/{{project}}/locations/{{region}}/indexEndpoints/{{indexEndpoint}}' + url_param_only: true + required: true + immutable: true + resource: 'IndexEndpoint' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The name of the DeployedIndex resource. + output: true + - name: 'deployedIndexId' + type: String + description: The user specified ID of the DeployedIndex. 
The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. + required: true + immutable: true + - name: 'index' + type: String + description: The name of the Index this is the deployment of. + required: true + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'displayName' + type: String + description: The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters. + immutable: true + - name: 'createTime' + type: String + description: The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + output: true + - name: 'privateEndpoints' + type: NestedObject + description: Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if [network](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#IndexEndpoint.FIELDS.network) is configured. + output: true + properties: + - name: 'matchGrpcAddress' + type: String + description: The ip address used to send match gRPC requests. + output: true + - name: 'serviceAttachment' + type: String + description: The name of the service attachment resource. Populated if private service connect is enabled. + output: true + - name: 'pscAutomatedEndpoints' + type: Array + description: | + PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set. + output: true + item_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + description: | + Corresponding projectId in pscAutomationConfigs + output: true + - name: 'network' + type: String + description: | + Corresponding network in pscAutomationConfigs. 
+ output: true + - name: 'matchAddress' + type: String + description: | + ip Address created by the automated forwarding rule. + output: true + - name: 'indexSyncTime' + type: String + description: | + The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the [Index.update_time](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexes#Index.FIELDS.update_time) of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must [list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.operations/list#google.longrunning.Operations.ListOperations) the operations that are running on the original Index. Only the successfully completed Operations with updateTime equal or before this sync time are contained in this DeployedIndex. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'automaticResources' + type: NestedObject + description: | + A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. + + # Note: Having the fields within automaticResources not being marked as immutable was done in order to support the ability to update such fields. 
See : https://github.com/GoogleCloudPlatform/magic-modules/pull/11039#issuecomment-2209316648 + default_from_api: true + properties: + - name: 'minReplicaCount' + type: Integer + description: | + The minimum number of replicas this DeployedModel will be always deployed on. If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1). + + If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + default_from_api: true + - name: 'maxReplicaCount' + type: Integer + description: | + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. + default_from_api: true + - name: 'dedicatedResources' + type: NestedObject + description: | + A description of resources that are dedicated to the DeployedIndex, and that need a higher degree of manual configuration. The field minReplicaCount must be set to a value strictly greater than 0, or else validation will fail. 
We don't provide SLA when minReplicaCount=1. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + + Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. + + Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. + + Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. + + n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. + + # Having fields within dedicatedResources not being marked as immutable as well as removing + # fields such as acceleratorType, acceleratorCount, tpuTopology was done in order to support the ability to update such fields. This is discussed extensively [here](https://github.com/GoogleCloudPlatform/magic-modules/pull/11039#issuecomment-2209316648). + properties: + - name: 'machineSpec' + type: NestedObject + description: The minimum number of replicas this DeployedModel will be always deployed on. + # This field (and its nested fields) is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + required: true + immutable: true + properties: + - name: 'machineType' + type: String + description: | + The type of the machine. + + See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) + + See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). + + For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2. 
For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required. + immutable: true + - name: 'minReplicaCount' + type: Integer + description: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. + required: true + - name: 'maxReplicaCount' + type: Integer + description: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount + default_from_api: true + - name: 'enableAccessLogging' + type: Boolean + description: If true, private endpoint's access logs are sent to Cloud Logging. + # This field is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + immutable: true + default_value: false + - name: 'deployedIndexAuthConfig' + type: NestedObject + description: If set, the authentication is enabled for the private endpoint. + # This field (and its nested fields) is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + immutable: true + properties: + - name: 'authProvider' + type: NestedObject + description: Defines the authentication provider that the DeployedIndex uses. + properties: + - name: 'audiences' + type: Array + description: The list of JWT audiences. that are allowed to access. A JWT containing any of these audiences will be accepted. 
+ immutable: true + item_type: + type: String + - name: 'allowedIssuers' + type: Array + description: | + A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com + immutable: true + item_type: + type: String + - name: 'reservedIpRanges' + type: Array + description: | + A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. + If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. + + The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. + + For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. + immutable: true + item_type: + type: String + - name: 'deploymentGroup' + type: String + description: | + The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. + Creating deployment_groups with reserved_ip_ranges is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. [See the official documentation here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex.FIELDS.deployment_group). + Note: we only support up to 5 deployment groups (not including 'default'). 
+ immutable: true + default_value: "default" diff --git a/mmv1/products/vertexai/go_Tensorboard.yaml b/mmv1/products/vertexai/go_Tensorboard.yaml index d4a99e5190bc..ba54d07256e7 100644 --- a/mmv1/products/vertexai/go_Tensorboard.yaml +++ b/mmv1/products/vertexai/go_Tensorboard.yaml @@ -116,4 +116,3 @@ properties: type: KeyValueLabels description: | The labels with user-defined metadata to organize your Tensorboards. - immutable: false diff --git a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml index b6fd372d0737..85a298620059 100644 --- a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml @@ -47,8 +47,8 @@ async: path: 'error' message: 'message' include_project: true -skip_sweeper: true custom_code: +skip_sweeper: true examples: - name: 'vmware_engine_external_access_rule_basic' primary_resource_id: 'vmw-engine-external-access-rule' diff --git a/mmv1/products/vmwareengine/go_ExternalAddress.yaml b/mmv1/products/vmwareengine/go_ExternalAddress.yaml index c853ea1348e6..2b8395be5be4 100644 --- a/mmv1/products/vmwareengine/go_ExternalAddress.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAddress.yaml @@ -51,9 +51,10 @@ async: path: 'error' message: 'message' include_project: true -skip_sweeper: true custom_code: +skip_sweeper: true error_retry_predicates: + - 'transport_tpg.ExternalIpServiceNotActive' examples: - name: 'vmware_engine_external_address_basic' diff --git a/mmv1/products/vmwareengine/go_PrivateCloud.yaml b/mmv1/products/vmwareengine/go_PrivateCloud.yaml index 95ad9731938c..1c52a4c13760 100644 --- a/mmv1/products/vmwareengine/go_PrivateCloud.yaml +++ b/mmv1/products/vmwareengine/go_PrivateCloud.yaml @@ -52,6 +52,7 @@ custom_code: constants: 'templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl' update_encoder: 'templates/terraform/update_encoder/go/private_cloud.go.tmpl' decoder: 
'templates/terraform/decoders/go/private_cloud.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl' post_delete: 'templates/terraform/post_delete/go/private_cloud.go.tmpl' post_update: 'templates/terraform/post_update/go/private_cloud.go.tmpl' pre_delete: 'templates/terraform/pre_delete/go/vmwareengine_private_cloud.go.tmpl' diff --git a/mmv1/products/vpcaccess/go_Connector.yaml b/mmv1/products/vpcaccess/go_Connector.yaml index f79e6156459d..fded7434edb2 100644 --- a/mmv1/products/vpcaccess/go_Connector.yaml +++ b/mmv1/products/vpcaccess/go_Connector.yaml @@ -115,21 +115,27 @@ properties: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + default_from_api: true + conflicts: + - min_instances validation: function: 'validation.IntBetween(200, 1000)' - default_value: 200 - name: 'minInstances' type: Integer description: | Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be lower than the value specified by max_instances. default_from_api: true + conflicts: + - min_throughput - name: 'maxInstances' type: Integer description: | Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be higher than the value specified by min_instances. default_from_api: true + conflicts: + - max_throughput - name: 'maxThroughput' type: Integer description: | @@ -137,9 +143,11 @@ properties: when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by min_throughput. 
If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of max_throughput is discouraged in favor of max_instances. + default_from_api: true + conflicts: + - max_instances validation: function: 'validation.IntBetween(200, 1000)' - default_value: 300 - name: 'selfLink' type: String description: | diff --git a/mmv1/products/workflows/go_Workflow.yaml b/mmv1/products/workflows/go_Workflow.yaml new file mode 100644 index 000000000000..dc534c04bd15 --- /dev/null +++ b/mmv1/products/workflows/go_Workflow.yaml @@ -0,0 +1,141 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workflow' +description: | + Workflow program to be executed by Workflows. +references: + guides: + 'Managing Workflows': 'https://cloud.google.com/workflows/docs/creating-updating-workflow' + api: 'https://cloud.google.com/workflows/docs/reference/rest/v1/projects.locations.workflows' +docs: + optional_properties: | + * `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. If this and name are unspecified, a random value is chosen for the name. 
+id_format: 'projects/{{project}}/locations/{{region}}/workflows/{{name}}' +base_url: 'projects/{{project}}/locations/{{region}}/workflows' +self_link: 'projects/{{project}}/locations/{{region}}/workflows/{{name}}' +create_url: 'projects/{{project}}/locations/{{region}}/workflows?workflowId={{name}}' +update_verb: 'PATCH' +update_mask: true +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/workflow.tmpl' + encoder: 'templates/terraform/encoders/go/workflow.go.tmpl' +schema_version: 1 +state_upgraders: true +examples: + - name: 'workflow_basic' + primary_resource_id: 'example' + vars: + name: 'workflow' + account_id: 'my-account' + skip_import_test: true +parameters: + - name: 'region' + type: String + description: The region of the workflow. + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: Name of the Workflow. + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'description' + type: String + description: | + Description of the workflow provided by the user. Must be at most 1000 unicode characters long. + default_from_api: true + - name: 'createTime' + type: String + description: | + The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + output: true + - name: 'updateTime' + type: String + description: | + The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+ output: true + - name: 'state' + type: String + description: State of the workflow deployment. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + A set of key/value label pairs to assign to this Workflow. + - name: 'serviceAccount' + type: String + description: | + Name of the service account associated with the latest workflow version. This service + account represents the identity of the workflow and determines what permissions the workflow has. + Format: projects/{project}/serviceAccounts/{account} or {account}. + Using - as a wildcard for the {project} or not providing one at all will infer the project from the account. + The {account} value can be the email address or the unique_id of the service account. + If not provided, workflow will use the project's default service account. + Modifying this field for an existing workflow results in a new workflow revision. + default_from_api: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'sourceContents' + type: String + description: | + Workflow code to be executed. The size limit is 128KB. + - name: 'revisionId' + type: String + description: | + The revision of the workflow. A new one is generated if the service account or source contents is changed. + output: true + - name: 'cryptoKeyName' + type: String + description: | + The KMS key used to encrypt workflow and execution data. + + Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} + - name: 'callLogLevel' + type: Enum + description: | + Describes the level of platform logging to apply to calls and call responses during + executions of this workflow. If both the workflow and the execution specify a logging level, + the execution level takes precedence. 
+ enum_values: + - 'CALL_LOG_LEVEL_UNSPECIFIED' + - 'LOG_ALL_CALLS' + - 'LOG_ERRORS_ONLY' + - 'LOG_NONE' + - name: 'userEnvVars' + type: KeyValuePairs + description: | + User-defined environment variables associated with this workflow revision. This map has a maximum length of 20. Each string can take up to 4KiB. Keys cannot be empty strings and cannot start with "GOOGLE" or "WORKFLOWS". diff --git a/mmv1/products/workflows/go_product.yaml b/mmv1/products/workflows/go_product.yaml new file mode 100644 index 000000000000..a8bd13a3689f --- /dev/null +++ b/mmv1/products/workflows/go_product.yaml @@ -0,0 +1,36 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workflows' +display_name: 'Workflows' +versions: + - name: 'beta' + base_url: 'https://workflows.googleapis.com/v1/' + - name: 'ga' + base_url: 'https://workflows.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' diff --git a/mmv1/products/workstations/go_Workstation.yaml b/mmv1/products/workstations/go_Workstation.yaml new file mode 100644 index 000000000000..0ee998625140 --- /dev/null +++ b/mmv1/products/workstations/go_Workstation.yaml @@ -0,0 +1,162 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workstation' +description: + 'A single instance of a developer workstation with its own persistent storage.' 
+min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs.workstations' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations?workstationId={{workstation_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'workstation_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' + - '{{workstation_id}}' + min_version: 'beta' +custom_code: +examples: + - name: 'workstation_basic' + primary_resource_id: 'default' + primary_resource_name: 
'fmt.Sprintf("tf-test-workstation-cluster%s", context["random_suffix"]), fmt.Sprintf("tf-test-workstation-config%s", context["random_suffix"]), fmt.Sprintf("tf-test-work-station%s", context["random_suffix"])' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + workstation_name: 'work-station' +parameters: + - name: 'workstationId' + type: String + description: | + ID to use for the workstation. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationConfigId' + type: String + description: | + The ID of the parent workstation cluster config. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationClusterId' + type: String + description: | + The ID of the parent workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation parent resources reside. + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Full name of this resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + A system-assigned unique identified for this resource. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: + 'Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources.' + min_version: 'beta' + - name: 'annotations' + type: KeyValueAnnotations + description: 'Client-specified annotations. This is distinct from labels.' 
+ min_version: 'beta' + - name: 'env' + type: KeyValuePairs + description: | + 'Client-specified environment variables passed to the workstation container's entrypoint.' + min_version: 'beta' + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'host' + type: String + description: | + Host to which clients can send HTTPS traffic that will be received by the workstation. + Authorized traffic will be received to the workstation as HTTP on port 80. + To send traffic to a different port, clients may prefix the host with the destination port in the format "{port}-{host}". + min_version: 'beta' + output: true + - name: 'state' + type: Enum + description: | + Current state of the workstation. + min_version: 'beta' + output: true + enum_values: + - 'STATE_STARTING' + - 'STATE_RUNNING' + - 'STATE_STOPPING' + - 'STATE_STOPPED' diff --git a/mmv1/products/workstations/go_WorkstationCluster.yaml b/mmv1/products/workstations/go_WorkstationCluster.yaml new file mode 100644 index 000000000000..e602afbc048e --- /dev/null +++ b/mmv1/products/workstations/go_WorkstationCluster.yaml @@ -0,0 +1,237 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'WorkstationCluster' +description: "A grouping of workstation configurations and the associated workstations in that region." 
+min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters/create' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters?workstationClusterId={{workstation_cluster_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'workstation_cluster_basic' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + - name: 'workstation_cluster_private' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster-private' + - name: 'workstation_cluster_custom_domain' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster-custom-domain' +parameters: + - name: 'workstationClusterId' + type: String + description: | + ID to use for the workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation cluster should reside. 
+ min_version: 'beta' + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The name of the cluster resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: + "Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources." + min_version: 'beta' + - name: 'network' + type: String + description: | + The relative resource name of the VPC network on which the instance can be accessed. + It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}". + min_version: 'beta' + required: true + immutable: true + - name: 'subnetwork' + type: String + description: | + Name of the Compute Engine subnetwork in which instances associated with this cluster will be created. + Must be part of the subnetwork specified for this cluster. + min_version: 'beta' + required: true + immutable: true + - name: 'controlPlaneIp' + type: String + description: | + The private IP address of the control plane for this workstation cluster. + Workstation VMs need access to this IP address to work with the service, so make sure that your firewall rules allow egress from the workstation VMs to this address. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'degraded' + type: Boolean + description: | + Whether this resource is in degraded mode, in which case it may require user action to restore full functionality. + Details can be found in the conditions field. + min_version: 'beta' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: "Client-specified annotations. This is distinct from labels." 
+ min_version: 'beta' + - name: 'etag' + type: Fingerprint + description: | + Checksum computed by the server. + May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + min_version: 'beta' + output: true + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'privateClusterConfig' + type: NestedObject + description: | + Configuration for private cluster. + min_version: 'beta' + properties: + - name: 'enablePrivateEndpoint' + type: Boolean + description: | + Whether Workstations endpoint is private. + min_version: 'beta' + required: true + immutable: true + - name: 'clusterHostname' + type: String + description: | + Hostname for the workstation cluster. + This field will be populated only when private endpoint is enabled. + To access workstations in the cluster, create a new DNS zone mapping this domain name to an internal IP address and a forwarding rule mapping that address to the service attachment. + min_version: 'beta' + output: true + - name: 'serviceAttachmentUri' + type: String + description: | + Service attachment URI for the workstation cluster. + The service attachment is created when private endpoint is enabled. + To access workstations in the cluster, configure access to the managed service using (Private Service Connect)[https://cloud.google.com/vpc/docs/configure-private-service-connect-services]. + min_version: 'beta' + output: true + - name: 'allowedProjects' + type: Array + description: | + Additional project IDs that are allowed to attach to the workstation cluster's service attachment. + By default, the workstation cluster's project and the VPC host project (if different) are allowed. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'domainConfig' + type: NestedObject + description: | + Configuration options for a custom domain. 
+ min_version: 'beta' + properties: + - name: 'domain' + type: String + description: | + Domain used by Workstations for HTTP ingress. + + min_version: 'beta' + required: true + immutable: true + - name: 'conditions' + type: Array + description: |- + Status conditions describing the current resource state. + min_version: 'beta' + output: true + item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: |- + The status code, which should be an enum value of google.rpc.Code. + min_version: 'beta' + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + min_version: 'beta' + output: true + - name: 'details' + type: Array + description: | + A list of messages that carry the error details. + min_version: 'beta' + output: true + item_type: + type: KeyValuePairs diff --git a/mmv1/products/workstations/go_WorkstationConfig.yaml b/mmv1/products/workstations/go_WorkstationConfig.yaml new file mode 100644 index 000000000000..05372e367a3f --- /dev/null +++ b/mmv1/products/workstations/go_WorkstationConfig.yaml @@ -0,0 +1,680 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'WorkstationConfig' +description: + 'A set of configuration options describing how a workstation will be run. 
+ Workstation configurations are intended to be shared across multiple + workstations.' +min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs/create' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs?workstationConfigId={{workstation_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'workstation_config_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' + - '{{workstation_config_id}}' + min_version: 'beta' +custom_code: +examples: + - name: 'workstation_config_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-workstation-cluster%s", context["random_suffix"]), 
fmt.Sprintf("tf-test-workstation-config%s", context["random_suffix"])' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + key_short_name: 'keyname' + value_short_name: 'valuename' + org_id: '123456789' + test_vars_overrides: + 'key_short_name': '"tf-test-key-" + acctest.RandString(t, 10)' + 'value_short_name': '"tf-test-value-" + acctest.RandString(t, 10)' + 'org_id': 'envvar.GetTestOrgFromEnv(t)' + - name: 'workstation_config_container' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_persistent_directories' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_source_snapshot' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_shielded_instance_config' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_accelerators' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_boost' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_encryption_key' + primary_resource_id: 'default' + min_version: 'beta' + vars: + account_id: 'my-account' + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' +parameters: + - name: 
'workstationConfigId' + type: String + description: | + The ID to be assigned to the workstation cluster config. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationClusterId' + type: String + description: | + The ID of the parent workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation cluster config should reside. + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Full name of this resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: + 'Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources.' + min_version: 'beta' + - name: 'annotations' + type: KeyValueAnnotations + description: 'Client-specified annotations. This is distinct from labels.' + min_version: 'beta' + - name: 'etag' + type: Fingerprint + description: | + Checksum computed by the server. + May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + min_version: 'beta' + output: true + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'idleTimeout' + type: String + description: | + How long to wait before automatically stopping an instance that hasn't recently received any user traffic. A value of 0 indicates that this instance should never time out from idleness. Defaults to 20 minutes. 
+ A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + default_value: "1200s" + - name: 'runningTimeout' + type: String + description: | + How long to wait before automatically stopping a workstation after it was started. A value of 0 indicates that workstations using this configuration should never time out from running duration. Must be greater than 0 and less than 24 hours if `encryption_key` is set. Defaults to 12 hours. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + default_value: "43200s" + - name: 'replicaZones' + type: Array + description: | + Specifies the zones used to replicate the VM and disk resources within the region. If set, exactly two zones within the workstation cluster's region must be specified—for example, `['us-central1-a', 'us-central1-f']`. + If this field is empty, two default zones within the region are used. Immutable after the workstation configuration is created. + min_version: 'beta' + immutable: true + default_from_api: true + item_type: + type: String + - name: 'enableAuditAgent' + type: Boolean + description: | + Whether to enable Linux `auditd` logging on the workstation. When enabled, a service account must also be specified that has `logging.buckets.write` permission on the project. Operating system audit logging is distinct from Cloud Audit Logs. + min_version: 'beta' + ignore_read: true + - name: 'host' + type: NestedObject + description: | + Runtime host for a workstation. 
+ min_version: 'beta' + default_from_api: true + update_mask_fields: + - 'host.gceInstance.machineType' + - 'host.gceInstance.poolSize' + - 'host.gceInstance.tags' + - 'host.gceInstance.serviceAccountScopes' + - 'host.gceInstance.disablePublicIpAddresses' + - 'host.gceInstance.enableNestedVirtualization' + - 'host.gceInstance.shieldedInstanceConfig.enableSecureBoot' + - 'host.gceInstance.shieldedInstanceConfig.enableVtpm' + - 'host.gceInstance.shieldedInstanceConfig.enableIntegrityMonitoring' + - 'host.gceInstance.confidentialInstanceConfig.enableConfidentialCompute' + - 'host.gceInstance.accelerators' + - 'host.gceInstance.boostConfigs' + - 'host.gceInstance.disableSsh' + - 'host.gceInstance.vmTags' + properties: + - name: 'gceInstance' + type: NestedObject + description: | + A runtime using a Compute Engine instance. + min_version: 'beta' + default_from_api: true + properties: + - name: 'machineType' + type: String + description: |- + The name of a Compute Engine machine type. + min_version: 'beta' + default_from_api: true + - name: 'serviceAccount' + type: String + description: |- + Email address of the service account that will be used on VM instances used to support this config. This service account must have permission to pull the specified container image. If not set, VMs will run without a service account, in which case the image must be publicly accessible. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'serviceAccountScopes' + type: Array + description: |- + Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'poolSize' + type: Integer + description: |- + Number of instances to pool for faster workstation startup. 
+ min_version: 'beta' + default_from_api: true + - name: 'bootDiskSizeGb' + type: Integer + description: |- + Size of the boot disk in GB. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'tags' + type: Array + description: | + Network tags to add to the Compute Engine machines backing the Workstations. + min_version: 'beta' + item_type: + type: String + - name: 'disablePublicIpAddresses' + type: Boolean + description: | + Whether instances have no public IP address. + min_version: 'beta' + - name: 'disableSsh' + type: Boolean + description: | + Whether to disable SSH access to the VM. + min_version: 'beta' + send_empty_value: true + default_value: true + - name: 'enableNestedVirtualization' + type: Boolean + description: | + Whether to enable nested virtualization on the Compute Engine VMs backing the Workstations. + + See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization + min_version: 'beta' + - name: 'shieldedInstanceConfig' + type: NestedObject + description: | + A set of Compute Engine Shielded instance options. + min_version: 'beta' + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl' + properties: + - name: 'enableSecureBoot' + type: Boolean + description: | + Whether the instance has Secure Boot enabled. + min_version: 'beta' + send_empty_value: true + - name: 'enableVtpm' + type: Boolean + description: | + Whether the instance has the vTPM enabled. + min_version: 'beta' + send_empty_value: true + - name: 'enableIntegrityMonitoring' + type: Boolean + description: | + Whether the instance has integrity monitoring enabled. + min_version: 'beta' + send_empty_value: true + - name: 'confidentialInstanceConfig' + type: NestedObject + description: | + A set of Compute Engine Confidential VM instance options. 
+ min_version: 'beta' + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl' + properties: + - name: 'enableConfidentialCompute' + type: Boolean + description: | + Whether the instance has confidential compute enabled. + min_version: 'beta' + send_empty_value: true + - name: 'accelerators' + type: Array + description: | + An accelerator card attached to the instance. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". + min_version: 'beta' + required: true + - name: 'count' + type: Integer + description: | + Number of accelerator cards exposed to the instance. + min_version: 'beta' + required: true + - name: 'boostConfigs' + type: Array + description: | + A list of the boost configurations that workstations created using this workstation configuration are allowed to use. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + The id to be used for the boost config. + min_version: 'beta' + required: true + - name: 'machineType' + type: String + description: | + The type of machine that boosted VM instances will use—for example, e2-standard-4. For more information about machine types that Cloud Workstations supports, see the list of available machine types https://cloud.google.com/workstations/docs/available-machine-types. Defaults to e2-standard-4. + min_version: 'beta' + - name: 'bootDiskSizeGb' + type: Integer + description: |- + Size of the boot disk in GB. The minimum boot disk size is `30` GB. Defaults to `50` GB. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'enableNestedVirtualization' + type: Boolean + description: | + Whether to enable nested virtualization on the Compute Engine VMs backing boosted Workstations. 
+ + See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization + min_version: 'beta' + default_from_api: true + - name: 'poolSize' + type: Integer + description: |- + Number of instances to pool for faster workstation boosting. + min_version: 'beta' + default_from_api: true + - name: 'accelerators' + type: Array + description: | + An accelerator card attached to the boost instance. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". + min_version: 'beta' + required: true + - name: 'count' + type: Integer + description: | + Number of accelerator cards exposed to the instance. + min_version: 'beta' + required: true + - name: 'vmTags' + type: KeyValuePairs + description: | + Resource manager tags to be bound to the VM instances backing the Workstations. + Tag keys and values have the same definition as + https://cloud.google.com/resource-manager/docs/tags/tags-overview + Keys must be in the format `tagKeys/{tag_key_id}`, and + values are in the format `tagValues/456`. + min_version: 'beta' + - name: 'persistentDirectories' + type: Array + description: | + Directories to persist across workstation sessions. + min_version: 'beta' + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'mountPath' + type: String + description: | + Location of this directory in the running workstation. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'gcePd' + type: NestedObject + description: | + A directory to persist across workstation sessions, backed by a Compute Engine regional persistent disk. Can only be updated if not empty during creation. 
+ min_version: 'beta' + default_from_api: true + properties: + - name: 'fsType' + type: String + description: | + Type of file system that the disk should be formatted with. The workstation image must support this file system type. Must be empty if `sourceSnapshot` is set. Defaults to `ext4`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'diskType' + type: String + description: | + The type of the persistent disk for the home directory. Defaults to `pd-standard`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'sizeGb' + type: Integer + description: |- + The GB capacity of a persistent home directory for each workstation created with this configuration. Must be empty if `sourceSnapshot` is set. + Valid values are `10`, `50`, `100`, `200`, `500`, or `1000`. Defaults to `200`. If less than `200` GB, the `diskType` must be `pd-balanced` or `pd-ssd`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'reclaimPolicy' + type: Enum + description: | + Whether the persistent disk should be deleted when the workstation is deleted. Valid values are `DELETE` and `RETAIN`. Defaults to `DELETE`. + min_version: 'beta' + enum_values: + - 'DELETE' + - 'RETAIN' + - name: 'sourceSnapshot' + type: String + description: | + Name of the snapshot to use as the source for the disk. This can be the snapshot's `self_link`, `id`, or a string in the format of `projects/{project}/global/snapshots/{snapshot}`. If set, `sizeGb` and `fsType` must be empty. Can only be updated if it has an existing value. + # TODO(esu): Add conflicting fields once complex lists are supported. + min_version: 'beta' + - name: 'ephemeralDirectories' + type: Array + description: | + Ephemeral directories which won't persist across workstation sessions. 
+ min_version: 'beta' + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'mountPath' + type: String + description: | + Location of this directory in the running workstation. + min_version: 'beta' + default_from_api: true + - name: 'gcePd' + type: NestedObject + description: | + An EphemeralDirectory backed by a Compute Engine persistent disk. + min_version: 'beta' + default_from_api: true + properties: + - name: 'diskType' + type: String + description: | + Type of the disk to use. Defaults to `"pd-standard"`. + min_version: 'beta' + default_from_api: true + - name: 'sourceSnapshot' + type: String + description: | + Name of the snapshot to use as the source for the disk. + + Must be empty if `sourceImage` is set. + Must be empty if `read_only` is false. + Updating `source_snapshot` will update content in the ephemeral directory after the workstation is restarted. + min_version: 'beta' + - name: 'sourceImage' + type: String + description: | + Name of the disk image to use as the source for the disk. + + Must be empty `sourceSnapshot` is set. + Updating `sourceImage` will update content in the ephemeral directory after the workstation is restarted. + min_version: 'beta' + - name: 'readOnly' + type: Boolean + description: | + Whether the disk is read only. If true, the disk may be shared by multiple VMs and `sourceSnapshot` must be set. + min_version: 'beta' + - name: 'container' + type: NestedObject + description: | + Container that will be run for each workstation using this configuration when that workstation is started. + min_version: 'beta' + default_from_api: true + update_mask_fields: + - 'container.image' + - 'container.command' + - 'container.args' + - 'container.workingDir' + - 'container.env' + - 'container.runAsUser' + properties: + - name: 'image' + type: String + description: | + Docker image defining the container. This image must be accessible by the config's service account. 
+ min_version: 'beta' + default_from_api: true + - name: 'command' + type: Array + description: | + If set, overrides the default ENTRYPOINT specified by the image. + min_version: 'beta' + item_type: + type: String + - name: 'args' + type: Array + description: | + Arguments passed to the entrypoint. + min_version: 'beta' + item_type: + type: String + - name: 'workingDir' + type: String + description: | + If set, overrides the default DIR specified by the image. + # Allow unsetting to revert to container default. + min_version: 'beta' + send_empty_value: true + - name: 'env' + type: KeyValuePairs + description: | + Environment variables passed to the container. + The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". + min_version: 'beta' + - name: 'runAsUser' + type: Integer + description: | + If set, overrides the USER specified in the image with the given uid. + min_version: 'beta' + - name: 'encryptionKey' + type: NestedObject + description: | + Encrypts resources of this workstation configuration using a customer-managed encryption key. + + If specified, the boot disk of the Compute Engine instance and the persistent disk are encrypted using this encryption key. If this field is not set, the disks are encrypted using a generated key. Customer-managed encryption keys do not protect disk metadata. + If the customer-managed encryption key is rotated, when the workstation instance is stopped, the system attempts to recreate the persistent disk with the new version of the key. Be sure to keep older versions of the key until the persistent disk is recreated. Otherwise, data on the persistent disk will be lost. + If the encryption key is revoked, the workstation session will automatically be stopped within 7 hours. + min_version: 'beta' + immutable: true + properties: + - name: 'kmsKey' + type: String + description: | + The name of the Google Cloud KMS encryption key. 
+ min_version: 'beta' + required: true + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account to use with the specified KMS key. + min_version: 'beta' + required: true + - name: 'readinessChecks' + type: Array + description: | + Readiness checks to be performed on a workstation. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: | + Path to which the request should be sent. + min_version: 'beta' + required: true + - name: 'port' + type: Integer + description: | + Port to which the request should be sent. + min_version: 'beta' + required: true + - name: 'degraded' + type: Boolean + description: | + Whether this resource is in degraded mode, in which case it may require user action to restore full functionality. Details can be found in the conditions field. + min_version: 'beta' + output: true + - name: 'disableTcpConnections' + type: Boolean + description: | + Disables support for plain TCP connections in the workstation. By default the service supports TCP connections via a websocket relay. Setting this option to true disables that relay, which prevents the usage of services that require plain tcp connections, such as ssh. When enabled, all communication must occur over https or wss. + min_version: 'beta' + - name: 'conditions' + type: Array + description: |- + Status conditions describing the current resource state. + min_version: 'beta' + output: true + item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: |- + The status code, which should be an enum value of google.rpc.Code. + min_version: 'beta' + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + min_version: 'beta' + output: true + - name: 'details' + type: Array + description: | + A list of messages that carry the error details. 
+ min_version: 'beta' + output: true + item_type: + type: KeyValuePairs diff --git a/mmv1/products/workstations/go_product.yaml b/mmv1/products/workstations/go_product.yaml new file mode 100644 index 000000000000..7400feef3a9b --- /dev/null +++ b/mmv1/products/workstations/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workstations' +display_name: 'Cloud Workstations' +versions: + - name: 'beta' + base_url: 'https://workstations.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 7f7105eb38da..8824e2a9ae94 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -26,7 +26,6 @@ import ( "text/template" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" - "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/golang/glog" ) @@ -35,7 +34,7 @@ type TemplateData struct { // include Compile::Core OutputFolder string - Version product.Version + VersionName string TerraformResourceDirectory string TerraformProviderModule string @@ -50,13 +49,13 @@ var GA_VERSION = "ga" var BETA_VERSION = "beta" var ALPHA_VERSION = "alpha" -func NewTemplateData(outputFolder string, version product.Version) *TemplateData { - 
td := TemplateData{OutputFolder: outputFolder, Version: version} +func NewTemplateData(outputFolder string, versionName string) *TemplateData { + td := TemplateData{OutputFolder: outputFolder, VersionName: versionName} - if version.Name == GA_VERSION { + if versionName == GA_VERSION { td.TerraformResourceDirectory = "google" td.TerraformProviderModule = "github.com/hashicorp/terraform-provider-google" - } else if version.Name == ALPHA_VERSION { + } else if versionName == ALPHA_VERSION { td.TerraformResourceDirectory = "google-private" td.TerraformProviderModule = "internal/terraform-next" } else { @@ -293,9 +292,9 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g // end func (td *TemplateData) ImportPath() string { - if td.Version.Name == GA_VERSION { + if td.VersionName == GA_VERSION { return "github.com/hashicorp/terraform-provider-google/google" - } else if td.Version.Name == ALPHA_VERSION { + } else if td.VersionName == ALPHA_VERSION { return "internal/terraform-next/google-private" } return "github.com/hashicorp/terraform-provider-google-beta/google-beta" diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 5ffc49dee51b..834df4869501 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -97,7 +97,7 @@ func (t *Terraform) GenerateObjects(outputFolder string, generateCode, generateD } func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPath string, generateCode, generateDocs bool) { - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) if !object.IsExcluded() { log.Printf("Generating %s resource", object.Name) @@ -143,7 +143,7 @@ func (t *Terraform) GenerateResourceTests(object api.Resource, templateData Temp eligibleExample := false for _, example := range object.Examples { if !example.SkipTest { - if 
object.ProductMetadata.VersionObjOrClosest(t.Version.Name).CompareTo(object.ProductMetadata.VersionObjOrClosest(example.MinVersion)) > 0 { + if object.ProductMetadata.VersionObjOrClosest(t.Version.Name).CompareTo(object.ProductMetadata.VersionObjOrClosest(example.MinVersion)) >= 0 { eligibleExample = true break } @@ -190,7 +190,7 @@ func (t *Terraform) GenerateOperation(outputFolder string) { log.Println(fmt.Errorf("error creating parent directory %v: %v", targetFolder, err)) } targetFilePath := path.Join(targetFolder, fmt.Sprintf("%s_operation.go", google.Underscore(t.Product.Name))) - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) templateData.GenerateOperationFile(targetFilePath, *asyncObjects[0]) } @@ -392,7 +392,7 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string) { func (t Terraform) CompileCommonFiles(outputFolder string, products []*api.Product, overridePath string) { t.generateResourcesForVersion(products) files := t.getCommonCompileFiles(t.TargetVersionName) - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) t.CompileFileList(outputFolder, files, *templateData, products) } diff --git a/mmv1/provider/terraform.rb b/mmv1/provider/terraform.rb index 5ba3f3b46763..bd6abab44f8a 100644 --- a/mmv1/provider/terraform.rb +++ b/mmv1/provider/terraform.rb @@ -410,6 +410,9 @@ def generate_object(object, output_folder, version_name, generate_code, generate end def generate_object_modified(object, output_folder, version_name) + # skip healthcare - exceptional case will be done manually + return if output_folder.include? 
'healthcare' + pwd = Dir.pwd data = build_object_data(pwd, object, output_folder, version_name) Dir.chdir output_folder diff --git a/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl b/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl index af237ca83f40..e3b8f39cbee3 100644 --- a/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl +++ b/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl @@ -27,22 +27,30 @@ func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *s } if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("group_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("domain").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("special_group").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } } + if memberInState := d.Get("user_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + + if memberInState := d.Get("group_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + return false } diff --git a/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl b/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl new file mode 100644 index 000000000000..4acee74aa971 
--- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl @@ -0,0 +1,5 @@ +// Suppresses a diff on cases like 1:00 when it should be 01:00. +// Because API will normalize this value +func HourlyFormatSuppressDiff(_, old, new string, _ *schema.ResourceData) bool { + return old == "0"+new +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl index fe8d016797b3..17c73cc0954e 100644 --- a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl @@ -87,7 +87,8 @@ func modifyNotebooksInstanceState(config *transport_tpg.Config, d *schema.Resour } return res, nil } -{{ if ne $.Compiler "terraformgoogleconversion-codegen" }} + +{{- if ne $.Compiler "terraformgoogleconversion-codegen" }} func waitForNotebooksOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { var opRes map[string]interface{} err := NotebooksOperationWaitTimeWithResponse( diff --git a/mmv1/templates/terraform/constants/go/subnetwork.tmpl b/mmv1/templates/terraform/constants/go/subnetwork.tmpl index 3833540942b6..74edec68a5a5 100644 --- a/mmv1/templates/terraform/constants/go/subnetwork.tmpl +++ b/mmv1/templates/terraform/constants/go/subnetwork.tmpl @@ -17,3 +17,34 @@ func IsShrinkageIpCidr(_ context.Context, old, new, _ interface{}) bool { return true } + +func sendSecondaryIpRangeIfEmptyDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // on create, return immediately as we don't need to determine if the value is empty or not + if diff.Id() == "" { + return nil + } + + sendZero := diff.Get("send_secondary_ip_range_if_empty").(bool) + if !sendZero { + return nil + } + + configSecondaryIpRange := 
diff.GetRawConfig().GetAttr("secondary_ip_range") + if !configSecondaryIpRange.IsKnown() { + return nil + } + configValueIsEmpty := configSecondaryIpRange.IsNull() || configSecondaryIpRange.LengthInt() == 0 + + stateSecondaryIpRange := diff.GetRawState().GetAttr("secondary_ip_range") + if !stateSecondaryIpRange.IsKnown() { + return nil + } + stateValueIsEmpty := stateSecondaryIpRange.IsNull() || stateSecondaryIpRange.LengthInt() == 0 + + if configValueIsEmpty && !stateValueIsEmpty { + log.Printf("[DEBUG] setting secondary_ip_range to newly empty") + diff.SetNew("secondary_ip_range", make([]interface{}, 0)) + } + + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl b/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl index b2941d68a57c..5123f7eaf04e 100644 --- a/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl +++ b/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl @@ -31,3 +31,49 @@ func isMultiNodePrivateCloud(d *schema.ResourceData) bool { } return false } + +func isPrivateCloudInDeletedState(config *transport_tpg.Config, d *schema.ResourceData, billingProject string, userAgent string) (bool, error) { + baseurl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") + if err != nil { + return false, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: baseurl, + UserAgent: userAgent, + }) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[DEBUG] No existing private cloud found") + return false, nil + } + return false, err + } + // if resource exists but is marked for deletion + v, ok := res["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud exists and is marked for deletion.") + 
return true, nil + } + return false, nil +} + +// Check if private cloud is absent or if it exists in a deleted state. +func pollCheckForPrivateCloudAbsence(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + if transport_tpg.IsGoogleApiErrorWithCode(respErr, 404) { + return transport_tpg.SuccessPollResult() + } + return transport_tpg.ErrorPollResult(respErr) + } + // if resource exists but is marked for deletion + log.Printf("[DEBUG] Fetching state of the private cloud.") + v, ok := resp["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") + return transport_tpg.SuccessPollResult() + } + return transport_tpg.PendingStatusPollResult("found") +} diff --git a/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl index d133473847c7..5dc7e90f5558 100644 --- a/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl @@ -24,59 +24,62 @@ func WorkbenchInstanceLabelsDiffSuppress(k, old, new string, d *schema.ResourceD var WorkbenchInstanceProvidedMetadata = []string{ - "agent-health-check-interval-seconds", - "agent-health-check-path", - "container", - "custom-container-image", - "custom-container-payload", - "data-disk-uri", - "dataproc-allow-custom-clusters", - "dataproc-cluster-name", - "dataproc-configs", - "dataproc-default-subnet", - "dataproc-locations-list", - "dataproc-machine-types-list", - "dataproc-notebooks-url", - "dataproc-region", - "dataproc-service-account", - "disable-check-xsrf", - "framework", - "gcs-data-bucket", - "generate-diagnostics-bucket", - "generate-diagnostics-file", - "generate-diagnostics-options", - "image-url", - "install-monitoring-agent", - "install-nvidia-driver", - "installed-extensions", - "last_updated_diagnostics", - "notebooks-api", - 
"notebooks-api-version", - "notebooks-examples-location", - "notebooks-location", - "proxy-backend-id", - "proxy-byoid-url", - "proxy-mode", - "proxy-status", - "proxy-url", - "proxy-user-mail", - "report-container-health", - "report-event-url", - "report-notebook-metrics", - "report-system-health", - "report-system-status", - "restriction", - "serial-port-logging-enable", - "shutdown-script", - "title", - "use-collaborative", - "user-data", - "version", - - "disable-swap-binaries", - "enable-guest-attributes", - "enable-oslogin", - "proxy-registration-url", + "agent-health-check-interval-seconds", + "agent-health-check-path", + "container", + "cos-update-strategy", + "custom-container-image", + "custom-container-payload", + "data-disk-uri", + "dataproc-allow-custom-clusters", + "dataproc-cluster-name", + "dataproc-configs", + "dataproc-default-subnet", + "dataproc-locations-list", + "dataproc-machine-types-list", + "dataproc-notebooks-url", + "dataproc-region", + "dataproc-service-account", + "disable-check-xsrf", + "framework", + "gcs-data-bucket", + "generate-diagnostics-bucket", + "generate-diagnostics-file", + "generate-diagnostics-options", + "google-logging-enabled", + "image-url", + "install-monitoring-agent", + "install-nvidia-driver", + "installed-extensions", + "last_updated_diagnostics", + "notebooks-api", + "notebooks-api-version", + "notebooks-examples-location", + "notebooks-location", + "proxy-backend-id", + "proxy-byoid-url", + "proxy-mode", + "proxy-status", + "proxy-url", + "proxy-user-mail", + "report-container-health", + "report-event-url", + "report-notebook-metrics", + "report-system-health", + "report-system-status", + "restriction", + "serial-port-logging-enable", + "service-account-mode", + "shutdown-script", + "title", + "use-collaborative", + "user-data", + "version", + + "disable-swap-binaries", + "enable-guest-attributes", + "enable-oslogin", + "proxy-registration-url", } func WorkbenchInstanceMetadataDiffSuppress(k, old, new string, d 
*schema.ResourceData) bool { diff --git a/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl b/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl new file mode 100644 index 000000000000..1518e1014a61 --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl @@ -0,0 +1,3 @@ +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return nil, nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl index c5754ad27380..7f57f25f3d02 100644 --- a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl @@ -15,7 +15,12 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + certName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + certName = id.PrefixedUniqueId(prefix) + } } else { certName = id.UniqueId() } diff --git a/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl b/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl new file mode 100644 index 000000000000..5b419947c9ca --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl @@ -0,0 +1,19 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return strings.ToLower(v.(string)), nil +} diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl new file mode 100644 index 000000000000..ffdee7b68523 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl @@ -0,0 +1,38 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // This flatten function is shared between the resource and the datasource. 
+ // TF Input will use the generation from the source object + // GET Response will use the generation from the automatically created object + // As TF Input and GET response values have different format, + // we will return TF Input value to prevent state drift. + + if genVal, ok := d.GetOk("build_config.0.source.0.storage_source.0.generation"); ok { + v = genVal + } + + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} diff --git a/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl new file mode 100644 index 000000000000..7eb7c54775a2 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl @@ -0,0 +1,23 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} + +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + s, err := structure.NormalizeJsonString(v) + if err != nil { + log.Printf("[ERROR] failed to normalize JSON string: %v", err) + } + return s +} \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl b/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl index 78cb56c58cd0..54e6d0f587d0 100644 --- a/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl @@ -53,3 +53,4 @@ if len(nameParts) == 8 { "{{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}}") } return []*schema.ResourceData{d}, nil + diff --git a/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..529fe042e33f --- /dev/null +++ b/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,17 @@ +config := meta.(*transport_tpg.Config) + +// current import_formats can't import fields with forward slashes in their value +if err := tpgresource.ParseImportId([]string{ + "(?P.+)/deployedIndex/(?P[^/]+)", +}, d, config); err != nil { + return nil, err +} + +// Replace import id for the resource id +id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}index_endpoint{{"}}"}}/deployedIndex/{{"{{"}}deployed_index_id{{"}}"}}") +if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) +} +d.SetId(id) + +return []*schema.ResourceData{d}, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl 
index 06c94a1d0553..3b9f86dd0fb4 100644 --- a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl @@ -10,18 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -// We need to pretend IAP isn't there if it's disabled for Terraform to maintain -// BC behaviour with the handwritten resource. -v, ok := res["iap"] -if !ok || v == nil { - delete(res, "iap") - return res, nil -} -m := v.(map[string]interface{}) -if ok && m["enabled"] == false { - delete(res, "iap") -} - // Requests with consistentHash will error for specific values of // localityLbPolicy. However, the API will not remove it if the backend // service is updated to from supporting to non-supporting localityLbPolicy diff --git a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl index a9d8d364bcc3..f674806c302a 100644 --- a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl @@ -21,6 +21,19 @@ if paramMap, ok := res["params"]; ok { } } } + for k, v := range params { + switch v.(type) { + case []interface{}, map[string]interface{}: + value, err := json.Marshal(v) + if err != nil { + return nil, err + } + params[k] = string(value) + default: + params[k] = v + } + } + res["params"] = params } return res, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl index 560cd1243da7..e105509f502c 100644 --- a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl @@ -10,22 +10,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -// We need to pretend IAP isn't there if it's disabled for Terraform to maintain -// BC behaviour with the handwritten resource. -v, ok := res["iap"] -if !ok || v == nil { - delete(res, "iap") - return res, nil -} -m := v.(map[string]interface{}) -if ok && m["enabled"] == false { - delete(res, "iap") -} {{ if ne $.TargetVersionName `ga` -}} // Since we add in a NONE subsetting policy, we need to remove it in some // cases for backwards compatibility with the config -v, ok = res["subsetting"] +v, ok := res["subsetting"] if ok && v != nil { subsetting := v.(map[string]interface{}) policy, ok := subsetting["policy"] diff --git a/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..4775b5119c94 --- /dev/null +++ b/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,20 @@ +v, ok := res["deployedIndexes"] +if !ok || v == nil { // CREATE + res["name"] = res["deployedIndexId"] + delete(res, "deployedIndexId") + return res, nil +} +dpIndex := make(map[string]interface{}) +for _, v := range v.([]interface{}) { + dpI := v.(map[string]interface{}) + if dpI["id"] == d.Get("deployed_index_id").(string) { + dpI["indexEndpoint"] = d.Get("index_endpoint") + dpI["deployedIndexId"] = d.Get("deployed_index_id") + dpIndex = dpI + break + } +} +if dpIndex == nil { + return nil, fmt.Errorf("Error: Deployment Index not Found") +} +return dpIndex, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl index 333664a0ba96..f92c155ea246 100644 --- a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl @@ -10,24 +10,6 @@ See the License for the specific language governing permissions and limitations 
under the License. */ -}} -// The BackendService API's Update / PUT API is badly formed and behaves like -// a PATCH field for at least IAP. When sent a `null` `iap` field, the API -// doesn't disable an existing field. To work around this, we need to emulate -// the old Terraform behaviour of always sending the block (at both update and -// create), and force sending each subfield as empty when the block isn't -// present in config. - -iapVal := obj["iap"] -if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data -} else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap -} - backendsRaw, ok := obj["backends"] if !ok { return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl index 4ec9950635f4..631b88f55ff6 100644 --- a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl @@ -15,8 +15,24 @@ if !ok { paramMap = make(map[string]string) } -var params map[string]string -params = paramMap.(map[string]string) +params := map[string]interface{}{} + +for k, v := range paramMap.(map[string]string) { + var value interface{} + if err := json.Unmarshal([]byte(v), &value); err != nil { + // If the value is a string, don't convert it to anything. + params[k] = v + } else { + switch value.(type) { + case float64: + // If the value is a number, keep the string representation. + params[k] = v + default: + // If the value is another JSON type, keep the unmarshalled type as is. + params[k] = value + } + } +} for _, sp := range sensitiveParams { if auth, _ := d.GetOkExists("sensitive_params.0." 
+ sp); auth != "" { diff --git a/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl index 168d4a65c5ef..bcf2aa79977f 100644 --- a/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl @@ -7,4 +7,12 @@ if _, ok := obj["certificateManagerCertificates"]; ok { obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } -return obj, nil \ No newline at end of file + +// Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` +// in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove +// the association. +if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil +} + +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl index 168d4a65c5ef..bcf2aa79977f 100644 --- a/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl @@ -7,4 +7,12 @@ if _, ok := obj["certificateManagerCertificates"]; ok { obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } -return obj, nil \ No newline at end of file + +// Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` +// in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove +// the association. 
+if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil +} + +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl index 40010b8ac301..12e1d789053f 100644 --- a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl @@ -10,23 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -// The RegionBackendService API's Update / PUT API is badly formed and behaves like -// a PATCH field for at least IAP. When sent a `null` `iap` field, the API -// doesn't disable an existing field. To work around this, we need to emulate -// the old Terraform behaviour of always sending the block (at both update and -// create), and force sending each subfield as empty when the block isn't -// present in config. - -iapVal := obj["iap"] -if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data -} else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap -} if d.Get("load_balancing_scheme").(string) == "EXTERNAL_MANAGED" || d.Get("load_balancing_scheme").(string) == "INTERNAL_MANAGED" { return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..487a61305df4 --- /dev/null +++ b/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,7 @@ +req := make(map[string]interface{}) +obj["id"] = d.Get("deployed_index_id") +delete(obj, "deployedIndexId") +delete(obj, "name") +delete(obj, "indexEndpoint") +req["deployedIndex"] = obj +return req, nil \ No newline at end of file diff --git 
a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl index 4888f187bd15..511fde357259 100644 --- a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl @@ -2,7 +2,12 @@ var ResName string if v, ok := d.GetOk("name"); ok { ResName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + ResName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + ResName = id.PrefixedUniqueId(prefix) + } } else { ResName = id.UniqueId() } diff --git a/mmv1/templates/terraform/examples/apphub_application_full.tf.erb b/mmv1/templates/terraform/examples/apphub_application_full.tf.erb index 42d9de520464..be22ec1d0b4d 100644 --- a/mmv1/templates/terraform/examples/apphub_application_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_application_full.tf.erb @@ -5,7 +5,7 @@ resource "google_apphub_application" "<%= ctx[:primary_resource_id] %>" { scope { type = "REGIONAL" } - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/apphub_service_full.tf.erb b/mmv1/templates/terraform/examples/apphub_service_full.tf.erb index e76a286dc23c..0a8403c3fbf7 100644 --- a/mmv1/templates/terraform/examples/apphub_service_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_service_full.tf.erb @@ -50,7 +50,7 @@ resource "google_apphub_service" "<%= ctx[:primary_resource_id] %>" { service_id = google_compute_forwarding_rule.forwarding_rule.name discovered_service = data.google_apphub_discovered_service.catalog-service.name display_name = "<%= ctx[:vars]['display_name'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git 
a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb index bd92c81d0e7e..ffc71316e1b5 100644 --- a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb @@ -49,7 +49,7 @@ resource "google_apphub_workload" "<%= ctx[:primary_resource_id] %>" { workload_id = google_compute_region_instance_group_manager.mig.name discovered_workload = data.google_apphub_discovered_workload.catalog-workload.name display_name = "<%= ctx[:vars]['display_name'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb index 0b6cee4f9d65..5d0145d426f7 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" } diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb index cdbcd495d5c9..21b61833f8de 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= 
ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" cleanup_policy_dry_run = false cleanup_policies { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb index 84e23ef06179..e718d64f9b29 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" docker_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb index 67a22125e500..3ec0b7b3aa5b 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb index fce0829c3a2e..e1810d2357b8 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" 
"<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "APT" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb index ef515c550631..94942ac8d073 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb index 00f616fc8306..fe47b73a605d 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" 
remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb index 9b07a1e78cdc..e6f31910b16b 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "MAVEN" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb index 81d3e70b6021..cde2d86b8336 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "NPM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb 
index d9cbc61d6d05..dd633f41199c 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "PYTHON" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb index 1e631e677059..0896665e8af0 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb index 4d20c408b80b..35440ca93937 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb @@ -1,14 +1,14 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>-upstream-1" { location = "us-central1" repository_id = "<%= ctx[:vars]['upstream_repository_id'] %>-1" - description = 
"<%= ctx[:vars]['upstream_description'] %> 1" + description = "<%= ctx[:vars]['upstream_desc'] %> 1" format = "DOCKER" } resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>-upstream-2" { location = "us-central1" repository_id = "<%= ctx[:vars]['upstream_repository_id'] %>-2" - description = "<%= ctx[:vars]['upstream_description'] %> 2" + description = "<%= ctx[:vars]['upstream_desc'] %> 2" format = "DOCKER" } @@ -16,7 +16,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %> depends_on = [] location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "VIRTUAL_REPOSITORY" virtual_repository_config { diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb index 987d8f978686..691eddb35fe1 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb @@ -2,5 +2,5 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb index 5f76a93d4d64..456ba8a564ed 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= 
ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" sharing_environment_config { dcr_exchange_config {} } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb index 7099cbec2506..3c6fefab4b50 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] %>" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -20,6 +20,6 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb index 399a0d91de47..afa60930cc38 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" sharing_environment_config { dcr_exchange_config {} } @@ -13,7 +13,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -30,7 +30,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb index 7c3ba2242db1..6f73c97903e3 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb 
@@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] %>" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -25,6 +25,6 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl new file mode 100644 index 000000000000..17561629da0e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl @@ -0,0 +1,36 @@ +resource "google_access_context_manager_service_perimeter" "storage-perimeter" { + parent = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}" + name = 
"accesspolicies/${google_access_context_manager_access_policy.access-policy.name}/serviceperimeters/storage-perimeter" + title = "Storage Perimeter" + spec { + restricted_services = ["storage.googleapis.com"] + } + lifecycle { + ignore_changes = [status[0].resources] + } +} + +resource "google_access_context_manager_service_perimeter_dry_run_egress_policy" "egress_policy" { + perimeter = "${google_access_context_manager_service_perimeter.storage-perimeter.name}" + egress_from { + identity_type = "ANY_IDENTITY" + } + egress_to { + resources = ["*"] + operations { + service_name = "bigquery.googleapis.com" + method_selectors { + method = "*" + } + } + } + lifecycle { + create_before_destroy = true + } +} + + +resource "google_access_context_manager_access_policy" "access-policy" { + parent = "organizations/123456789" + title = "Storage Policy" +} diff --git a/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl new file mode 100644 index 000000000000..df981c843515 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl @@ -0,0 +1,39 @@ +resource "google_access_context_manager_service_perimeter" "storage-perimeter" { + parent = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}" + name = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}/serviceperimeters/storage-perimeter" + title = "Storage Perimeter" + status { + restricted_services = ["storage.googleapis.com"] + } + lifecycle { + ignore_changes = [status[0].resources] + } +} + +resource "google_access_context_manager_service_perimeter_dry_run_ingress_policy" "ingress_policy" { + perimeter = "${google_access_context_manager_service_perimeter.storage-perimeter.name}" + ingress_from { + identity_type = "any_identity" + 
sources { + access_level = "*" + } + } + ingress_to { + resources = ["*"] + operations { + service_name = "bigquery.googleapis.com" + method_selectors { + method = "*" + } + } + } + lifecycle { + create_before_destroy = true + } +} + + +resource "google_access_context_manager_access_policy" "access-policy" { + parent = "organizations/123456789" + title = "Storage Policy" +} diff --git a/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl index 25d7b18d8c8a..665ad26f8754 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_active_directory_domain" "ad-domain" { domain_name = "{{index $.Vars "domain_name"}}.org.com" locations = ["us-central1"] - reserved_ip_range = "192.168.255.0/24" + reserved_ip_range = "192.168.255.0/24" deletion_protection = false } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl index c16b4eab99d5..c1f9cf61d676 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl @@ -5,4 +5,5 @@ resource "google_active_directory_domain_trust" "ad-domain-trust" { trust_direction = "OUTBOUND" trust_type = "FOREST" trust_handshake_secret = "Testing1!" 
+ deletion_protection = false } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl index 51c61dea4d78..e18ce41918ff 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl @@ -3,6 +3,7 @@ resource "google_active_directory_peering" "ad-domain-peering" { domain_resource = google_active_directory_domain.ad-domain.name peering_id = "ad-domain-peering" authorized_network = google_compute_network.peered-network.id + deletion_protection = false labels = { foo = "bar" } @@ -14,6 +15,7 @@ resource "google_active_directory_domain" "ad-domain" { locations = ["us-central1"] reserved_ip_range = "192.168.255.0/24" authorized_networks = [google_compute_network.source-network.id] + deletion_protection = false } resource "google_compute_network" "peered-network" { diff --git a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl index ad96d25ec95e..7713276440e6 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl @@ -45,7 +45,6 @@ resource "google_alloydb_cluster" "restored_via_pitr" { network_config { network = data.google_compute_network.default.id } - restore_continuous_backup_source { cluster = google_alloydb_cluster.{{$.PrimaryResourceId}}.name point_in_time = "2023-08-03T19:19:00.094Z" diff --git a/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl index 7c2b09d69979..0ca5146f0ac8 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl @@ -14,7 +14,6 @@ resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_name"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl new file mode 100644 index 000000000000..d2d4712d0ae7 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl @@ -0,0 +1,21 @@ +resource "google_alloydb_instance" "{{$.PrimaryResourceId}}" { + cluster = google_alloydb_cluster.{{$.PrimaryResourceId}}.name + instance_id = "{{index $.Vars "alloydb_instance_name"}}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { + cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" + location = "us-central1" + + initial_user { + password = "{{index $.Vars "alloydb_cluster_name"}}" + } + psc_config { + psc_enabled = true + } +} diff --git a/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl index 8b202bad443e..8e2eeb44e88d 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl @@ -22,7 +22,7 @@ resource "google_alloydb_cluster" "secondary" { cluster_id = "{{index $.Vars "alloydb_secondary_cluster_name"}}" location = "us-east1" network_config { - network = google_compute_network.default.id + network = data.google_compute_network.default.id } cluster_type = "SECONDARY" diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl index 6ae9ee261194..b91e92abddce 
100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl @@ -10,9 +10,8 @@ resource "google_alloydb_cluster" "default" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" network_config { - network = google_compute_network.default.id + network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl index a46757d8a79e..01d0f92dd77c 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl @@ -10,7 +10,6 @@ resource "google_alloydb_cluster" "default" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl index e3b2b4743180..eebb03454761 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl @@ -10,7 +10,6 @@ resource "google_alloydb_cluster" "default" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl index 2d122300c59d..aa204c22c688 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl @@ -5,6 
+5,7 @@ resource "google_project" "project" { name = "tf-test%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl index 61d723d0897f..c7b3f6bf134c 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl @@ -5,6 +5,7 @@ resource "google_project" "project" { name = "tf-test%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl index b1cc55fb2abc..f70f639f2a54 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl @@ -3,6 +3,7 @@ resource "google_project" "project" { name = "tf-test-%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl index d08b3601898c..4265a85236f8 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl @@ -3,6 +3,7 @@ resource "google_project" "project" { name = "tf-test-%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl index efb5464434f5..daf707b451c5 100644 --- a/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl @@ -5,7 +5,7 @@ resource "google_apphub_application" "{{$.PrimaryResourceId}}" { scope { type = "REGIONAL" } - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl index e9a8ee7e82ef..31a52f86769e 100644 --- a/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl @@ -50,7 +50,7 @@ resource "google_apphub_service" "{{$.PrimaryResourceId}}" { service_id = google_compute_forwarding_rule.forwarding_rule.name discovered_service = data.google_apphub_discovered_service.catalog-service.name display_name = "{{index $.Vars "display_name"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl index ca13f5699c30..2a51cfb09d59 100644 --- a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl @@ -49,7 +49,7 @@ resource "google_apphub_workload" "{{$.PrimaryResourceId}}" { workload_id = google_compute_region_instance_group_manager.mig.name discovered_workload = data.google_apphub_discovered_workload.catalog-workload.name display_name = "{{index $.Vars "display_name"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl index 7a63233ab7a2..37687acc6960 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" } diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl index 96a5b8a3bdb7..8853964941e1 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" cleanup_policy_dry_run = false cleanup_policies { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl 
b/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl index 6c8e6181bdb4..b7c1bd729542 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" docker_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl index 255cacab7263..a91876f75950 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl index dae742f1bad9..ddffa4d557c1 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "APT" mode = 
"REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl index 562bff87fff5..17a3951db7f6 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl index 92b0d4684730..e005c0e8ed59 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl 
b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl index 1f3d5696a7d2..e259ae40a08e 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "MAVEN" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl index 5bd553e75a8b..8606714a3644 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "NPM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl index 2a0c1ec21685..fa58182a7b44 100644 --- 
a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "PYTHON" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl index b8ac1ab35a0a..dd2bba09cf8f 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl index 36e16607d05c..88ce7362dafb 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl @@ -1,14 +1,14 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}-upstream-1" { location = "us-central1" repository_id = "{{index $.Vars "upstream_repository_id"}}-1" - description = "{{index $.Vars "upstream_description"}} 1" 
+ description = "{{index $.Vars "upstream_desc"}} 1" format = "DOCKER" } resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}-upstream-2" { location = "us-central1" repository_id = "{{index $.Vars "upstream_repository_id"}}-2" - description = "{{index $.Vars "upstream_description"}} 2" + description = "{{index $.Vars "upstream_desc"}} 2" format = "DOCKER" } @@ -16,7 +16,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { depends_on = [] location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "VIRTUAL_REPOSITORY" virtual_repository_config { diff --git a/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl b/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl index 4ce60c795706..f2da2681ca83 100644 --- a/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl @@ -3,6 +3,7 @@ resource "google_compute_backend_service" "{{$.PrimaryResourceId}}" { protocol = "HTTP" load_balancing_scheme = "EXTERNAL" iap { + enabled = true oauth2_client_id = "abc" oauth2_client_secret = "xyz" } diff --git a/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl b/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl index 17fe9e39abc9..f40a7b76345c 100644 --- a/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl @@ -19,7 +19,15 @@ resource "google_compute_backend_service" "{{$.PrimaryResourceId}}" { } } outlier_detection { - consecutive_errors = 2 + consecutive_errors = 2 + consecutive_gateway_failure = 5 + enforcing_consecutive_errors = 100 + 
enforcing_consecutive_gateway_failure = 0 + enforcing_success_rate = 100 + max_ejection_percent = 10 + success_rate_minimum_hosts = 5 + success_rate_request_volume = 100 + success_rate_stdev_factor = 1900 } } diff --git a/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl b/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl new file mode 100644 index 000000000000..a2bf023066c1 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_backup_dr_backup_vault" "{{$.PrimaryResourceId}}" { + provider = google-beta + location = "us-central1" + backup_vault_id = "{{index $.Vars "backup_vault_id"}}" + description = "This is a second backup vault built by Terraform." + backup_minimum_enforced_retention_duration = "100000s" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl index 4feac1f156c8..54cb026c6177 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl @@ -2,5 +2,5 @@ resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" location = "US" data_exchange_id = "{{index $.Vars "data_exchange_id"}}" display_name = "{{index $.Vars "data_exchange_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" } diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl new file mode 100644 
index 000000000000..bc26d58a9a52 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl @@ -0,0 +1,9 @@ +resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = "{{index $.Vars "data_exchange_id"}}" + display_name = "{{index $.Vars "data_exchange_id"}}" + description = "{{index $.Vars "desc"}}" + sharing_environment_config { + dcr_exchange_config {} + } +} diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl index 6fc1e529a132..b9900df469f8 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" location = "US" data_exchange_id = "{{index $.Vars "data_exchange_id"}}" display_name = "{{index $.Vars "data_exchange_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" } resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { data_exchange_id = google_bigquery_analytics_hub_data_exchange.{{$.PrimaryResourceId}}.data_exchange_id listing_id = "{{index $.Vars "listing_id"}}" display_name = "{{index $.Vars "listing_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" bigquery_dataset { dataset = google_bigquery_dataset.{{$.PrimaryResourceId}}.id @@ -20,6 +20,6 @@ resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { dataset_id = "{{index $.Vars "listing_id"}}" friendly_name = "{{index $.Vars "listing_id"}}" - description = "{{index 
$.Vars "description"}}" + description = "{{index $.Vars "desc"}}" location = "US" } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl new file mode 100644 index 000000000000..1dbf2705cf73 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl @@ -0,0 +1,60 @@ +resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = "{{index $.Vars "data_exchange_id"}}" + display_name = "{{index $.Vars "data_exchange_id"}}" + description = "{{index $.Vars "desc"}}" + sharing_environment_config { + dcr_exchange_config {} + } +} + +resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = google_bigquery_analytics_hub_data_exchange.{{$.PrimaryResourceId}}.data_exchange_id + listing_id = "{{index $.Vars "listing_id"}}" + display_name = "{{index $.Vars "listing_id"}}" + description = "{{index $.Vars "desc"}}" + + bigquery_dataset { + dataset = google_bigquery_dataset.{{$.PrimaryResourceId}}.id + selected_resources { + table = google_bigquery_table.{{$.PrimaryResourceId}}.id + } + } + + restricted_export_config { + enabled = true + } +} + +resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { + dataset_id = "{{index $.Vars "listing_id"}}" + friendly_name = "{{index $.Vars "listing_id"}}" + description = "{{index $.Vars "desc"}}" + location = "US" +} + +resource "google_bigquery_table" "{{$.PrimaryResourceId}}" { + deletion_protection = false + table_id = "{{index $.Vars "listing_id"}}" + dataset_id = google_bigquery_dataset.{{$.PrimaryResourceId}}.dataset_id + schema = <" { redis_configs = { maxmemory-policy = "volatile-ttl" } - deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled'] == 'true' %> + deletion_protection_enabled = <%= 
ctx[:vars]['deletion_protection_enabled']%> zone_distribution_config { mode = "MULTI_ZONE" diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb index 1e3eb14dd2ef..d03efb412529 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb @@ -20,7 +20,7 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { } } } - deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled'] == 'true' %> + deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled']%> depends_on = [ google_network_connectivity_service_connection_policy.default ] diff --git a/mmv1/templates/terraform/examples/tpu_node_full.tf.erb b/mmv1/templates/terraform/examples/tpu_node_full.tf.erb index cebe528b24ac..2ae3e8a2a597 100644 --- a/mmv1/templates/terraform/examples/tpu_node_full.tf.erb +++ b/mmv1/templates/terraform/examples/tpu_node_full.tf.erb @@ -23,6 +23,7 @@ resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { use the default network in order to still demonstrate using as many fields as possible on the resource. -%> + network = google_service_networking_connection.private_service_connection.network labels = { diff --git a/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb b/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb index 3be36ecb4d64..8ed22bd87a51 100644 --- a/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb +++ b/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb @@ -10,22 +10,11 @@ resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { accelerator_type = "v3-8" -<%#- - We previously used the first available version from the - google_tpu_tensorflow_versions data source. 
However, this started to return a - random set of versions which caused our tests to occasionally fail, so we pin - tensorflow_version to a specific version so that our tests pass reliably. --%> tensorflow_version = "2.10.0" description = "Terraform Google Provider test TPU" use_service_networking = true -<%#- - We previously used a separate network resource here, but TPUs only allow using 50 - different network names, ever. This caused our tests to start failing, so just - use the default network in order to still demonstrate using as many fields as - possible on the resource. --%> + network = data.google_compute_network.network.id labels = { diff --git a/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl b/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl index 589b25919642..e23b16ed87ec 100644 --- a/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl +++ b/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl @@ -7,11 +7,11 @@ Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. 
value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, diff --git a/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb b/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb index 7b2623648cb7..8cd2eddbf93a 100644 --- a/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb +++ b/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb @@ -1,2 +1,3 @@ + service_name = google_endpoints_service.endpoints_service.service_name consumer_project = "%{consumer_project}" \ No newline at end of file diff --git a/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl index f86c332a157d..3a72050de8b7 100644 --- a/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl +++ b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl @@ -29,18 +29,11 @@ privateCloudPollRead := func(d *schema.ResourceData, meta interface{}) transport if err != nil { return res, err } - // if resource exists but is marked for deletion - log.Printf("[DEBUG] Fetching state of the private cloud.") - v, ok := res["state"] - if ok && v.(string) == "DELETED" { - log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") - return nil, nil - } return res, nil } } -err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting {{$.Name}}", d.Timeout(schema.TimeoutDelete), 10) +err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), pollCheckForPrivateCloudAbsence, "Deleting {{$.Name}}", d.Timeout(schema.TimeoutDelete), 10) if err != nil { return fmt.Errorf("Error waiting to delete PrivateCloud: %s", err) -} \ No newline 
at end of file +} diff --git a/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl new file mode 100644 index 000000000000..d7d3e0d8e21c --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 4 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected folders/{{"{{"}}folder{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", d.Id()) +} + +if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) +} + +if err := d.Set("config_id", idParts[3]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl new file mode 100644 index 000000000000..10ebbe8b3032 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 6 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected folders/{{"{{"}}folder{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", d.Id()) +} + +if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) +} + +if err := d.Set("config_id", idParts[5]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) +} diff --git a/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl new file mode 100644 index 000000000000..6449eb1851f9 --- /dev/null +++ 
b/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 6 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected organizations/{{"{{"}}organization{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bigQueryExports/{{"{{"}}big_query_export_id{{"}}"}}", d.Id()) +} + +if err := d.Set("organization", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting organization: %s", err) +} + +if err := d.Set("big_query_export_id", idParts[5]); err != nil { + return nil, fmt.Errorf("error setting big_query_export_id: %s", err) +} diff --git a/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl b/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl new file mode 100644 index 000000000000..9fd468471617 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl @@ -0,0 +1,72 @@ +if v, ok := d.GetOk("send_secondary_ip_range_if_empty"); ok && v.(bool) { + if sv, ok := d.GetOk("secondary_ip_range"); ok { + configValue := d.GetRawConfig().GetAttr("secondary_ip_range") + stateValue := sv.([]interface{}) + if configValue.LengthInt() == 0 && len(stateValue) != 0 { + log.Printf("[DEBUG] Sending empty secondary_ip_range in update") + obj := make(map[string]interface{}) + obj["secondaryIpRanges"] = make([]interface{}, 0) + + // The rest is the same as the secondary_ip_range generated update code + // without the secondaryIpRangesProp logic + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/subnetworks/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/subnetworks/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl b/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl new file mode 100644 index 000000000000..7e9f036c027b --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl @@ -0,0 +1 @@ +obj["use_explicit_dry_run_spec"] = true diff --git a/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl b/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl new file mode 100644 index 000000000000..2e0de80e70e8 --- /dev/null +++ 
b/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl @@ -0,0 +1,15 @@ +// Check if the project exists in a deleted state +pcMarkedForDeletion, err := isPrivateCloudInDeletedState(config, d, billingProject, userAgent) +if err != nil { + return fmt.Errorf("Error checking if Private Cloud exists and is marked for deletion: %s", err) +} +if pcMarkedForDeletion { + log.Printf("[DEBUG] Private Cloud exists and is marked for deletion. Triggering UNDELETE of the Private Cloud.\n") + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}VmwareengineBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/privateClouds/{{"{{"}}name{{"}}"}}:undelete") + if err != nil { + return err + } + obj = make(map[string]interface{}) +} else { + log.Printf("[DEBUG] Private Cloud is not found to be marked for deletion. Triggering CREATE of the Private Cloud.\n") +} diff --git a/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl new file mode 100644 index 000000000000..38c30189ea27 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl @@ -0,0 +1,3 @@ +if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy job without setting deletion_protection=false and running `terraform apply`") +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl new file mode 100644 index 000000000000..444ac1bfe13c --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl @@ -0,0 +1,3 @@ +if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy service without setting deletion_protection=false and running `terraform apply`") +} \ No newline at end of file diff --git 
a/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl b/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl new file mode 100644 index 000000000000..e182b8aa347c --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl @@ -0,0 +1,5 @@ +// Add force=true query param to force deletion of private connection sub resources like Routes +url, err = transport_tpg.AddQueryParams(url, map[string]string{"force": strconv.FormatBool(true)}) +if err != nil { +return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..22b6172e267e --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,3 @@ +obj = map[string]interface{}{ + "deployedIndexId": d.Get("deployed_index_id"), +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl index 603e77f32acc..307208274cb2 100644 --- a/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl @@ -30,6 +30,16 @@ if newRouting != oldRouting { } } } + +_, hasStandardIsolation := obj["standardIsolation"] +_, hasDataBoostIsolationReadOnly := obj["dataBoostIsolationReadOnly"] +if hasStandardIsolation && hasDataBoostIsolationReadOnly { + // Due to the "conflicts" both fields should be present only if neither was + // previously specified and the user is now manually adding dataBoostIsolationReadOnly. 
+ delete(obj, "standardIsolation") + updateMask = append(updateMask, "dataBoostIsolationReadOnly") +} + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) diff --git a/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl index 49e42175c1ac..f0ad51601fd6 100644 --- a/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl @@ -8,7 +8,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. */}} + limitations under the License. +*/ -}} if obj["statements"] != nil { if len(obj["statements"].([]string)) == 0 { diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index d7cb4c524fee..97a860442891 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -21,7 +21,7 @@ (Deprecated) {{- end}} {{- end }} - {{- $.ResourceMetadata.FormatDocDescription $.Description true -}} + {{- $.ResourceMetadata.FormatDocDescription $.GetDescription true -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} {{- if $.ItemType.DefaultValue }} Default value is `{{ $.ItemType.DefaultValue }}`. 
diff --git a/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl index 291a709b0238..85d3d94dc062 100644 --- a/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl @@ -8,7 +8,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. */}} + limitations under the License. +*/ -}} if obj["versionRetentionPeriod"] != nil || obj["extraStatements"] != nil { old, new := d.GetChange("ddl") diff --git a/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..ef56b550bd2c --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,3 @@ +obj["id"] = obj["deployedIndexId"] +delete(obj, "deployedIndexId") +return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 09100c79385b..ee67f6e95e06 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -39,6 +39,9 @@ exclude: <%= object.exclude %> <% unless object.readonly.nil? 
-%> readonly: <%= object.readonly %> <% end -%> +<% unless !object.skip_attribution_label -%> +skip_attribution_label: <%= object.skip_attribution_label %> +<% end -%> <% #references blocks -%> @@ -415,7 +418,7 @@ custom_code: <% end -%> <% custom_diff = object.custom_diff.reject { - |cdiff| cdiff == "tpgresource.SetLabelsDiff" || cdiff == "tpgresource.SetMetadataLabelsDiff" || cdiff == "tpgresource.SetAnnotationsDiff" || cdiff == "tpgresource.SetMetadataAnnotationsDiff" + |cdiff| cdiff == "tpgresource.SetLabelsDiff" || cdiff == "tpgresource.SetLabelsDiffWithoutAttributionLabel" || cdiff == "tpgresource.SetMetadataLabelsDiff" || cdiff == "tpgresource.SetAnnotationsDiff" || cdiff == "tpgresource.SetMetadataAnnotationsDiff" } -%> <% unless custom_diff.empty? -%> diff --git a/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl index 51e0e4947906..eccc72fdf7e5 100644 --- a/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl +++ b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl @@ -49,10 +49,7 @@ func CheckDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourc if _, ok := ignoreFields[k]; ok { continue } - if _, ok := ignoreFields["labels.%"]; ok && strings.HasPrefix(k, "labels.") { - continue - } - if _, ok := ignoreFields["terraform_labels.%"]; ok && strings.HasPrefix(k, "terraform_labels.") { + if strings.HasPrefix(k, "labels.") || strings.HasPrefix(k, "terraform_labels.") || strings.HasPrefix(k, "effective_labels.") { continue } if k == "%" { diff --git a/mmv1/third_party/terraform/go/go.mod b/mmv1/third_party/terraform/go/go.mod index 3162439eec0b..3d389c3577ad 100644 --- a/mmv1/third_party/terraform/go/go.mod +++ b/mmv1/third_party/terraform/go/go.mod @@ -117,4 +117,4 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect -) +) \ No newline at end of file 
diff --git a/mmv1/third_party/terraform/go/main.go.tmpl b/mmv1/third_party/terraform/go/main.go.tmpl index d044f11fba2a..8bb1fb9fd4de 100644 --- a/mmv1/third_party/terraform/go/main.go.tmpl +++ b/mmv1/third_party/terraform/go/main.go.tmpl @@ -12,16 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/fwprovider" "github.com/hashicorp/terraform-provider-google/google/provider" - ver "github.com/hashicorp/terraform-provider-google/version" -) - -var ( - // these will be set by the goreleaser configuration - // to appropriate values for the compiled binary - version string = ver.ProviderVersion - - // goreleaser can also pass the specific commit if you want - // commit string = "" ) func main() { diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl index 9a45445de045..33fa97849f1d 100644 --- a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl @@ -28,7 +28,9 @@ func TestAccAccessContextManager(t *testing.T) { "access_levels": testAccAccessContextManagerAccessLevels_basicTest, "access_level_condition": testAccAccessContextManagerAccessLevelCondition_basicTest, "service_perimeter_egress_policy": testAccAccessContextManagerServicePerimeterEgressPolicy_basicTest, + "service_perimeter_dry_run_egress_policy": testAccAccessContextManagerServicePerimeterDryRunEgressPolicy_basicTest, "service_perimeter_ingress_policy": testAccAccessContextManagerServicePerimeterIngressPolicy_basicTest, + "service_perimeter_dry_run_ingress_policy": testAccAccessContextManagerServicePerimeterDryRunIngressPolicy_basicTest, "service_perimeters": 
testAccAccessContextManagerServicePerimeters_basicTest, "gcp_user_access_binding": testAccAccessContextManagerGcpUserAccessBinding_basicTest, "authorized_orgs_desc": testAccAccessContextManagerAuthorizedOrgsDesc_basicTest, diff --git a/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl b/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl new file mode 100644 index 000000000000..141be1e2be7d --- /dev/null +++ b/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl @@ -0,0 +1,445 @@ +package appengine_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccAppEngineFlexibleAppVersion_update(t *testing.T) { + t.Skip("https://github.com/hashicorp/terraform-provider-google/issues/18239") + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAppEngineFlexibleAppVersionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAppEngineFlexibleAppVersion_python(context), + }, + { + ResourceName: "google_app_engine_flexible_app_version.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"env_variables", "deployment", "entrypoint", "service", "noop_on_destroy"}, + }, + { + Config: 
testAccAppEngineFlexibleAppVersion_pythonUpdate(context), + }, + { + ResourceName: "google_app_engine_flexible_app_version.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"env_variables", "deployment", "entrypoint", "service", "delete_service_on_destroy"}, + }, + }, + }) +} + +func testAccAppEngineFlexibleAppVersion_python(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-appeng-flex%{random_suffix}" + project_id = "tf-test-appeng-flex%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + deletion_policy = "DELETE" +} + +resource "google_project_service" "compute" { + provider = google-beta + project = google_project.my_project.project_id + service = "compute.googleapis.com" + + disable_dependent_services = false +} + +resource "google_project_service" "appengineflex" { + provider = google-beta + project = google_project.my_project.project_id + service = "appengineflex.googleapis.com" + + disable_dependent_services = false + depends_on = [google_project_service.compute] +} + +resource "google_compute_network" "network" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "subnetwork" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + region = "us-central1" + network = google_compute_network.network.id + ip_cidr_range = "10.0.0.0/16" + private_ip_google_access = true +} + +resource "google_app_engine_application" "app" { + provider = google-beta + project = google_project.my_project.project_id + location_id = "us-central" +} + +resource "google_project_iam_member" "gae_api" { + provider = google-beta + project = google_project_service.appengineflex.project + role = "roles/compute.networkUser" + member = 
"serviceAccount:service-${google_project.my_project.number}@gae-api-prod.google.com.iam.gserviceaccount.com" +} + +resource "google_app_engine_standard_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "default" + runtime = "python38" + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + } + + env_variables = { + port = "8000" + } + + noop_on_destroy = true +} + +resource "google_app_engine_flexible_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "custom" + runtime = "python" + + runtime_api_version = "1" + + resources { + cpu = 1 + memory_gb = 0.5 + disk_gb = 10 + } + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + + files { + name = "app.yaml" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.yaml.name}" + } + } + + liveness_check { + path = "alive" + } + + readiness_check { + path = "ready" + } + + env_variables = { + port = "8000" + } + + network { + name = google_compute_network.network.name + subnetwork = 
google_compute_subnetwork.subnetwork.name + instance_ip_mode = "EXTERNAL" + } + + instance_class = "B1" + + manual_scaling { + instances = 1 + } + + noop_on_destroy = true + + depends_on = [google_app_engine_standard_app_version.foo] +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = google_project.my_project.project_id + name = "tf-test-%{random_suffix}-flex-ae-bucket" + location = "US" +} + +resource "google_storage_bucket_object" "yaml" { + provider = google-beta + name = "app.yaml" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/app.yaml" +} + +resource "google_storage_bucket_object" "requirements" { + provider = google-beta + name = "requirements.txt" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/requirements.txt" +} + +resource "google_storage_bucket_object" "main" { + provider = google-beta + name = "main.py" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/main.py" +}`, context) +} + +func testAccAppEngineFlexibleAppVersion_pythonUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-appeng-flex%{random_suffix}" + project_id = "tf-test-appeng-flex%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + deletion_policy = "DELETE" +} + +resource "google_project_service" "compute" { + provider = google-beta + project = google_project.my_project.project_id + service = "compute.googleapis.com" + + disable_dependent_services = false +} + +resource "google_project_service" "appengineflex" { + provider = google-beta + project = google_project.my_project.project_id + service = "appengineflex.googleapis.com" + + disable_dependent_services = false + depends_on = [google_project_service.compute] +} + +resource "google_compute_network" "network" { + provider = google-beta + 
project = google_project_service.compute.project + name = "custom" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "subnetwork" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + region = "us-central1" + network = google_compute_network.network.id + ip_cidr_range = "10.0.0.0/16" + private_ip_google_access = true +} + +resource "google_app_engine_application" "app" { + provider = google-beta + project = google_project.my_project.project_id + location_id = "us-central" +} + +resource "google_project_iam_member" "gae_api" { + provider = google-beta + project = google_project_service.appengineflex.project + role = "roles/compute.networkUser" + member = "serviceAccount:service-${google_project.my_project.number}@gae-api-prod.google.com.iam.gserviceaccount.com" +} + +resource "google_app_engine_standard_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "default" + runtime = "python38" + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + } + + env_variables = { + port = "8000" + } + + noop_on_destroy = true +} + +resource "google_app_engine_flexible_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "custom" + runtime = "python" + + runtime_api_version = "1" + + resources { + cpu = 1 + memory_gb = 1 + disk_gb = 10 + } + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + + 
deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + + files { + name = "app.yaml" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.yaml.name}" + } + } + + liveness_check { + path = "" + } + + readiness_check { + path = "" + } + + env_variables = { + port = "8000" + } + + network { + name = google_compute_network.network.name + subnetwork = google_compute_subnetwork.subnetwork.name + instance_ip_mode = "INTERNAL" + } + + instance_class = "B2" + + manual_scaling { + instances = 2 + } + + delete_service_on_destroy = true + + depends_on = [google_app_engine_standard_app_version.foo] +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = google_project.my_project.project_id + name = "tf-test-%{random_suffix}-flex-ae-bucket" + location = "US" +} + +resource "google_storage_bucket_object" "yaml" { + provider = google-beta + name = "app.yaml" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/app.yaml" +} + +resource "google_storage_bucket_object" "requirements" { + provider = google-beta + name = "requirements.txt" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/requirements.txt" +} + +resource "google_storage_bucket_object" "main" { + provider = google-beta + name = "main.py" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/main.py" +}`, context) +} + +// Remove when generated test is enabled +func testAccCheckAppEngineFlexibleAppVersionDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs 
:= range s.RootModule().Resources { + if rs.Type != "google_app_engine_flexible_app_version" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + log.Printf("[DEBUG] Ignoring destroy during test") + } + + return nil + } +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl b/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl new file mode 100644 index 000000000000..d9d95562a523 --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl @@ -0,0 +1,97 @@ +package backupdr_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" + "time" +) + +func TestAccBackupDRBackupVault_fullUpdate(t *testing.T) { + t.Parallel() + + timeNow := time.Now().UTC() + referenceTime := time.Date(timeNow.Year(), timeNow.Month(), timeNow.Day(), 0, 0, 0, 0, time.UTC) + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "effective_time": referenceTime.Add(24 * time.Hour).Format(time.RFC3339), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccBackupDRBackupVault_fullCreate(context), + }, + { + ResourceName: "google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + { + Config: testAccBackupDRBackupVault_fullUpdate(context), + }, + { + ResourceName: 
"google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccBackupDRBackupVault_fullCreate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a backup vault built by Terraform." + backup_minimum_enforced_retention_duration = "100000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar" + bar = "baz" + } + annotations = { + annotations1 = "bar" + annotations2 = "baz" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} + +func testAccBackupDRBackupVault_fullUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a second backup vault built by Terraform." 
+ backup_minimum_enforced_retention_duration = "200000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl deleted file mode 100644 index 880e625ff419..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 -package bigquery_test - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/envvar" - "google.golang.org/api/bigquery/v2" -) - -func TestAccBigQueryDataset_basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccBigQueryDataset(datasetID), - 
Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - // The labels field in the state is decided by the configuration. - // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "bar"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "7200000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "bar"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "7200000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated2(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", 
- ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccBigQueryDataset_withComputedLabels(t *testing.T) { - // Skip it in VCR test because of the randomness of uuid in "labels" field - // which causes the replaying mode after recording mode failing in VCR test - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "random": {}, - }, - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - // The labels 
field in the state is decided by the configuration. - // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated_withComputedLabels(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_withProvider5(t *testing.T) { - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - oldVersion := map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.75.0", // a version that doesn't separate user defined labels and system labels - Source: "registry.terraform.io/hashicorp/google", - }, - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - ExternalProviders: oldVersion, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - Config: testAccBigQueryDataset(datasetID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - 
resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - }, - }) -} - -func TestAccBigQueryDataset_withOutOfBandLabels(t *testing.T) { - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - Check: addOutOfBandLabels(t, datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_datasetWithContents(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { 
- Config: testAccBigQueryDatasetDeleteContents(datasetID), - Check: testAccAddTable(t, datasetID, tableID), - }, - { - ResourceName: "google_bigquery_dataset.contents_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_access(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_access_%s", acctest.RandString(t, 10)) - otherDatasetID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) - otherTableID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDatasetWithOneAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithThreeAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithOneAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_regionalLocation(t *testing.T) { - t.Parallel() 
- - datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryRegionalDataset(datasetID1, "asia-south1"), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_cmek(t *testing.T) { - t.Parallel() - - kms := acctest.BootstrapKMSKeyInLocation(t, "us") - pid := envvar.GetTestProjectFromEnv() - datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_cmek(pid, datasetID1, kms.CryptoKey.Name), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccBigQueryDataset_storageBillModel(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDatasetStorageBillingModel(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_invalidCharacterInID(t *testing.T) { - t.Parallel() - // Not an acceptance test. 
- acctest.SkipIfVcr(t) - - datasetID := fmt.Sprintf("tf_test_%s-with-hyphens", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - ExpectError: regexp.MustCompile("must contain only letters.+numbers.+or underscores.+"), - }, - }, - }) -} - -func TestAccBigQueryDataset_invalidLongID(t *testing.T) { - t.Parallel() - // Not an acceptance test. - acctest.SkipIfVcr(t) - - datasetSuffix := acctest.RandString(t, 10) - datasetID := fmt.Sprintf("tf_test_%s", strings.Repeat(datasetSuffix, 200)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - ExpectError: regexp.MustCompile(".+cannot be greater than 1,024 characters"), - }, - }, - }) -} - -{{ if ne $.TargetVersionName `ga` -}} -func TestAccBigQueryDataset_bigqueryDatasetResourceTags_update(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context), - }, - { - ResourceName: "google_bigquery_dataset.dataset", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context), - }, - { - 
ResourceName: "google_bigquery_dataset.dataset", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -{{ end }} -func testAccAddTable(t *testing.T, datasetID string, tableID string) resource.TestCheckFunc { - // Not actually a check, but adds a table independently of terraform - return func(s *terraform.State) error { - config := acctest.GoogleProviderConfig(t) - table := &bigquery.Table{ - TableReference: &bigquery.TableReference{ - DatasetId: datasetID, - TableId: tableID, - ProjectId: config.Project, - }, - } - _, err := config.NewBigQueryClient(config.UserAgent).Tables.Insert(config.Project, datasetID, table).Do() - if err != nil { - return fmt.Errorf("Could not create table") - } - return nil - } -} - -func addOutOfBandLabels(t *testing.T, datasetID string) resource.TestCheckFunc { - // Not actually a check, but adds labels independently of terraform - return func(s *terraform.State) error { - config := acctest.GoogleProviderConfig(t) - - dataset, err := config.NewBigQueryClient(config.UserAgent).Datasets.Get(config.Project, datasetID).Do() - if err != nil { - return fmt.Errorf("Could not get dataset with ID %s", datasetID) - } - - dataset.Labels["outband_key"] = "test" - _, err = config.NewBigQueryClient(config.UserAgent).Datasets.Patch(config.Project, datasetID, dataset).Do() - if err != nil { - return fmt.Errorf("Could not update labele for the dataset") - } - return nil - } -} - -func testAccBigQueryDataset_withoutLabels(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 -} -`, datasetID) -} - -func testAccBigQueryDataset(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - 
friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - outband_key = "test-update" - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated2(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - # friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated_withComputedLabels(datasetID string) string { - return fmt.Sprintf(` -resource "random_uuid" "test" { -} - -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - # friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "${random_uuid.test.result}" 
- default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetDeleteContents(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "contents_test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 - delete_contents_on_destroy = true - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryRegionalDataset(datasetID string, location string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "%s" - default_table_expiration_ms = 3600000 - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID, location) -} - -func testAccBigQueryDatasetWithOneAccess(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetWithThreeAccess(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - access { - role = "READER" - domain = "hashicorp.com" - } - access { - role = "READER" - iam_member = "allUsers" - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID string) string { - // Note that we have to add a non-view access to prevent BQ from creating 4 default - // access entries. 
- return fmt.Sprintf(` -resource "google_bigquery_dataset" "other_dataset" { - dataset_id = "%s" -} - -resource "google_bigquery_table" "table_with_view" { - deletion_protection = false - table_id = "%s" - dataset_id = google_bigquery_dataset.other_dataset.dataset_id - - time_partitioning { - type = "DAY" - } - - view { - query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]" - use_legacy_sql = true - } -} - -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - access { - view { - project_id = google_bigquery_dataset.other_dataset.project - dataset_id = google_bigquery_dataset.other_dataset.dataset_id - table_id = google_bigquery_table.table_with_view.table_id - } - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, otherDatasetID, otherTableID, datasetID) -} - -func testAccBigQueryDataset_cmek(pid, datasetID, kmsKey string) string { - return fmt.Sprintf(` -data "google_project" "project" { - project_id = "%s" -} - -resource "google_kms_crypto_key_iam_member" "kms-member" { - crypto_key_id = "%s" - role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:bq-${data.google_project.project.number}@bigquery-encryption.iam.gserviceaccount.com" -} - -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "test" - description = "This is a test description" - location = "US" - default_table_expiration_ms = 3600000 - - default_encryption_configuration { - kms_key_name = "%s" - } - - depends_on = [google_kms_crypto_key_iam_member.kms-member] -} -`, pid, kmsKey, datasetID, kmsKey) -} - -func testAccBigQueryDatasetStorageBillingModel(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - 
default_table_expiration_ms = 3600000 - storage_billing_model = "PHYSICAL" - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} -{{- if ne $.TargetVersionName "ga" }} - -func testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context map[string]interface{}) string { - return acctest.Nprintf(` -data "google_project" "project" { - provider = "google-beta" -} - -resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" -} - -resource "google_tags_tag_key" "tag_key2" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key2%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value2" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" - short_name = "tf_test_tag_value2%{random_suffix}" -} - -resource "google_bigquery_dataset" "dataset" { - provider = google-beta - - dataset_id = "dataset%{random_suffix}" - friendly_name = "test" - description = "This is a test description" - location = "EU" - - resource_tags = { - "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" - "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" - } -} -`, context) -} - -func testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context map[string]interface{}) string { - return acctest.Nprintf(` -data "google_project" "project" { - provider = "google-beta" -} - -resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = 
"projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" -} - -resource "google_tags_tag_key" "tag_key2" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key2%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value2" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" - short_name = "tf_test_tag_value2%{random_suffix}" -} - -resource "google_bigquery_dataset" "dataset" { - provider = google-beta - - dataset_id = "dataset%{random_suffix}" - friendly_name = "test" - description = "This is a test description" - location = "EU" - - resource_tags = { - } -} -`, context) -} -{{- end }} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl deleted file mode 100644 index b1b80240c433..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl +++ /dev/null @@ -1,2961 +0,0 @@ -package bigquery - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/bigquery/v2" -) - -func bigQueryTableSortArrayByName(array []interface{}) { - sort.Slice(array, func(i, k int) bool { - return 
array[i].(map[string]interface{})["name"].(string) < array[k].(map[string]interface{})["name"].(string) - }) -} - -func bigQueryArrayToMapIndexedByName(array []interface{}) map[string]interface{} { - out := map[string]interface{}{} - for _, v := range array { - name := v.(map[string]interface{})["name"].(string) - out[name] = v - } - return out -} - -func bigQueryTablecheckNameExists(jsonList []interface{}) error { - for _, m := range jsonList { - if _, ok := m.(map[string]interface{})["name"]; !ok { - return fmt.Errorf("No name in schema %+v", m) - } - } - - return nil -} - -// Compares two json's while optionally taking in a compareMapKeyVal function. -// This function will override any comparison of a given map[string]interface{} -// on a specific key value allowing for a separate equality in specific scenarios -func jsonCompareWithMapKeyOverride(key string, a, b interface{}, compareMapKeyVal func(key string, val1, val2 map[string]interface{}) bool) (bool, error) { - switch a.(type) { - case []interface{}: - arrayA := a.([]interface{}) - arrayB, ok := b.([]interface{}) - if !ok { - return false, nil - } else if len(arrayA) != len(arrayB) { - return false, nil - } - - // Sort fields by name so reordering them doesn't cause a diff. 
- if key == "schema" || key == "fields" { - if err := bigQueryTablecheckNameExists(arrayA); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayA) - if err := bigQueryTablecheckNameExists(arrayB); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayB) - } - for i := range arrayA { - eq, err := jsonCompareWithMapKeyOverride(strconv.Itoa(i), arrayA[i], arrayB[i], compareMapKeyVal) - if err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil - case map[string]interface{}: - objectA := a.(map[string]interface{}) - objectB, ok := b.(map[string]interface{}) - if !ok { - return false, nil - } - - var unionOfKeys map[string]bool = make(map[string]bool) - for subKey := range objectA { - unionOfKeys[subKey] = true - } - for subKey := range objectB { - unionOfKeys[subKey] = true - } - - for subKey := range unionOfKeys { - eq := compareMapKeyVal(subKey, objectA, objectB) - if !eq { - valA, ok1 := objectA[subKey] - valB, ok2 := objectB[subKey] - if !ok1 || !ok2 { - return false, nil - } - eq, err := jsonCompareWithMapKeyOverride(subKey, valA, valB, compareMapKeyVal) - if err != nil || !eq { - return false, err - } - } - } - return true, nil - case string, float64, bool, nil: - return a == b, nil - default: - log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") - return false, errors.New("unable to compare values") - } -} - -// checks if the value is within the array, only works for generics -// because objects and arrays will take the reference comparison -func valueIsInArray(value interface{}, array []interface{}) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -func bigQueryTableMapKeyOverride(key string, objectA, objectB map[string]interface{}) bool { - // we rely on the fallback to nil if the object does not have the key - valA := objectA[key] - valB := objectB[key] - switch key { - case "mode": - eq := bigQueryTableNormalizeMode(valA) == bigQueryTableNormalizeMode(valB) - return eq - case "description": - equivalentSet := []interface{}{nil, ""} - eq := valueIsInArray(valA, equivalentSet) && valueIsInArray(valB, equivalentSet) - return eq - case "type": - if valA == nil || valB == nil { - return false - } - return bigQueryTableTypeEq(valA.(string), valB.(string)) - case "policyTags": - eq := bigQueryTableNormalizePolicyTags(valA) == nil && bigQueryTableNormalizePolicyTags(valB) == nil - return eq - } - - // otherwise rely on default behavior - return false -} - -// Compare the JSON strings are equal -func bigQueryTableSchemaDiffSuppress(name, old, new string, _ *schema.ResourceData) bool { - // The API can return an empty schema which gets encoded to "null" during read. 
- if old == "null" { - old = "[]" - } - var a, b interface{} - if err := json.Unmarshal([]byte(old), &a); err != nil { - log.Printf("[DEBUG] unable to unmarshal old json - %v", err) - } - if err := json.Unmarshal([]byte(new), &b); err != nil { - log.Printf("[DEBUG] unable to unmarshal new json - %v", err) - } - - eq, err := jsonCompareWithMapKeyOverride(name, a, b, bigQueryTableMapKeyOverride) - if err != nil { - log.Printf("[DEBUG] %v", err) - log.Printf("[DEBUG] Error comparing JSON: %v, %v", old, new) - } - - return eq -} - -func bigQueryTableConnectionIdSuppress(name, old, new string, _ *schema.ResourceData) bool { - // API accepts connectionId in below two formats - // "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or - // "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}". - // but always returns "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" - - if tpgresource.IsEmptyValue(reflect.ValueOf(old)) || tpgresource.IsEmptyValue(reflect.ValueOf(new)) { - return false - } - - // Old is in the dot format, and new is in the slash format. - // They represent the same connection if the project, locaition, and IDs are - // the same. - // Location should use a case-insenstive comparison. - dotRe := regexp.MustCompile(`(.+)\.(.+)\.(.+)`) - slashRe := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/connections/(.+)") - dotMatches := dotRe.FindStringSubmatch(old) - slashMatches := slashRe.FindStringSubmatch(new) - if dotMatches != nil && slashMatches != nil { - sameProject := dotMatches[1] == slashMatches[1] - sameLocation := strings.EqualFold(dotMatches[2], slashMatches[2]) - sameId := dotMatches[3] == slashMatches[3] - return sameProject && sameLocation && sameId - } - - return false -} - -func bigQueryTableTypeEq(old, new string) bool { - // Do case-insensitive comparison. 
https://github.com/hashicorp/terraform-provider-google/issues/9472 - oldUpper := strings.ToUpper(old) - newUpper := strings.ToUpper(new) - - equivalentSet1 := []interface{}{"INTEGER", "INT64"} - equivalentSet2 := []interface{}{"FLOAT", "FLOAT64"} - equivalentSet3 := []interface{}{"BOOLEAN", "BOOL"} - eq0 := oldUpper == newUpper - eq1 := valueIsInArray(oldUpper, equivalentSet1) && valueIsInArray(newUpper, equivalentSet1) - eq2 := valueIsInArray(oldUpper, equivalentSet2) && valueIsInArray(newUpper, equivalentSet2) - eq3 := valueIsInArray(oldUpper, equivalentSet3) && valueIsInArray(newUpper, equivalentSet3) - eq := eq0 || eq1 || eq2 || eq3 - return eq -} - -func bigQueryTableNormalizeMode(mode interface{}) string { - if mode == nil { - return "NULLABLE" - } - // Upper-case to get case-insensitive comparisons. https://github.com/hashicorp/terraform-provider-google/issues/9472 - return strings.ToUpper(mode.(string)) -} - -func bigQueryTableModeIsForceNew(old, new string) bool { - eq := old == new - reqToNull := old == "REQUIRED" && new == "NULLABLE" - return !eq && !reqToNull -} - -func bigQueryTableNormalizePolicyTags(val interface{}) interface{} { - if val == nil { - return nil - } - if policyTags, ok := val.(map[string]interface{}); ok { - // policyTags = {} is same as nil. - if len(policyTags) == 0 { - return nil - } - // policyTags = {names = []} is same as nil. - if names, ok := policyTags["names"].([]interface{}); ok && len(names) == 0 { - return nil - } - } - return val -} - -// Compares two existing schema implementations and decides if -// it is changeable.. 
pairs with a force new on not changeable -func resourceBigQueryTableSchemaIsChangeable(old, new interface{}, isExternalTable bool, topLevel bool) (bool, error) { - switch old.(type) { - case []interface{}: - arrayOld := old.([]interface{}) - arrayNew, ok := new.([]interface{}) - sameNameColumns := 0 - droppedColumns := 0 - if !ok { - // if not both arrays not changeable - return false, nil - } - if err := bigQueryTablecheckNameExists(arrayOld); err != nil { - return false, err - } - mapOld := bigQueryArrayToMapIndexedByName(arrayOld) - if err := bigQueryTablecheckNameExists(arrayNew); err != nil { - return false, err - } - mapNew := bigQueryArrayToMapIndexedByName(arrayNew) - for key := range mapNew { - // making unchangeable if an newly added column is with REQUIRED mode - if _, ok := mapOld[key]; !ok { - items := mapNew[key].(map[string]interface{}) - for k := range items { - if k == "mode" && fmt.Sprintf("%v", items[k]) == "REQUIRED" { - return false, nil - } - } - } - } - for key := range mapOld { - // dropping top level columns can happen in-place - // but this doesn't apply to external tables - if _, ok := mapNew[key]; !ok { - if !topLevel || isExternalTable { - return false, nil - } - droppedColumns += 1 - continue - } - - isChangable, err := resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key], isExternalTable, false) - if err != nil || !isChangable { - return false, err - } else if isChangable && topLevel { - // top level column that exists in the new schema - sameNameColumns += 1 - } - } - // in-place column dropping alongside column additions is not allowed - // as of now because user intention can be ambiguous (e.g. 
column renaming) - newColumns := len(arrayNew) - sameNameColumns - return (droppedColumns == 0) || (newColumns == 0), nil - case map[string]interface{}: - objectOld := old.(map[string]interface{}) - objectNew, ok := new.(map[string]interface{}) - if !ok { - // if both aren't objects - return false, nil - } - var unionOfKeys map[string]bool = make(map[string]bool) - for key := range objectOld { - unionOfKeys[key] = true - } - for key := range objectNew { - unionOfKeys[key] = true - } - for key := range unionOfKeys { - valOld := objectOld[key] - valNew := objectNew[key] - switch key { - case "name": - if valOld != valNew { - return false, nil - } - case "type": - if valOld == nil || valNew == nil { - // This is invalid, so it shouldn't require a ForceNew - return true, nil - } - if !bigQueryTableTypeEq(valOld.(string), valNew.(string)) { - return false, nil - } - case "mode": - if bigQueryTableModeIsForceNew( - bigQueryTableNormalizeMode(valOld), - bigQueryTableNormalizeMode(valNew), - ) { - return false, nil - } - case "fields": - return resourceBigQueryTableSchemaIsChangeable(valOld, valNew, isExternalTable, false) - - // other parameters: description, policyTags and - // policyTags.names[] are changeable - } - } - return true, nil - case string, float64, bool, nil: - // realistically this shouldn't hit - log.Printf("[DEBUG] comparison of generics hit... not expected") - return old == new, nil - default: - log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") - return false, errors.New("unable to compare values") - } -} - -func resourceBigQueryTableSchemaCustomizeDiffFunc(d tpgresource.TerraformResourceDiff) error { - if _, hasSchema := d.GetOk("schema"); hasSchema { - oldSchema, newSchema := d.GetChange("schema") - oldSchemaText := oldSchema.(string) - newSchemaText := newSchema.(string) - if oldSchemaText == "null" { - // The API can return an empty schema which gets encoded to "null" during read. - oldSchemaText = "[]" - } - if newSchemaText == "null" { - newSchemaText = "[]" - } - var old, new interface{} - if err := json.Unmarshal([]byte(oldSchemaText), &old); err != nil { - // don't return error, its possible we are going from no schema to schema - // this case will be cover on the conparision regardless. - log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - if err := json.Unmarshal([]byte(newSchemaText), &new); err != nil { - // same as above - log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - _, isExternalTable := d.GetOk("external_data_configuration") - isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new, isExternalTable, true) - if err != nil { - return err - } - if !isChangeable { - if err := d.ForceNew("schema"); err != nil { - return err - } - } - return nil - } - return nil -} - -func resourceBigQueryTableSchemaCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - return resourceBigQueryTableSchemaCustomizeDiffFunc(d) -} - -func validateBigQueryTableSchema(v interface{}, k string) (warnings []string, errs []error) { - if v == nil { - return - } - - if _, e := validation.StringIsJSON(v, k); e != nil { - errs = append(errs, e...) 
- return - } - - var jsonList []interface{} - if err := json.Unmarshal([]byte(v.(string)), &jsonList); err != nil { - errs = append(errs, fmt.Errorf("\"schema\" is not a JSON array: %s", err)) - return - } - - for _, v := range jsonList { - if v == nil { - errs = append(errs, errors.New("\"schema\" contains a nil element")) - return - } - } - - return -} - -func ResourceBigQueryTable() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryTableCreate, - Read: resourceBigQueryTableRead, - Delete: resourceBigQueryTableDelete, - Update: resourceBigQueryTableUpdate, - Importer: &schema.ResourceImporter{ - State: resourceBigQueryTableImport, - }, - CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, - resourceBigQueryTableSchemaCustomizeDiff, - tpgresource.SetLabelsDiff, - ), - Schema: map[string]*schema.Schema{ - // TableId: [Required] The ID of the table. The ID must contain only - // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - // length is 1,024 characters. - "table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique ID for the resource. Changing this forces a new resource to be created.`, - }, - - // DatasetId: [Required] The ID of the dataset containing this table. - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The dataset ID to create the table in. Changing this forces a new resource to be created.`, - }, - - // ProjectId: [Required] The ID of the project containing this table. - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs.`, - }, - - // Description: [Optional] A user-friendly description of this table. 
- "description": { - Type: schema.TypeString, - Optional: true, - Description: `The field description.`, - }, - - // ExpirationTime: [Optional] The time when this table expires, in - // milliseconds since the epoch. If not present, the table will persist - // indefinitely. Expired tables will be deleted and their storage - // reclaimed. - "expiration_time": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: `The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.`, - }, - - // ExternalDataConfiguration [Optional] Describes the data format, - // location, and other properties of a table stored outside of BigQuery. - // By defining these properties, the data source can then be queried as - // if it were a standard BigQuery table. - "external_data_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Autodetect : [Required] If true, let BigQuery try to autodetect the - // schema and format of the table. - "autodetect": { - Type: schema.TypeBool, - Required: true, - Description: `Let BigQuery try to autodetect the schema and format of the table.`, - }, - // SourceFormat [Required] The data format. - "source_format": { - Type: schema.TypeString, - Optional: true, - Description: `Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, - ValidateFunc: validation.StringInSlice([]string{ - "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", - }, false), - }, - // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud. - "source_uris": { - Type: schema.TypeList, - Required: true, - Description: `A list of the fully-qualified URIs that point to your data in Google Cloud.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // FileSetSpecType: [Optional] Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. - "file_set_spec_type": { - Type: schema.TypeString, - Optional: true, - Description: `Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems.`, - }, - // Compression: [Optional] The compression type of the data source. - "compression": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"NONE", "GZIP"}, false), - Default: "NONE", - Description: `The compression type of the data source. Valid values are "NONE" or "GZIP".`, - }, - // Schema: [Optional] The schema for the data. - // Schema is required for CSV and JSON formats if autodetect is not on. - // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. 
- "schema": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateBigQueryTableSchema, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - Description: `A JSON schema for the external table. Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables.`, - }, - // CsvOptions: [Optional] Additional properties to set if - // sourceFormat is set to CSV. - "csv_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if source_format is set to "CSV".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Quote: [Required] The value that is used to quote data - // sections in a CSV file. - "quote": { - Type: schema.TypeString, - Required: true, - Description: `The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in Terraform escaped as \". Due to limitations with Terraform default values, this value is required to be explicitly set.`, - }, - // AllowJaggedRows: [Optional] Indicates if BigQuery should - // accept rows that are missing trailing optional columns. - "allow_jagged_rows": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should accept rows that are missing trailing optional columns.`, - }, - // AllowQuotedNewlines: [Optional] Indicates if BigQuery - // should allow quoted data sections that contain newline - // characters in a CSV file. The default value is false. 
- "allow_quoted_newlines": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.`, - }, - // Encoding: [Optional] The character encoding of the data. - // The supported values are UTF-8 or ISO-8859-1. - "encoding": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"ISO-8859-1", "UTF-8"}, false), - Default: "UTF-8", - Description: `The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.`, - }, - // FieldDelimiter: [Optional] The separator for fields in a CSV file. - "field_delimiter": { - Type: schema.TypeString, - Optional: true, - Default: ",", - Description: `The separator for fields in a CSV file.`, - }, - // SkipLeadingRows: [Optional] The number of rows at the top - // of a CSV file that BigQuery will skip when reading the data. - "skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: `The number of rows at the top of a CSV file that BigQuery will skip when reading the data.`, - }, - }, - }, - }, - // jsonOptions: [Optional] Additional properties to set if sourceFormat is set to JSON. - "json_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to JSON.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "encoding": { - Type: schema.TypeString, - Optional: true, - Default: "UTF-8", - ValidateFunc: validation.StringInSlice([]string{"UTF-8", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE"}, false), - Description: `The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.`, - }, - }, - }, - }, - - "json_extension": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"GEOJSON"}, false), - Description: `Load option to be used together with sourceFormat newline-delimited JSON to indicate that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and sourceFormat must be set to NEWLINE_DELIMITED_JSON).`, - }, - - "bigtable_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if sourceFormat is set to BIGTABLE.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "column_family": { - Type: schema.TypeList, - Optional: true, - Description: `A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "column": { - Type: schema.TypeList, - Optional: true, - Description: `A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "qualifier_encoded": { - Type: schema.TypeString, - Optional: true, - Description: `Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. 
Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.`, - }, - "qualifier_string": { - Type: schema.TypeString, - Optional: true, - Description: `Qualifier string.`, - }, - "field_name": { - Type: schema.TypeString, - Optional: true, - Description: `If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: `The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.`, - }, - "encoding": { - Type: schema.TypeString, - Optional: true, - Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.`, - }, - "only_read_latest": { - Type: schema.TypeBool, - Optional: true, - Description: `If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. 
However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.`, - }, - }, - }, - }, - "family_id": { - Type: schema.TypeString, - Optional: true, - Description: `Identifier of the column family.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: `The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.`, - }, - "encoding": { - Type: schema.TypeString, - Optional: true, - Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.`, - }, - "only_read_latest": { - Type: schema.TypeBool, - Optional: true, - Description: `If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.`, - }, - }, - }, - }, - "ignore_unspecified_column_families": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.`, - }, - "read_rowkey_as_string": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then the rowkey column families will be read and converted to string. 
Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.`, - }, - "output_column_families_as_json": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.`, - }, - }, - }, - }, - - "parquet_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to PARQUET.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enum_as_string": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, - }, - "enable_list_inference": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether to use schema inference specifically for Parquet LIST logical type.`, - }, - }, - }, - }, - // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. - "google_sheets_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if source_format is set to "GOOGLE_SHEETS".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Range: [Optional] Range of a sheet to query from. Only used when non-empty. - // Typical format: !: - "range": { - Type: schema.TypeString, - Optional: true, - Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. 
Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - // SkipLeadingRows: [Optional] The number of rows at the top - // of the sheet that BigQuery will skip when reading the data. - "skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - }, - }, - }, - - // HivePartitioningOptions:: [Optional] Options for configuring hive partitioning detect. - "hive_partitioning_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Mode: [Optional] [Experimental] When set, what mode of hive partitioning to use when reading data. - // Two modes are supported. - //* AUTO: automatically infer partition key name(s) and type(s). - //* STRINGS: automatically infer partition key name(s). - "mode": { - Type: schema.TypeString, - Optional: true, - Description: `When set, what mode of hive partitioning to use when reading data.`, - }, - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. 
- "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - }, - // SourceUriPrefix: [Optional] [Experimental] When hive partition detection is requested, a common for all source uris must be required. - // The prefix must end immediately before the partition key encoding begins. - "source_uri_prefix": { - Type: schema.TypeString, - Optional: true, - Description: `When hive partition detection is requested, a common for all source uris must be required. The prefix must end immediately before the partition key encoding begins.`, - }, - }, - }, - }, - // AvroOptions: [Optional] Additional options if sourceFormat is set to AVRO. - "avro_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if source_format is set to "AVRO"`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "use_avro_logical_types": { - Type: schema.TypeBool, - Required: true, - Description: `If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).`, - }, - }, - }, - }, - - // IgnoreUnknownValues: [Optional] Indicates if BigQuery should - // allow extra values that are not represented in the table schema. - // If true, the extra values are ignored. If false, records with - // extra columns are treated as bad records, and if there are too - // many bad records, an invalid error is returned in the job result. - // The default value is false. - "ignore_unknown_values": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. 
If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.`, - }, - // MaxBadRecords: [Optional] The maximum number of bad records that - // BigQuery can ignore when reading data. - "max_bad_records": { - Type: schema.TypeInt, - Optional: true, - Description: `The maximum number of bad records that BigQuery can ignore when reading data.`, - }, - // ConnectionId: [Optional] The connection specifying the credentials - // to be used to read external storage, such as Azure Blob, - // Cloud Storage, or S3. The connectionId can have the form - // "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or - // "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}". - "connection_id": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: bigQueryTableConnectionIdSuppress, - Description: `The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}".`, - }, - "reference_file_schema_uri": { - Type: schema.TypeString, - Optional: true, - Description: `When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.`, - }, - "metadata_cache_mode": { - Type: schema.TypeString, - Optional: true, - Description: `Metadata Cache Mode for the table. 
Set this to enable caching of metadata from external data source.`, - ValidateFunc: validation.StringInSlice([]string{"AUTOMATIC", "MANUAL"}, false), - }, - "object_metadata": { - Type: schema.TypeString, - Optional: true, - Description: `Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If ObjectMetadata is set, sourceFormat should be omitted.`, - ConflictsWith: []string{"external_data_configuration.0.source_format"}, - }, - }, - }, - }, - - // FriendlyName: [Optional] A descriptive name for this table. - "friendly_name": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive name for the table.`, - }, - - // max_staleness: [Optional] The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type. - "max_staleness": { - Type: schema.TypeString, - Optional: true, - Description: `The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of [SQL IntervalValue type](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type).`, - }, - - // Labels: [Experimental] The labels associated with this table. You can - // use these to organize and group your tables. Label keys and values - // can be no longer than 63 characters, can only contain lowercase - // letters, numeric characters, underscores and dashes. International - // characters are allowed. Label values are optional. Label keys must - // start with a letter and each label in the list must have a different - // key. - "labels": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A mapping of labels to assign to the resource. - - **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
- Please refer to the field 'effective_labels' for all of the labels present on the resource.`, - }, - "terraform_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "effective_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // Schema: [Optional] Describes the schema of this table. - "schema": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateBigQueryTableSchema, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - DiffSuppressFunc: bigQueryTableSchemaDiffSuppress, - Description: `A JSON schema for the table.`, - }, - // View: [Optional] If specified, configures this table as a view. - "view": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a view.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Query: [Required] A query that BigQuery executes when the view is - // referenced. - "query": { - Type: schema.TypeString, - Required: true, - Description: `A query that BigQuery executes when the view is referenced.`, - }, - - // UseLegacySQL: [Optional] Specifies whether to use BigQuery's - // legacy SQL for this view. The default value is true. If set to - // false, the view will use BigQuery's standard SQL: - "use_legacy_sql": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. 
If set to false, the view will use BigQuery's standard SQL`, - }, - }, - }, - }, - - // Materialized View: [Optional] If specified, configures this table as a materialized view. - "materialized_view": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a materialized view.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // EnableRefresh: [Optional] Enable automatic refresh of - // the materialized view when the base table is updated. The default - // value is "true". - "enable_refresh": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies if BigQuery should automatically refresh materialized view when the base table is updated. The default is true.`, - }, - - // RefreshIntervalMs: [Optional] The maximum frequency - // at which this materialized view will be refreshed. The default value - // is 1800000 (30 minutes). - "refresh_interval_ms": { - Type: schema.TypeInt, - Default: 1800000, - Optional: true, - Description: `Specifies maximum frequency at which this materialized view will be refreshed. The default is 1800000.`, - }, - - "allow_non_incremental_definition": { - Type: schema.TypeBool, - Default: false, - Optional: true, - ForceNew: true, - Description: `Allow non incremental materialized view definition. The default value is false.`, - }, - - // Query: [Required] A query whose result is persisted - "query": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A query whose result is persisted.`, - }, - }, - }, - }, - - // TimePartitioning: [Experimental] If specified, configures time-based - // partitioning for this table. 
- "time_partitioning": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures time-based partitioning for this table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ExpirationMs: [Optional] Number of milliseconds for which to keep the storage for a - // partition. If unspecified when the table is created in a dataset that has - // `defaultPartitionExpirationMs`, it will inherit the value of - // `defaultPartitionExpirationMs` from the dataset. - // To specify a unlimited expiration, set the value to 0. - "expiration_ms": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: `Number of milliseconds for which to keep the storage for a partition.`, - }, - - // Type: [Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate - // one partition per day, hour, month, and year, respectively. - "type": { - Type: schema.TypeString, - Required: true, - Description: `The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.`, - ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR", "MONTH", "YEAR"}, false), - }, - - // Field: [Optional] The field used to determine how to create a time-based - // partition. If time-based partitioning is enabled without this value, the - // table is partitioned based on the load time. - "field": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.`, - }, - - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. 
- "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - Deprecated: `This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.`, - ConflictsWith: []string{"require_partition_filter"}, - }, - }, - }, - }, - - // RangePartitioning: [Optional] If specified, configures range-based - // partitioning for this table. - "range_partitioning": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures range-based partitioning for this table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Field: [Required] The field used to determine how to create a range-based - // partition. - "field": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The field used to determine how to create a range-based partition.`, - }, - - // Range: [Required] Information required to partition based on ranges. - "range": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `Information required to partition based on ranges. Structure is documented below.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Start: [Required] Start of the range partitioning, inclusive. - "start": { - Type: schema.TypeInt, - Required: true, - Description: `Start of the range partitioning, inclusive.`, - }, - - // End: [Required] End of the range partitioning, exclusive. - "end": { - Type: schema.TypeInt, - Required: true, - Description: `End of the range partitioning, exclusive.`, - }, - - // Interval: [Required] The width of each range within the partition. 
- "interval": { - Type: schema.TypeInt, - Required: true, - Description: `The width of each range within the partition.`, - }, - }, - }, - }, - }, - }, - }, - - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. - "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - ConflictsWith: []string{"time_partitioning.0.require_partition_filter"}, - }, - - // Clustering: [Optional] Specifies column names to use for data clustering. Up to four - // top-level columns are allowed, and should be specified in descending priority order. - "clustering": { - Type: schema.TypeList, - Optional: true, - MaxItems: 4, - Description: `Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "encryption_configuration": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The self link or full name of a key which should be used to encrypt this table. 
Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the google_bigquery_default_service_account datasource and the google_kms_crypto_key_iam_binding resource.`, - }, - "kms_key_version": { - Type: schema.TypeString, - Computed: true, - Description: `The self link or full name of the kms key version used to encrypt this table.`, - }, - }, - }, - }, - - // CreationTime: [Output-only] The time when this table was created, in - // milliseconds since the epoch. - "creation_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this table was created, in milliseconds since the epoch.`, - }, - - // Etag: [Output-only] A hash of this resource. - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - - // LastModifiedTime: [Output-only] The time when this table was last - // modified, in milliseconds since the epoch. - "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this table was last modified, in milliseconds since the epoch.`, - }, - - // Location: [Output-only] The geographic location where the table - // resides. This value is inherited from the dataset. - "location": { - Type: schema.TypeString, - Computed: true, - Description: `The geographic location where the table resides. This value is inherited from the dataset.`, - }, - - // NumBytes: [Output-only] The size of this table in bytes, excluding - // any data in the streaming buffer. - "num_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `The geographic location where the table resides. This value is inherited from the dataset.`, - }, - - // NumLongTermBytes: [Output-only] The number of bytes in the table that - // are considered "long-term storage". 
- "num_long_term_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of bytes in the table that are considered "long-term storage".`, - }, - - // NumRows: [Output-only] The number of rows of data in this table, - // excluding any data in the streaming buffer. - "num_rows": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of rows of data in this table, excluding any data in the streaming buffer.`, - }, - - // SelfLink: [Output-only] A URL that can be used to access this - // resource again. - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - // Type: [Output-only] Describes the table type. The following values - // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table - // defined by a SQL query. EXTERNAL: A table that references data stored - // in an external storage system, such as Google Cloud Storage. The - // default value is TABLE. - "type": { - Type: schema.TypeString, - Computed: true, - Description: `Describes the table type.`, - }, - - "deletion_protection": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether Terraform will be prevented from destroying the instance. When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the table will fail. When the field is set to false, deleting the table is allowed.`, - }, - - {{ if ne $.TargetVersionName `ga` -}} - "allow_resource_tags_on_deletion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether or not to allow table deletion when there are still resource tags attached.`, - }, - - {{ end }} - // TableConstraints: [Optional] Defines the primary key and foreign keys. 
- "table_constraints": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Defines the primary key and foreign keys.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // PrimaryKey: [Optional] Represents the primary key constraint - // on a table's columns. Present only if the table has a primary key. - // The primary key is not enforced. - "primary_key": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Represents a primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - //Columns: [Required] The columns that are composed of the primary key constraint. - "columns": { - Type: schema.TypeList, - Required: true, - Description: `The columns that are composed of the primary key constraint.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - // ForeignKeys: [Optional] Present only if the table has a foreign key. - // The foreign key is not enforced. - "foreign_keys": { - Type: schema.TypeList, - Optional: true, - Description: `Present only if the table has a foreign key. The foreign key is not enforced.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Name: [Optional] Set only if the foreign key constraint is named. - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Set only if the foreign key constraint is named.`, - }, - - // ReferencedTable: [Required] The table that holds the primary key - // and is referenced by this foreign key. - "referenced_table": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `The table that holds the primary key and is referenced by this foreign key.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ProjectId: [Required] The ID of the project containing this table. 
- "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - - // DatasetId: [Required] The ID of the dataset containing this table. - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - - // TableId: [Required] The ID of the table. The ID must contain only - // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - // length is 1,024 characters. Certain operations allow suffixing of - // the table ID with a partition decorator, such as - // sample_table$20190123. - "table_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.`, - }, - }, - }, - }, - - // ColumnReferences: [Required] The pair of the foreign key column and primary key column. - "column_references": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `The pair of the foreign key column and primary key column.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ReferencingColumn: [Required] The column that composes the foreign key. 
- "referencing_column": { - Type: schema.TypeString, - Required: true, - Description: `The column that composes the foreign key.`, - }, - - // ReferencedColumn: [Required] The column in the primary key that are - // referenced by the referencingColumn - "referenced_column": { - Type: schema.TypeString, - Required: true, - Description: `The column in the primary key that are referenced by the referencingColumn.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - // TableReplicationInfo: [Optional] Replication info of a table created using `AS REPLICA` DDL like: `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`. - "table_replication_info": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source project.`, - }, - "source_dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source dataset.`, - }, - "source_table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source materialized view.`, - }, - "replication_interval_ms": { - Type: schema.TypeInt, - Default: 300000, - Optional: true, - ForceNew: true, - Description: `The interval at which the source materialized view is polled for updates. The default is 300000.`, - }, - }, - }, - }, - {{- if ne $.TargetVersionName "ga" }} - "resource_tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The tags attached to this table. Tag keys are globally unique. 
Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".`, - }, - {{- end }} - }, - UseJSONNumber: true, - } -} - -func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { - config := meta.(*transport_tpg.Config) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - table := &bigquery.Table{ - TableReference: &bigquery.TableReference{ - DatasetId: d.Get("dataset_id").(string), - TableId: d.Get("table_id").(string), - ProjectId: project, - }, - } - - if v, ok := d.GetOk("view"); ok { - table.View = expandView(v) - } - - if v, ok := d.GetOk("materialized_view"); ok { - table.MaterializedView = expandMaterializedView(v) - } - - if v, ok := d.GetOk("description"); ok { - table.Description = v.(string) - } - - if v, ok := d.GetOk("expiration_time"); ok { - table.ExpirationTime = int64(v.(int)) - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - externalDataConfiguration, err := expandExternalDataConfiguration(v) - if err != nil { - return nil, err - } - - table.ExternalDataConfiguration = externalDataConfiguration - } - - if v, ok := d.GetOk("friendly_name"); ok { - table.FriendlyName = v.(string) - } - - if v, ok := d.GetOk("max_staleness"); ok { - table.MaxStaleness = v.(string) - } - - if v, ok := d.GetOk("encryption_configuration.0.kms_key_name"); ok { - table.EncryptionConfiguration = &bigquery.EncryptionConfiguration{ - KmsKeyName: v.(string), - } - } - - if v, ok := d.GetOk("effective_labels"); ok { - labels := map[string]string{} - - for k, v := range v.(map[string]interface{}) { - labels[k] = v.(string) - } - - table.Labels = labels - } - - if v, ok := d.GetOk("schema"); ok { - _, viewPresent := d.GetOk("view") - _, materializedViewPresent := d.GetOk("materialized_view") - 
managePolicyTags := !viewPresent && !materializedViewPresent - schema, err := expandSchema(v, managePolicyTags) - if err != nil { - return nil, err - } - table.Schema = schema - } - - if v, ok := d.GetOk("time_partitioning"); ok { - table.TimePartitioning = expandTimePartitioning(v) - } - - if v, ok := d.GetOk("range_partitioning"); ok { - rangePartitioning, err := expandRangePartitioning(v) - if err != nil { - return nil, err - } - - table.RangePartitioning = rangePartitioning - } - - if v, ok := d.GetOk("require_partition_filter"); ok { - table.RequirePartitionFilter = v.(bool) - } - - if v, ok := d.GetOk("clustering"); ok { - table.Clustering = &bigquery.Clustering{ - Fields: tpgresource.ConvertStringArr(v.([]interface{})), - ForceSendFields: []string{"Fields"}, - } - } - - if v, ok := d.GetOk("table_constraints"); ok { - tableConstraints, err := expandTableConstraints(v) - if err != nil { - return nil, err - } - - table.TableConstraints = tableConstraints - } - - {{ if ne $.TargetVersionName `ga` -}} - table.ResourceTags = tpgresource.ExpandStringMap(d, "resource_tags") - - {{ end }} - return table, nil -} - -func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - - if v, ok := d.GetOk("table_replication_info"); ok { - if table.Schema != nil || table.View != nil || table.MaterializedView != nil { - return errors.New("Schema, view, or materialized view cannot be specified when table replication info is present") - } - - replicationDDL := fmt.Sprintf("CREATE MATERIALIZED VIEW %s.%s.%s", d.Get("project").(string), d.Get("dataset_id").(string), d.Get("table_id").(string)) - - 
tableReplicationInfo := expandTableReplicationInfo(v) - replicationIntervalMs := tableReplicationInfo["replication_interval_ms"].(int64) - if replicationIntervalMs > 0 { - replicationIntervalSeconds := replicationIntervalMs / 1000 - replicationDDL = fmt.Sprintf("%s OPTIONS(replication_interval_seconds=%d)", replicationDDL, replicationIntervalSeconds) - } - - replicationDDL = fmt.Sprintf("%s AS REPLICA OF %s.%s.%s", replicationDDL, tableReplicationInfo["source_project_id"], tableReplicationInfo["source_dataset_id"], tableReplicationInfo["source_table_id"]) - useLegacySQL := false - - req := &bigquery.QueryRequest{ - Query: replicationDDL, - UseLegacySql: &useLegacySQL, - } - - log.Printf("[INFO] Creating a replica materialized view with DDL: '%s'", replicationDDL) - - _, err := config.NewBigQueryClient(userAgent).Jobs.Query(project, req).Do() - - id := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", project, datasetID, d.Get("table_id").(string)) - if err != nil { - if deleteErr := resourceBigQueryTableDelete(d, meta); deleteErr != nil { - log.Printf("[INFO] Unable to clean up table %s: %s", id, deleteErr) - } - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", id) - d.SetId(id) - - return resourceBigQueryTableRead(d, meta) - } - - if table.View != nil && table.Schema != nil { - - log.Printf("[INFO] Removing schema from table definition because BigQuery does not support setting schema on view creation") - schemaBack := table.Schema - table.Schema = nil - - log.Printf("[INFO] Creating BigQuery table: %s without schema", table.TableReference.TableId) - - res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - - table.Schema = schemaBack - log.Printf("[INFO] 
Updating BigQuery table: %s with schema", table.TableReference.TableId) - if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, res.TableReference.TableId, table).Do(); err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been updated with schema", res.Id) - } else { - log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) - - res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - } - - return resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - res, err := config.NewBigQueryClient(userAgent).Tables.Get(project, datasetID, tableID).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("description", res.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("expiration_time", res.ExpirationTime); err != nil { - return fmt.Errorf("Error setting expiration_time: %s", err) - } - if err := d.Set("friendly_name", res.FriendlyName); err != nil { - return fmt.Errorf("Error setting friendly_name: %s", err) - } - if 
err := d.Set("max_staleness", res.MaxStaleness); err != nil { - return fmt.Errorf("Error setting max_staleness: %s", err) - } - if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { - return fmt.Errorf("Error setting labels: %s", err) - } - if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { - return fmt.Errorf("Error setting terraform_labels: %s", err) - } - if err := d.Set("effective_labels", res.Labels); err != nil { - return fmt.Errorf("Error setting effective_labels: %s", err) - } - if err := d.Set("creation_time", res.CreationTime); err != nil { - return fmt.Errorf("Error setting creation_time: %s", err) - } - if err := d.Set("etag", res.Etag); err != nil { - return fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("last_modified_time", res.LastModifiedTime); err != nil { - return fmt.Errorf("Error setting last_modified_time: %s", err) - } - if err := d.Set("location", res.Location); err != nil { - return fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("num_bytes", res.NumBytes); err != nil { - return fmt.Errorf("Error setting num_bytes: %s", err) - } - if err := d.Set("table_id", res.TableReference.TableId); err != nil { - return fmt.Errorf("Error setting table_id: %s", err) - } - if err := d.Set("dataset_id", res.TableReference.DatasetId); err != nil { - return fmt.Errorf("Error setting dataset_id: %s", err) - } - if err := d.Set("num_long_term_bytes", res.NumLongTermBytes); err != nil { - return fmt.Errorf("Error setting num_long_term_bytes: %s", err) - } - if err := d.Set("num_rows", res.NumRows); err != nil { - return fmt.Errorf("Error setting num_rows: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("type", res.Type); err != nil { - return fmt.Errorf("Error setting type: %s", err) - } - - // determine whether the deprecated require_partition_filter field is used - 
use_old_rpf := false - if _, ok := d.GetOk("time_partitioning.0.require_partition_filter"); ok { - use_old_rpf = true - } else if err := d.Set("require_partition_filter", res.RequirePartitionFilter); err != nil { - return fmt.Errorf("Error setting require_partition_filter: %s", err) - } - - if res.ExternalDataConfiguration != nil { - externalDataConfiguration, err := flattenExternalDataConfiguration(res.ExternalDataConfiguration) - if err != nil { - return err - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - // The API response doesn't return the `external_data_configuration.schema` - // used when creating the table and it cannot be queried. - // After creation, a computed schema is stored in the toplevel `schema`, - // which combines `external_data_configuration.schema` - // with any hive partioning fields found in the `source_uri_prefix`. - // So just assume the configured schema has been applied after successful - // creation, by copying the configured value back into the resource schema. - // This avoids that reading back this field will be identified as a change. - // The `ForceNew=true` on `external_data_configuration.schema` will ensure - // the users' expectation that changing the configured input schema will - // recreate the resource. 
- edc := v.([]interface{})[0].(map[string]interface{}) - if edc["schema"] != nil { - externalDataConfiguration[0]["schema"] = edc["schema"] - } - } - - if err := d.Set("external_data_configuration", externalDataConfiguration); err != nil { - return fmt.Errorf("Error setting external_data_configuration: %s", err) - } - } - - if res.TimePartitioning != nil { - if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning, use_old_rpf)); err != nil { - return err - } - } - - if res.RangePartitioning != nil { - if err := d.Set("range_partitioning", flattenRangePartitioning(res.RangePartitioning)); err != nil { - return err - } - } - - if res.Clustering != nil { - if err := d.Set("clustering", res.Clustering.Fields); err != nil { - return fmt.Errorf("Error setting clustering: %s", err) - } - } - if res.EncryptionConfiguration != nil { - if err := d.Set("encryption_configuration", flattenEncryptionConfiguration(res.EncryptionConfiguration)); err != nil { - return err - } - } - - if res.Schema != nil { - schema, err := flattenSchema(res.Schema) - if err != nil { - return err - } - if err := d.Set("schema", schema); err != nil { - return fmt.Errorf("Error setting schema: %s", err) - } - } - - if res.View != nil { - view := flattenView(res.View) - if err := d.Set("view", view); err != nil { - return fmt.Errorf("Error setting view: %s", err) - } - } - - if res.MaterializedView != nil { - materialized_view := flattenMaterializedView(res.MaterializedView) - - if err := d.Set("materialized_view", materialized_view); err != nil { - return fmt.Errorf("Error setting materialized view: %s", err) - } - } - - if res.TableConstraints != nil { - table_constraints := flattenTableConstraints(res.TableConstraints) - - if err := d.Set("table_constraints", table_constraints); err != nil { - return fmt.Errorf("Error setting table constraints: %s", err) - } - } - - {{ if ne $.TargetVersionName `ga` -}} - if err := d.Set("resource_tags", res.ResourceTags); err != nil { - 
return fmt.Errorf("Error setting resource tags: %s", err) - } - - {{ end }} - // TODO: Update when the Get API fields for TableReplicationInfo are available in the client library. - url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}BigQueryBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") - if err != nil { - return err - } - - log.Printf("[INFO] Reading BigQuery table through API: %s", url) - - getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - RawURL: url, - UserAgent: userAgent, - }) - if err != nil { - return err - } - - if v, ok := getRes["tableReplicationInfo"]; ok { - tableReplicationInfo := flattenTableReplicationInfo(v.(map[string]interface{})) - - if err := d.Set("table_replication_info", tableReplicationInfo); err != nil { - return fmt.Errorf("Error setting table replication info: %s", err) - } - } - - return nil -} - -type TableReference struct { - project string - datasetID string - tableID string -} - -func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - tableReference := &TableReference{ - project: project, - datasetID: datasetID, - tableID: tableID, - } - - if err = resourceBigQueryTableColumnDrop(config, userAgent, table, tableReference); err != nil { - return err - } - - if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, tableID, table).Do(); err != nil { - return err - } - - return 
resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableColumnDrop(config *transport_tpg.Config, userAgent string, table *bigquery.Table, tableReference *TableReference) error { - oldTable, err := config.NewBigQueryClient(userAgent).Tables.Get(tableReference.project, tableReference.datasetID, tableReference.tableID).Do() - if err != nil { - return err - } - - if table.Schema == nil { - return nil - } - - newTableFields := map[string]bool{} - for _, field := range table.Schema.Fields { - newTableFields[field.Name] = true - } - - droppedColumns := []string{} - for _, field := range oldTable.Schema.Fields { - if !newTableFields[field.Name] { - droppedColumns = append(droppedColumns, field.Name) - } - } - - if len(droppedColumns) > 0 { - droppedColumnsString := strings.Join(droppedColumns, ", DROP COLUMN ") - - dropColumnsDDL := fmt.Sprintf("ALTER TABLE `%s.%s.%s` DROP COLUMN %s", tableReference.project, tableReference.datasetID, tableReference.tableID, droppedColumnsString) - log.Printf("[INFO] Dropping columns in-place: %s", dropColumnsDDL) - - useLegacySQL := false - req := &bigquery.QueryRequest{ - Query: dropColumnsDDL, - UseLegacySql: &useLegacySQL, - } - - _, err = config.NewBigQueryClient(userAgent).Jobs.Query(tableReference.project, req).Do() - if err != nil { - return err - } - } - - return nil -} - -func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { - if d.Get("deletion_protection").(bool) { - return fmt.Errorf("cannot destroy table %v without setting deletion_protection=false and running `terraform apply`", d.Id()) - } - {{- if ne $.TargetVersionName "ga" }} - if v, ok := d.GetOk("resource_tags"); ok { - if !d.Get("allow_resource_tags_on_deletion").(bool) { - var resourceTags []string - - for k, v := range v.(map[string]interface{}) { - resourceTags = append(resourceTags, fmt.Sprintf("%s:%s", k, v.(string))) - } - - return fmt.Errorf("cannot destroy table %v without unsetting the following resource tags or setting 
allow_resource_tags_on_deletion=true: %v", d.Id(), resourceTags) - } - } - - {{ end }} - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - if err := config.NewBigQueryClient(userAgent).Tables.Delete(project, datasetID, tableID).Do(); err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataConfiguration, error) { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - edc := &bigquery.ExternalDataConfiguration{ - Autodetect: raw["autodetect"].(bool), - } - - sourceUris := []string{} - for _, rawSourceUri := range raw["source_uris"].([]interface{}) { - sourceUris = append(sourceUris, rawSourceUri.(string)) - } - if len(sourceUris) > 0 { - edc.SourceUris = sourceUris - } - - if v, ok := raw["file_set_spec_type"]; ok { - edc.FileSetSpecType = v.(string) - } - - if v, ok := raw["compression"]; ok { - edc.Compression = v.(string) - } - - if v, ok := raw["json_extension"]; ok { - edc.JsonExtension = v.(string) - } - - if v, ok := raw["csv_options"]; ok { - edc.CsvOptions = expandCsvOptions(v) - } - if v, ok := raw["json_options"]; ok { - edc.JsonOptions = expandJsonOptions(v) - } - if v, ok := raw["bigtable_options"]; ok { - edc.BigtableOptions = expandBigtableOptions(v) - } - if v, ok := raw["google_sheets_options"]; ok { - edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) - } - if v, ok := raw["hive_partitioning_options"]; ok { - edc.HivePartitioningOptions = expandHivePartitioningOptions(v) - } - if v, ok := raw["avro_options"]; ok { - edc.AvroOptions = expandAvroOptions(v) - } - if v, ok := raw["parquet_options"]; ok { - 
edc.ParquetOptions = expandParquetOptions(v) - } - - if v, ok := raw["ignore_unknown_values"]; ok { - edc.IgnoreUnknownValues = v.(bool) - } - if v, ok := raw["max_bad_records"]; ok { - edc.MaxBadRecords = int64(v.(int)) - } - if v, ok := raw["schema"]; ok { - managePolicyTags := true - schema, err := expandSchema(v, managePolicyTags) - if err != nil { - return nil, err - } - edc.Schema = schema - } - if v, ok := raw["source_format"]; ok { - edc.SourceFormat = v.(string) - } - if v, ok := raw["connection_id"]; ok { - edc.ConnectionId = v.(string) - } - if v, ok := raw["reference_file_schema_uri"]; ok { - edc.ReferenceFileSchemaUri = v.(string) - } - if v, ok := raw["metadata_cache_mode"]; ok { - edc.MetadataCacheMode = v.(string) - } - if v, ok := raw["object_metadata"]; ok { - edc.ObjectMetadata = v.(string) - } - - return edc, nil - -} - -func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ([]map[string]interface{}, error) { - result := map[string]interface{}{} - - result["autodetect"] = edc.Autodetect - result["source_uris"] = edc.SourceUris - - if edc.FileSetSpecType != "" { - result["file_set_spec_type"] = edc.FileSetSpecType - } - - if edc.Compression != "" { - result["compression"] = edc.Compression - } - - if edc.JsonExtension != "" { - result["json_extension"] = edc.JsonExtension - } - - if edc.CsvOptions != nil { - result["csv_options"] = flattenCsvOptions(edc.CsvOptions) - } - - if edc.GoogleSheetsOptions != nil { - result["google_sheets_options"] = flattenGoogleSheetsOptions(edc.GoogleSheetsOptions) - } - - if edc.HivePartitioningOptions != nil { - result["hive_partitioning_options"] = flattenHivePartitioningOptions(edc.HivePartitioningOptions) - } - - if edc.AvroOptions != nil { - result["avro_options"] = flattenAvroOptions(edc.AvroOptions) - } - - if edc.ParquetOptions != nil { - result["parquet_options"] = flattenParquetOptions(edc.ParquetOptions) - } - - if edc.JsonOptions != nil { - result["json_options"] = 
flattenJsonOptions(edc.JsonOptions) - } - - if edc.BigtableOptions != nil { - result["bigtable_options"] = flattenBigtableOptions(edc.BigtableOptions) - } - - if edc.IgnoreUnknownValues { - result["ignore_unknown_values"] = edc.IgnoreUnknownValues - } - if edc.MaxBadRecords != 0 { - result["max_bad_records"] = edc.MaxBadRecords - } - - if edc.SourceFormat != "" { - result["source_format"] = edc.SourceFormat - } - - if edc.ConnectionId != "" { - result["connection_id"] = edc.ConnectionId - } - - if edc.ReferenceFileSchemaUri != "" { - result["reference_file_schema_uri"] = edc.ReferenceFileSchemaUri - } - if edc.MetadataCacheMode != "" { - result["metadata_cache_mode"] = edc.MetadataCacheMode - } - - if edc.ObjectMetadata != "" { - result["object_metadata"] = edc.ObjectMetadata - } - - return []map[string]interface{}{result}, nil -} - -func expandCsvOptions(configured interface{}) *bigquery.CsvOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.CsvOptions{} - - if v, ok := raw["allow_jagged_rows"]; ok { - opts.AllowJaggedRows = v.(bool) - opts.ForceSendFields = append(opts.ForceSendFields, "allow_jagged_rows") - } - - if v, ok := raw["allow_quoted_newlines"]; ok { - opts.AllowQuotedNewlines = v.(bool) - opts.ForceSendFields = append(opts.ForceSendFields, "allow_quoted_newlines") - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["field_delimiter"]; ok { - opts.FieldDelimiter = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - - if v, ok := raw["quote"]; ok { - quote := v.(string) - opts.Quote = "e - } - - opts.ForceSendFields = []string{"Quote"} - - return opts -} - -func flattenCsvOptions(opts *bigquery.CsvOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.AllowJaggedRows { - result["allow_jagged_rows"] = opts.AllowJaggedRows - } 
- - if opts.AllowQuotedNewlines { - result["allow_quoted_newlines"] = opts.AllowQuotedNewlines - } - - if opts.Encoding != "" { - result["encoding"] = opts.Encoding - } - - if opts.FieldDelimiter != "" { - result["field_delimiter"] = opts.FieldDelimiter - } - - if opts.SkipLeadingRows != 0 { - result["skip_leading_rows"] = opts.SkipLeadingRows - } - - if opts.Quote != nil { - result["quote"] = *opts.Quote - } - - return []map[string]interface{}{result} -} - -func expandGoogleSheetsOptions(configured interface{}) *bigquery.GoogleSheetsOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.GoogleSheetsOptions{} - - if v, ok := raw["range"]; ok { - opts.Range = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - return opts -} - -func flattenGoogleSheetsOptions(opts *bigquery.GoogleSheetsOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Range != "" { - result["range"] = opts.Range - } - - if opts.SkipLeadingRows != 0 { - result["skip_leading_rows"] = opts.SkipLeadingRows - } - - return []map[string]interface{}{result} -} - -func expandHivePartitioningOptions(configured interface{}) *bigquery.HivePartitioningOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.HivePartitioningOptions{} - - if v, ok := raw["mode"]; ok { - opts.Mode = v.(string) - } - - if v, ok := raw["require_partition_filter"]; ok { - opts.RequirePartitionFilter = v.(bool) - } - - if v, ok := raw["source_uri_prefix"]; ok { - opts.SourceUriPrefix = v.(string) - } - - return opts -} - -func flattenHivePartitioningOptions(opts *bigquery.HivePartitioningOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Mode != "" { - result["mode"] = opts.Mode - } - - if 
opts.RequirePartitionFilter { - result["require_partition_filter"] = opts.RequirePartitionFilter - } - - if opts.SourceUriPrefix != "" { - result["source_uri_prefix"] = opts.SourceUriPrefix - } - - return []map[string]interface{}{result} -} - -func expandAvroOptions(configured interface{}) *bigquery.AvroOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.AvroOptions{} - - if v, ok := raw["use_avro_logical_types"]; ok { - opts.UseAvroLogicalTypes = v.(bool) - } - - return opts -} - -func flattenAvroOptions(opts *bigquery.AvroOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.UseAvroLogicalTypes { - result["use_avro_logical_types"] = opts.UseAvroLogicalTypes - } - - return []map[string]interface{}{result} -} - -func expandParquetOptions(configured interface{}) *bigquery.ParquetOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.ParquetOptions{} - - if v, ok := raw["enum_as_string"]; ok { - opts.EnumAsString = v.(bool) - } - - if v, ok := raw["enable_list_inference"]; ok { - opts.EnableListInference = v.(bool) - } - - return opts -} - -func flattenParquetOptions(opts *bigquery.ParquetOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.EnumAsString { - result["enum_as_string"] = opts.EnumAsString - } - - if opts.EnableListInference { - result["enable_list_inference"] = opts.EnableListInference - } - - return []map[string]interface{}{result} -} - -func expandBigtableOptions(configured interface{}) *bigquery.BigtableOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.BigtableOptions{} - - crs := []*bigquery.BigtableColumnFamily{} - if v, ok := raw["column_family"]; ok { - for _, columnFamily := 
range v.([]interface{}) { - crs = append(crs, expandBigtableColumnFamily(columnFamily)) - } - - if len(crs) > 0 { - opts.ColumnFamilies = crs - } - } - - if v, ok := raw["ignore_unspecified_column_families"]; ok { - opts.IgnoreUnspecifiedColumnFamilies = v.(bool) - } - - if v, ok := raw["read_rowkey_as_string"]; ok { - opts.ReadRowkeyAsString = v.(bool) - } - - if v, ok := raw["output_column_families_as_json"]; ok { - opts.OutputColumnFamiliesAsJson = v.(bool) - } - - return opts -} - -func flattenBigtableOptions(opts *bigquery.BigtableOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.ColumnFamilies != nil { - result["column_family"] = flattenBigtableColumnFamily(opts.ColumnFamilies) - } - - if opts.IgnoreUnspecifiedColumnFamilies { - result["ignore_unspecified_column_families"] = opts.IgnoreUnspecifiedColumnFamilies - } - - if opts.ReadRowkeyAsString { - result["read_rowkey_as_string"] = opts.ReadRowkeyAsString - } - - if opts.OutputColumnFamiliesAsJson { - result["output_column_families_as_json"] = opts.OutputColumnFamiliesAsJson - } - - return []map[string]interface{}{result} -} - -func expandBigtableColumnFamily(configured interface{}) *bigquery.BigtableColumnFamily { - raw := configured.(map[string]interface{}) - - opts := &bigquery.BigtableColumnFamily{} - - crs := []*bigquery.BigtableColumn{} - if v, ok := raw["column"]; ok { - for _, column := range v.([]interface{}) { - crs = append(crs, expandBigtableColumn(column)) - } - - if len(crs) > 0 { - opts.Columns = crs - } - } - - if v, ok := raw["family_id"]; ok { - opts.FamilyId = v.(string) - } - - if v, ok := raw["type"]; ok { - opts.Type = v.(string) - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["only_read_latest"]; ok { - opts.OnlyReadLatest = v.(bool) - } - - return opts -} - -func flattenBigtableColumnFamily(edc []*bigquery.BigtableColumnFamily) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, 
fr := range edc { - result := map[string]interface{}{} - if fr.Columns != nil { - result["column"] = flattenBigtableColumn(fr.Columns) - } - result["family_id"] = fr.FamilyId - result["type"] = fr.Type - result["encoding"] = fr.Encoding - result["only_read_latest"] = fr.OnlyReadLatest - results = append(results, result) - } - - return results -} - -func expandBigtableColumn(configured interface{}) *bigquery.BigtableColumn { - raw := configured.(map[string]interface{}) - - opts := &bigquery.BigtableColumn{} - - if v, ok := raw["qualifier_encoded"]; ok { - opts.QualifierEncoded = v.(string) - } - - if v, ok := raw["qualifier_string"]; ok { - opts.QualifierString = v.(string) - } - - if v, ok := raw["field_name"]; ok { - opts.FieldName = v.(string) - } - - if v, ok := raw["type"]; ok { - opts.Type = v.(string) - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["only_read_latest"]; ok { - opts.OnlyReadLatest = v.(bool) - } - - return opts -} - -func flattenBigtableColumn(edc []*bigquery.BigtableColumn) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, fr := range edc { - result := map[string]interface{}{} - result["qualifier_encoded"] = fr.QualifierEncoded - result["qualifier_string"] = fr.QualifierString - result["field_name"] = fr.FieldName - result["type"] = fr.Type - result["encoding"] = fr.Encoding - result["only_read_latest"] = fr.OnlyReadLatest - results = append(results, result) - } - - return results -} - -func expandJsonOptions(configured interface{}) *bigquery.JsonOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.JsonOptions{} - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - return opts -} - -func flattenJsonOptions(opts *bigquery.JsonOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Encoding != "" { - result["encoding"] = 
opts.Encoding - } - - return []map[string]interface{}{result} -} - -func expandSchema(raw interface{}, managePolicyTags bool) (*bigquery.TableSchema, error) { - var fields []*bigquery.TableFieldSchema - - if len(raw.(string)) == 0 { - return nil, nil - } - - if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { - return nil, err - } - - if managePolicyTags { - for _, field := range fields { - setEmptyPolicyTagsInSchema(field) - } - } - - return &bigquery.TableSchema{Fields: fields}, nil -} - -func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { - schema, err := json.Marshal(tableSchema.Fields) - if err != nil { - return "", err - } - - return string(schema), nil -} - -// Explicitly set empty PolicyTags unless the PolicyTags field is specified in the schema. -func setEmptyPolicyTagsInSchema(field *bigquery.TableFieldSchema) { - // Field has children fields. - if len(field.Fields) > 0 { - for _, subField := range field.Fields { - setEmptyPolicyTagsInSchema(subField) - } - return - } - // Field is a leaf. 
- if field.PolicyTags == nil { - field.PolicyTags = &bigquery.TableFieldSchemaPolicyTags{Names: []string{}} - } -} - -func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { - raw := configured.([]interface{})[0].(map[string]interface{}) - tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} - - if v, ok := raw["field"]; ok { - tp.Field = v.(string) - } - - if v, ok := raw["expiration_ms"]; ok { - tp.ExpirationMs = int64(v.(int)) - } - - if v, ok := raw["require_partition_filter"]; ok { - tp.RequirePartitionFilter = v.(bool) - } - - return tp -} - -func expandRangePartitioning(configured interface{}) (*bigquery.RangePartitioning, error) { - if configured == nil { - return nil, nil - } - - rpList := configured.([]interface{}) - if len(rpList) == 0 || rpList[0] == nil { - return nil, errors.New("Error casting range partitioning interface to expected structure") - } - - rangePartJson := rpList[0].(map[string]interface{}) - rp := &bigquery.RangePartitioning{ - Field: rangePartJson["field"].(string), - } - - if v, ok := rangePartJson["range"]; ok && v != nil { - rangeLs := v.([]interface{}) - if len(rangeLs) != 1 || rangeLs[0] == nil { - return nil, errors.New("Non-empty range must be given for range partitioning") - } - - rangeJson := rangeLs[0].(map[string]interface{}) - rp.Range = &bigquery.RangePartitioningRange{ - Start: int64(rangeJson["start"].(int)), - End: int64(rangeJson["end"].(int)), - Interval: int64(rangeJson["interval"].(int)), - ForceSendFields: []string{"Start"}, - } - } - - return rp, nil -} - -func flattenEncryptionConfiguration(ec *bigquery.EncryptionConfiguration) []map[string]interface{} { - re := regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(ec.KmsKeyName) - - if len(ec.KmsKeyName) == 0 { - return nil - } - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[1], - "kms_key_version": ec.KmsKeyName, - 
}, - } - } - - // The key name was returned, no need to set the version - return []map[string]interface{}{{"{{"}}"kms_key_name": ec.KmsKeyName, "kms_key_version": ""{{"}}"}} -} - -func flattenTimePartitioning(tp *bigquery.TimePartitioning, use_old_rpf bool) []map[string]interface{} { - result := map[string]interface{}{"type": tp.Type} - - if tp.Field != "" { - result["field"] = tp.Field - } - - if tp.ExpirationMs != 0 { - result["expiration_ms"] = tp.ExpirationMs - } - - if tp.RequirePartitionFilter && use_old_rpf { - result["require_partition_filter"] = tp.RequirePartitionFilter - } - - return []map[string]interface{}{result} -} - -func flattenRangePartitioning(rp *bigquery.RangePartitioning) []map[string]interface{} { - result := map[string]interface{}{ - "field": rp.Field, - "range": []map[string]interface{}{ - { - "start": rp.Range.Start, - "end": rp.Range.End, - "interval": rp.Range.Interval, - }, - }, - } - - return []map[string]interface{}{result} -} - -func expandView(configured interface{}) *bigquery.ViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - vd := &bigquery.ViewDefinition{Query: raw["query"].(string)} - - if v, ok := raw["use_legacy_sql"]; ok { - vd.UseLegacySql = v.(bool) - vd.ForceSendFields = append(vd.ForceSendFields, "UseLegacySql") - } - - return vd -} - -func flattenView(vd *bigquery.ViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": vd.Query} - result["use_legacy_sql"] = vd.UseLegacySql - - return []map[string]interface{}{result} -} - -func expandMaterializedView(configured interface{}) *bigquery.MaterializedViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - mvd := &bigquery.MaterializedViewDefinition{Query: raw["query"].(string)} - - if v, ok := raw["enable_refresh"]; ok { - mvd.EnableRefresh = v.(bool) - mvd.ForceSendFields = append(mvd.ForceSendFields, "EnableRefresh") - } - - if v, ok := raw["refresh_interval_ms"]; ok { - 
mvd.RefreshIntervalMs = int64(v.(int)) - mvd.ForceSendFields = append(mvd.ForceSendFields, "RefreshIntervalMs") - } - - if v, ok := raw["allow_non_incremental_definition"]; ok { - mvd.AllowNonIncrementalDefinition = v.(bool) - mvd.ForceSendFields = append(mvd.ForceSendFields, "AllowNonIncrementalDefinition") - } - - return mvd -} - -func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": mvd.Query} - result["enable_refresh"] = mvd.EnableRefresh - result["refresh_interval_ms"] = mvd.RefreshIntervalMs - result["allow_non_incremental_definition"] = mvd.AllowNonIncrementalDefinition - - return []map[string]interface{}{result} -} - -func expandPrimaryKey(configured interface{}) *bigquery.TableConstraintsPrimaryKey { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - pk := &bigquery.TableConstraintsPrimaryKey{} - - columns := []string{} - for _, rawColumn := range raw["columns"].([]interface{}) { - if rawColumn == nil { - // Terraform reads "" as nil, which ends up crashing when we cast below - // sending "" to the API triggers a 400, which is okay. 
- rawColumn = "" - } - columns = append(columns, rawColumn.(string)) - } - if len(columns) > 0 { - pk.Columns = columns - } - - return pk -} - -func flattenPrimaryKey(edc *bigquery.TableConstraintsPrimaryKey) []map[string]interface{} { - result := map[string]interface{}{} - - if edc.Columns != nil { - result["columns"] = edc.Columns - } - - return []map[string]interface{}{result} -} - -func expandReferencedTable(configured interface{}) *bigquery.TableConstraintsForeignKeysReferencedTable { - raw := configured.([]interface{})[0].(map[string]interface{}) - rt := &bigquery.TableConstraintsForeignKeysReferencedTable{} - - if v, ok := raw["project_id"]; ok { - rt.ProjectId = v.(string) - } - if v, ok := raw["dataset_id"]; ok { - rt.DatasetId = v.(string) - } - if v, ok := raw["table_id"]; ok { - rt.TableId = v.(string) - } - - return rt -} - -func flattenReferencedTable(edc *bigquery.TableConstraintsForeignKeysReferencedTable) []map[string]interface{} { - result := map[string]interface{}{} - - result["project_id"] = edc.ProjectId - result["dataset_id"] = edc.DatasetId - result["table_id"] = edc.TableId - - return []map[string]interface{}{result} -} - -func expandColumnReference(configured interface{}) *bigquery.TableConstraintsForeignKeysColumnReferences { - raw := configured.(map[string]interface{}) - - cr := &bigquery.TableConstraintsForeignKeysColumnReferences{} - - if v, ok := raw["referencing_column"]; ok { - cr.ReferencingColumn = v.(string) - } - if v, ok := raw["referenced_column"]; ok { - cr.ReferencedColumn = v.(string) - } - - return cr -} - -func flattenColumnReferences(edc []*bigquery.TableConstraintsForeignKeysColumnReferences) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, cr := range edc { - result := map[string]interface{}{} - result["referenced_column"] = cr.ReferencedColumn - result["referencing_column"] = cr.ReferencingColumn - results = append(results, result) - } - - return results -} - -func 
expandForeignKey(configured interface{}) *bigquery.TableConstraintsForeignKeys { - raw := configured.(map[string]interface{}) - - fk := &bigquery.TableConstraintsForeignKeys{} - if v, ok := raw["name"]; ok { - fk.Name = v.(string) - } - if v, ok := raw["referenced_table"]; ok { - fk.ReferencedTable = expandReferencedTable(v) - } - crs := []*bigquery.TableConstraintsForeignKeysColumnReferences{} - if v, ok := raw["column_references"]; ok { - for _, rawColumnReferences := range v.([]interface{}) { - crs = append(crs, expandColumnReference(rawColumnReferences)) - } - } - - if len(crs) > 0 { - fk.ColumnReferences = crs - } - - return fk -} - -func flattenForeignKeys(edc []*bigquery.TableConstraintsForeignKeys) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, fr := range edc { - result := map[string]interface{}{} - result["name"] = fr.Name - result["column_references"] = flattenColumnReferences(fr.ColumnReferences) - result["referenced_table"] = flattenReferencedTable(fr.ReferencedTable) - results = append(results, result) - } - - return results -} - -func expandTableConstraints(cfg interface{}) (*bigquery.TableConstraints, error) { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - edc := &bigquery.TableConstraints{} - - if v, ok := raw["primary_key"]; ok { - edc.PrimaryKey = expandPrimaryKey(v) - } - - fks := []*bigquery.TableConstraintsForeignKeys{} - - if v, ok := raw["foreign_keys"]; ok { - for _, rawForeignKey := range v.([]interface{}) { - fks = append(fks, expandForeignKey(rawForeignKey)) - } - } - - if len(fks) > 0 { - edc.ForeignKeys = fks - } - - return edc, nil - -} - -func flattenTableConstraints(edc *bigquery.TableConstraints) []map[string]interface{} { - result := map[string]interface{}{} - - if edc.PrimaryKey != nil { - result["primary_key"] = flattenPrimaryKey(edc.PrimaryKey) - } - if edc.ForeignKeys != nil { - result["foreign_keys"] = flattenForeignKeys(edc.ForeignKeys) - } - - return 
[]map[string]interface{}{result} -} - -func expandTableReplicationInfo(cfg interface{}) map[string]interface{} { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - result := map[string]interface{}{} - - if v, ok := raw["source_project_id"]; ok { - result["source_project_id"] = v.(string) - } - - if v, ok := raw["source_dataset_id"]; ok { - result["source_dataset_id"] = v.(string) - } - - if v, ok := raw["source_table_id"]; ok { - result["source_table_id"] = v.(string) - } - - if v, ok := raw["replication_interval_ms"]; ok { - result["replication_interval_ms"] = int64(v.(int)) - } - - return result -} - -func flattenTableReplicationInfo(tableReplicationInfo map[string]interface{}) []map[string]interface{} { - result := map[string]interface{}{} - - if v, ok := tableReplicationInfo["sourceTable"]; ok { - sourceTable := v.(map[string]interface{}) - if v, ok := sourceTable["projectId"]; ok { - result["source_project_id"] = v.(string) - } - if v, ok := sourceTable["datasetId"]; ok { - result["source_dataset_id"] = v.(string) - } - if v, ok := sourceTable["tableId"]; ok { - result["source_table_id"] = v.(string) - } - } - - if v, ok := tableReplicationInfo["replicationIntervalMs"]; ok { - replicationIntervalMs := v.(string) - if i, err := strconv.Atoi(replicationIntervalMs); err == nil { - result["replication_interval_ms"] = int64(i) - } - } - - return []map[string]interface{}{result} -} - -func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Explicitly set virtual fields to default values on import - if err := d.Set("deletion_protection", true); err != nil { - return nil, fmt.Errorf("Error setting deletion_protection: %s", err) - } - {{- if ne 
$.TargetVersionName "ga" }} - if err := d.Set("allow_resource_tags_on_deletion", false); err != nil { - return nil, fmt.Errorf("Error setting allow_resource_tags_on_deletion: %s", err) - } - {{- end }} - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl deleted file mode 100644 index 20439416efb3..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl +++ /dev/null @@ -1,4261 +0,0 @@ -package bigquery_test - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/envvar" -) - -func TestAccBigQueryTable_Basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "DAY"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, 
tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_DropColumns(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioningDropColumns(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableTimePartitioningDropColumnsUpdate(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Kms(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - kms := acctest.BootstrapKMSKey(t) - cryptoKeyName := kms.CryptoKey.Name - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableKms(cryptoKeyName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func 
TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "HOUR"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MonthlyTimePartitioning(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "MONTH"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_YearlyTimePartitioning(t *testing.T) { - t.Parallel() - 
- datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "YEAR"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_HivePartitioning(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableHivePartitioning(bucketName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_HivePartitioningCustomSchema(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - 
acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableHivePartitioningCustomSchema(bucketName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_AvroPartitioning(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - avroFilePath := "./test-fixtures/avro-generated.avro" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableAvroPartitioning(bucketName, avroFilePath, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_json(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: 
testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-8"), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-16BE"), - }, - }, - }) -} - -func TestAccBigQueryTable_RangePartitioning(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableRangePartitioning(datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_PrimaryKey(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTablePrimaryKey(datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_ForeignKey(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) - tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_updateTableConstraints(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableTableConstraintsUpdate(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_View(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_updateView(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithNewSqlView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_WithViewAndSchema(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: 
testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description1"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description2"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - queryNew := strings.ReplaceAll(query, "2019", "2020") - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, queryNew), - }, - { - ResourceName: 
"google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Update(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - - enable_refresh := "false" - refresh_interval_ms := "3600000" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning(datasetID, tableID, materialized_viewID, enable_refresh, refresh_interval_ms, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", 
"last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_NonIncremental_basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewNonIncremental_basic(datasetID, tableID, materialized_viewID, query, maxStaleness), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquet(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquet(datasetID, tableID, bucketName, objectName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquetOptions(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, true, true), - }, - { - Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, false, false), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_iceberg(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSIceberg(datasetID, tableID, bucketName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquetFileSetSpecType(t *testing.T) { - t.Parallel() - - bucketName := 
acctest.TestBucketName(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - parquetFileName := "test.parquet" - manifestName := fmt.Sprintf("tf_test_%s.manifest.json", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetManifest(datasetID, tableID, bucketName, manifestName, parquetFileName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_queryAcceleration(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - metadataCacheMode := "AUTOMATIC" - // including an optional field. Should work without specifiying. 
- // Has to follow google sql IntervalValue encoding - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetWithQueryAcceleration(connectionID, datasetID, tableID, bucketName, objectName, metadataCacheMode, maxStaleness), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_objectTable(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - // including an optional field. Should work without specifiying. - // Has to follow google sql IntervalValue encoding - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - { - Config: testAccBigQueryTableFromGCSObjectTableMetadata(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - { - Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseNameReference(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "US" - connection_id_reference := "google_bigquery_connection.test.name" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. 
- location := "US" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsCentral1LowerCase(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "us-central1" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsEast1(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. 
- location := "US-EAST1" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_EuropeWest8(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "EUROPE-WEST8" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", "\\\""), - Check: testAccCheckBigQueryExtData(t, "\""), - }, - { - Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", ""), - Check: testAccCheckBigQueryExtData(t, ""), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_InvalidSchemas(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON), - ExpectError: regexp.MustCompile("contains an invalid JSON"), - }, - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON_LIST), - ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), - }, - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), - ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchemaAndConnectionID_UpdateNoConnectionID(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := 
fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateToConnectionID(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: 
[]resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId2(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateAllowQuotedNewlines(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchema_UpdatAllowQuotedNewlines(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_bigtable(t *testing.T) { - // bigtable instance does not use the shared HTTP client, this test creates an instance - acctest.SkipIfVcr(t) - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 8), - "project": envvar.GetTestProjectFromEnv(), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromBigtable(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_bigtable_options(t *testing.T) { - // bigtable instance does not use the shared HTTP client, this test creates an instance - acctest.SkipIfVcr(t) - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 8), - "project": envvar.GetTestProjectFromEnv(), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromBigtableOptions(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromBigtable(context), - }, - }, - }) -} - -func TestAccBigQueryDataTable_sheet(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromSheet(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_jsonEquivalency(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_jsonEq(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_jsonEqModeRemoved(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_canReorderParameters(t *testing.T) { - t.Parallel() - - datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - // we don't run any checks because the resource will error out if - // it attempts to destroy/tear down. - Config: testAccBigQueryTable_jsonPreventDestroy(datasetID, tableID), - }, - { - Config: testAccBigQueryTable_jsonPreventDestroyOrderChanged(datasetID, tableID), - }, - { - Config: testAccBigQueryTable_jsonEq(datasetID, tableID), - }, - }, - }) -} - -func TestAccBigQueryDataTable_expandArray(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_arrayInitial(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_arrayExpanded(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryTable_allowDestroy(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), - Destroy: true, - ExpectError: regexp.MustCompile("deletion_protection"), - }, - { - Config: testAccBigQueryTable_noAllowDestroyUpdated(datasetID, tableID), - }, - }, - }) -} - -func TestAccBigQueryTable_emptySchema(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_mimicCreateFromConsole(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTable_emptySchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithoutPolicyTagsToWithPolicyTags(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID 
:= envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToNoPolicyTag(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTag(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTags(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTagNames(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTagNames(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func 
TestAccBigQueryTable_invalidSchemas(t *testing.T) { - t.Parallel() - // Pending VCR support in https://github.com/hashicorp/terraform-provider-google/issues/15427. - acctest.SkipIfVcr(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON), - ExpectError: regexp.MustCompile("contains an invalid JSON"), - }, - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON_LIST), - ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), - }, - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), - ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_ConflictsWithView(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfoAndView(datasetID, tableID), - ExpectError: regexp.MustCompile("Schema, view, or materialized view cannot be specified when table replication info is present"), - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_WithoutReplicationInterval(t *testing.T) { - t.Parallel() - - projectID := envvar.GetTestProjectFromEnv() - - 
sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) - sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) - sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) - replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) - replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) - sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", acctest.RandString(t, 10)) - dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) - replicationIntervalExpr := "" - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "time": {}, - }, - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), - }, - { - ResourceName: "google_bigquery_table.replica_mv", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_WithReplicationInterval(t *testing.T) { - t.Parallel() - - projectID := envvar.GetTestProjectFromEnv() - - sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) - sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) - sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) - replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) - replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) - sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", 
acctest.RandString(t, 10)) - dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) - replicationIntervalExpr := "replication_interval_ms = 600000" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "time": {}, - }, - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), - }, - { - ResourceName: "google_bigquery_table.replica_mv", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -{{ if ne $.TargetVersionName `ga` -}} -func TestAccBigQueryTable_ResourceTags(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "project_id": envvar.GetTestProjectFromEnv(), - "dataset_id": fmt.Sprintf("tf_test_dataset_%s", acctest.RandString(t, 10)), - "table_id" : fmt.Sprintf("tf_test_table_%s", acctest.RandString(t, 10)), - "tag_key_name1": fmt.Sprintf("tf_test_tag_key1_%s", acctest.RandString(t, 10)), - "tag_value_name1": fmt.Sprintf("tf_test_tag_value1_%s", acctest.RandString(t, 10)), - "tag_key_name2": fmt.Sprintf("tf_test_tag_key2_%s", acctest.RandString(t, 10)), - "tag_value_name2": fmt.Sprintf("tf_test_tag_value2_%s", acctest.RandString(t, 10)), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithResourceTags(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - { - Config: testAccBigQueryTableWithResourceTagsUpdate(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - // testAccBigQueryTableWithResourceTagsDestroy must be called at the end of this test to clear the resource tag bindings of the table before deletion. - { - Config: testAccBigQueryTableWithResourceTagsDestroy(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - }, - }) -} - -{{ end }} -func testAccCheckBigQueryExtData(t *testing.T, expectedQuoteChar string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_table" { - continue - } - - config := acctest.GoogleProviderConfig(t) - dataset := rs.Primary.Attributes["dataset_id"] - table := rs.Primary.Attributes["table_id"] - res, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, dataset, table).Do() - if err != nil { - return err - } - - if res.Type != "EXTERNAL" { - return fmt.Errorf("Table \"%s.%s\" is of type \"%s\", expected EXTERNAL.", dataset, table, res.Type) - } - edc := res.ExternalDataConfiguration - cvsOpts := edc.CsvOptions - if cvsOpts == nil || *cvsOpts.Quote != expectedQuoteChar { - return fmt.Errorf("Table \"%s.%s\" quote should be '%s' but was '%s'", dataset, table, expectedQuoteChar, *cvsOpts.Quote) - } - } - return nil - } -} - -func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.State) error { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_table" { - 
continue - } - - config := acctest.GoogleProviderConfig(t) - _, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["table_id"]).Do() - if err == nil { - return fmt.Errorf("Table still present") - } - } - - return nil - } -} - -func testAccBigQueryTableBasicSchema(datasetID, tableID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" -} - -resource "google_bigquery_table" "test" { - deletion_protection = false - table_id = "%s" - dataset_id = google_bigquery_dataset.test.dataset_id - - schema = < 0 { + ni.AliasIpRanges = commonAliasIpRanges + } op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error removing alias_ip_range: {{"{{"}}err{{"}}"}}", err) @@ -2717,6 +2761,10 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok && v != "" { + disk.Interface = v.(string) + } + keyValue, keyOk := diskConfig["disk_encryption_key_raw"] if keyOk { if keyValue != "" { @@ -2918,6 +2966,10 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok { + disk.Interface = v.(string) + } + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { if v != "" { disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ @@ -3018,6 +3070,9 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config // originally specified to avoid diffs. 
"disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), } + if _,ok := d.GetOk("boot_disk.0.interface"); ok { + result["interface"] = disk.Interface + } diskDetails, err := getDisk(disk.Source, d, config) if err != nil { @@ -3174,3 +3229,20 @@ func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { } return false } + +// Alias ip ranges cannot be removed and created at the same time. This checks if there are any unchanged alias ip ranges +// to be kept in between the PATCH operations on Network Interface +func CheckForCommonAliasIp(old, new *compute.NetworkInterface) []*compute.AliasIpRange { + newAliasIpMap := make(map[string]bool) + for _, ipRange := range new.AliasIpRanges { + newAliasIpMap[ipRange.IpCidrRange] = true + } + + resultAliasIpRanges := make([]*compute.AliasIpRange, 0) + for _, val := range old.AliasIpRanges { + if newAliasIpMap[val.IpCidrRange] { + resultAliasIpRanges = append(resultAliasIpRanges, val) + } + } + return resultAliasIpRanges +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl index 2ab1d486b783..3f1a669417c7 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl @@ -46,17 +46,6 @@ func computeInstanceFromMachineImageSchema() map[string]*schema.Schema { s[field].Optional = true } - // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. - // Passing field_name = [] in this mode differentiates between an intentionally empty - // block vs an ignored computed block. 
- nic := s["network_interface"].Elem.(*schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = schema.SchemaConfigModeAttr - } - recurseOnSchema(s, func(field *schema.Schema) { // We don't want to accidentally use default values to override the instance // machine image, so remove defaults. diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl index d01ec9aa745a..fbcc2f25170d 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl @@ -120,7 +120,7 @@ func TestAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout( var expectedLocalSsdRecoveryTimeout = compute.Duration{} expectedLocalSsdRecoveryTimeout.Nanos = 0 expectedLocalSsdRecoveryTimeout.Seconds = 7200 - + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), @@ -224,6 +224,69 @@ func TestAccComputeInstanceFromMachineImage_diffProject(t *testing.T) { }) } +func TestAccComputeInstanceFromMachineImage_confidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instance compute.Instance + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnable(fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10))), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar1", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar1", "machine_type", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar1", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, ""), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnableSev(fmt.Sprintf("tf-test-sev0-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev0-generated-%s", acctest.RandString(t, 10)), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar2", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar2", "machine_type", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar2", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigNoEnableSev(fmt.Sprintf("tf-test-sev1-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev1-generated-%s", acctest.RandString(t, 10)), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar3", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar3", "min_cpu_platform", "AMD Milan"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar3", 
"scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV"), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigSevSnp(fmt.Sprintf("tf-test-sev-snp-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev-snp-generated-%s", acctest.RandString(t, 10)), "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar4", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar4", "min_cpu_platform", "AMD Milan"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar4", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV_SNP"), + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigTdx(fmt.Sprintf("tf-test-tdx-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-tdx-generated-%s", acctest.RandString(t, 10)), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar5", &instance), + // Check that fields were set based on the template + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar5", "machine_type", "c3-standard-4"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar5", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + ), + + }, + {{- end }} + }, + }) +} + func testAccCheckComputeInstanceFromMachineImageDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -298,6 +361,298 @@ resource "google_compute_instance_from_machine_image" "foobar" { 
`, instance, instance, newInstance) } +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnable(instance string, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm1" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + enable_confidential_compute = true + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar1" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm1.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar1" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar1.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = true + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, instance, newInstance) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnableSev(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm2" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar2" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm2.self_link +} + 
+resource "google_compute_instance_from_machine_image" "foobar2" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar2.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigNoEnableSev(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm3" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + min_cpu_platform = "AMD Milan" + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } +} + +resource "google_compute_machine_image" "foobar3" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm3.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar3" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar3.self_link + + labels = { + my_key = "my_value" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigSevSnp(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm4" { + provider = google-beta + + 
boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + min_cpu_platform = "AMD Milan" + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar4" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm4.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar4" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar4.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigTdx(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm5" { + provider = google-beta + + boot_disk { + initialize_params { + image = "tdx-guest-images/ubuntu-2204-jammy-v20240701" + } + } + + name = "%s" + machine_type = "c3-standard-4" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar5" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm5.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar5" { + provider = google-beta + 
name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar5.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + confidential_instance_type = %q + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} +{{- end }} + {{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstanceFromMachineImage_maxRunDuration(instance, newInstance string) string { return fmt.Sprintf(` @@ -501,7 +856,7 @@ resource "google_compute_instance" "vm" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key = "value" } @@ -530,7 +885,7 @@ resource "google_compute_instance_from_machine_image" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl index fae3529c0ea6..6a691b8317a4 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl @@ -50,17 +50,6 @@ func computeInstanceFromTemplateSchema() map[string]*schema.Schema { s[field].Optional = true } - // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. - // Passing field_name = [] in this mode differentiates between an intentionally empty - // block vs an ignored computed block. 
- nic := s["network_interface"].Elem.(*schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = schema.SchemaConfigModeAttr - } - // Remove deprecated/removed fields that are never d.Set. We can't // programmatically remove all of them, because some of them still have d.Set // calls. diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl index 27eb5b8f6030..7b32689d6a2f 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl @@ -377,49 +377,6 @@ func TestAccComputeInstanceFromTemplate_overrideScheduling(t *testing.T) { }) } -func TestAccComputeInstanceFromTemplate_012_removableFields(t *testing.T) { - t.Parallel() - - var instance compute.Instance - instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) - templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) - resourceName := "google_compute_instance_from_template.inst" - - // First config is a basic instance from template, second tests the empty list syntax - config1 := testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + - testAccComputeInstanceFromTemplate_012_removableFields1(instanceName) - config2 := testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + - testAccComputeInstanceFromTemplate_012_removableFields2(instanceName) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - 
CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: config1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists(t, resourceName, &instance), - - resource.TestCheckResourceAttr(resourceName, "service_account.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_account.0.scopes.#", "3"), - ), - }, - { - Config: config2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists(t, resourceName, &instance), - - // Check that fields were able to be removed - resource.TestCheckResourceAttr(resourceName, "scratch_disk.#", "0"), - resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_interface.0.alias_ip_range.#", "0"), - ), - }, - }, - }) -} - func TestAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(t *testing.T) { var instance compute.Instance instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) @@ -463,6 +420,71 @@ func testAccCheckComputeInstanceFromTemplateDestroyProducer(t *testing.T) func(s } } +func TestAccComputeInstanceFromTemplate_confidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instance2 compute.Instance + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigEnable( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "SEV"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, true, ""), + ), + }, + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigSevSnp( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV_SNP"), + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigTdx( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + 
testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "TDX"), + ), + }, + {{- end }} + }, + }) +} + func testAccComputeInstanceFromTemplate_basic(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -878,7 +900,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -964,7 +986,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" } @@ -989,7 +1011,7 @@ resource "google_compute_instance_from_template" "foobar" { automatic_restart = false } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -1430,8 +1452,7 @@ resource "google_compute_instance_from_template" "inst" { `, templateDisk, template, instance) } -func testAccComputeInstanceFromTemplate_012_removableFieldsTpl(template string) string { - +func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { family = "debian-11" @@ -1445,7 +1466,6 @@ resource "google_compute_instance_template" "foobar" { disk { source_image = data.google_compute_image.my_image.self_link auto_delete = true - disk_size_gb = 20 boot = true } @@ -1454,75 +1474,242 @@ resource "google_compute_instance_template" "foobar" { } metadata = { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] + startup-script = "#!/bin/bash\necho Hello" } can_ip_forward = true } -`, template) -} -func 
testAccComputeInstanceFromTemplate_012_removableFields1(instance string) string { - return fmt.Sprintf(` resource "google_compute_instance_from_template" "inst" { name = "%s" zone = "us-central1-a" - allow_stopping_for_update = true - source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + metadata = { + startup-script = "" + } } -`, instance) +`, template, instance) } -func testAccComputeInstanceFromTemplate_012_removableFields2(instance string) string { +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigEnable(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { return fmt.Sprintf(` -resource "google_compute_instance_from_template" "inst" { +data "google_compute_image" "my_image1" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_disk" "foobar1" { + name = "%s" + image = data.google_compute_image.my_image1.self_link + size = 10 + type = "pd-standard" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar1" { + name = "%s" + source_disk = google_compute_disk.foobar1.self_link +} + +resource "google_compute_instance_template" "foobar1" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } +} + +resource "google_compute_instance_from_template" "inst1" { name = "%s" zone = "us-central1-a" - allow_stopping_for_update = true + source_instance_template = google_compute_instance_template.foobar1.self_link +} - source_instance_template = google_compute_instance_template.foobar.self_link +resource 
"google_compute_instance_template" "foobar2" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = true + } +} + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar2.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, instance2) +} + +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigSevSnp(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image1" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_disk" "foobar1" { + name = "%s" + image = data.google_compute_image.my_image1.self_link + size = 10 + type = "pd-standard" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar1" { + name = "%s" + source_disk = google_compute_disk.foobar1.self_link +} + +resource "google_compute_instance_template" "foobar3" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } - // Overrides network_interface { - alias_ip_range = [] + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } +} + +resource "google_compute_instance_from_template" "inst1" { + name = "%s" + zone = 
"us-central1-a" + + source_instance_template = google_compute_instance_template.foobar3.self_link +} + +resource "google_compute_instance_template" "foobar4" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true } - service_account = [] + network_interface { + network = "default" + } - scratch_disk = [] + metadata = { + foo = "bar" + } - attached_disk = [] + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } - timeouts { - create = "10m" - update = "10m" + confidential_instance_config { + confidential_instance_type = %q } } -`, instance) + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar4.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, confidentialInstanceType, instance2) } -func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigTdx(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-11" - project = "debian-cloud" +data "google_compute_image" "my_image2" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" } -resource "google_compute_instance_template" "foobar" { +resource "google_compute_disk" "foobar2" { + name = "%s" + image = data.google_compute_image.my_image2.self_link + size = 10 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar2" { + name = "%s" + source_disk = google_compute_disk.foobar2.self_link +} + +resource "google_compute_instance_template" "foobar5" { name = "%s" - machine_type = "e2-medium" + machine_type = 
"c3-standard-4" disk { - source_image = data.google_compute_image.my_image.self_link + source_image = google_compute_image.foobar2.name auto_delete = true boot = true + disk_type = "pd-balanced" + type = "PERSISTENT" } network_interface { @@ -1530,22 +1717,142 @@ resource "google_compute_instance_template" "foobar" { } metadata = { - startup-script = "#!/bin/bash\necho Hello" + foo = "bar" } - can_ip_forward = true + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } } -resource "google_compute_instance_from_template" "inst" { +resource "google_compute_instance_from_template" "inst1" { name = "%s" zone = "us-central1-a" - source_instance_template = google_compute_instance_template.foobar.self_link + source_instance_template = google_compute_instance_template.foobar5.self_link +} + +resource "google_compute_instance_template" "foobar6" { + name = "%s" + machine_type = "c3-standard-4" + + disk { + source_image = google_compute_image.foobar2.name + auto_delete = true + boot = true + disk_type = "pd-balanced" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } - // Overrides metadata = { - startup-script = "" + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + confidential_instance_type = %q } } -`, template, instance) + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar6.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, confidentialInstanceType, instance2) +} + +func TestAccComputeInstanceFromTemplateWithOverride_interface(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + 
templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplateWithOverride_interface(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "boot_disk.0.interface", "SCSI"), + ), + }, + }, + }) +} + +func testAccComputeInstanceFromTemplateWithOverride_interface(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobarboot" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "foobarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobarboot.name + auto_delete = false + boot = true + } + + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + attached_disk { + source = google_compute_disk.foobarattach.name + } + // Overrides + boot_disk { + interface = "SCSI" + source = google_compute_disk.foobarboot.name + } +} +`, template, instance, template, instance) } diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go index b172d6e00f44..7567a28d28e8 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go @@ -12,7 +12,7 @@ func TestAccComputeInstanceSettings_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), + "random_suffix": acctest.RandString(t, 10), } acctest.VcrTest(t, resource.TestCase{ diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl index 7914f0c9dafe..bb3b7d8521c2 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -95,14 +95,14 @@ func ResourceComputeInstanceTemplate() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, + Description: `Creates a unique name beginning with the specified prefix. Conflicts with name. Max length is 54 characters. Prefixes with lengths longer than 37 characters will use a shortened UUID that will be more prone to collisions.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // shortened uuid is 9 characters, limit the prefix to 55. 
value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -775,7 +775,7 @@ be from 0 to 999,999,999 inclusive.`, recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour.`, - + Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "seconds": { @@ -910,9 +910,10 @@ be from 0 to 999,999,999 inclusive.`, Optional: true, ForceNew: true, Description: ` - Specifies which confidential computing technology to use. - This could be one of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required. TDX is only available in beta.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, }, @@ -1002,7 +1003,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template. - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1432,7 +1433,12 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index ac553ff83357..da3487257612 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -804,6 +804,15 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceTemplateConfidentialInstanceConfigEnableTdx(acctest.RandString(t, 10), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar5", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "TDX"), + ), + }, + {{- end }} }, }) } @@ -865,6 +874,45 @@ func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_withNamePrefix(t *testing.T) { + t.Parallel() + + // 8 + 46 = 54 which is the valid max + normalPrefix := "tf-test-" + fmt.Sprintf("%046s", "") + reducedSuffixPrefix := "tf-test-" + fmt.Sprintf("%029s", "") + invalidPrefix := 
"tf-test-" + fmt.Sprintf("%047s", "") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_withNamePrefix(normalPrefix), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + { + Config: testAccComputeInstanceTemplate_withNamePrefix(invalidPrefix), + PlanOnly: true, + ExpectError: regexp.MustCompile("cannot be longer than 54 characters"), + }, + { + Config: testAccComputeInstanceTemplate_withNamePrefix(reducedSuffixPrefix), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + func TestAccComputeInstanceTemplate_withScratchDisk(t *testing.T) { t.Parallel() @@ -1043,7 +1091,7 @@ func TestAccComputeInstanceTemplate_managedEnvoy(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, - }, + }, }) } @@ -1621,7 +1669,7 @@ func testAccCheckComputeInstanceTemplateExistsInProject(t *testing.T, n, p strin found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( p, templateName).View("FULL").Do() {{- end }} - + if err != nil { return err } @@ -2599,6 +2647,28 @@ resource "google_compute_instance_template" "foobar" { `, suffix, suffix) } +func testAccComputeInstanceTemplate_withNamePrefix(prefix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-12" + project = "debian-cloud" +} +resource "google_compute_instance_template" "foobar" { + name_prefix = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = 
true + boot = true + } + network_interface { + network = "default" + } +} +`, prefix) +} + func testAccComputeInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3442,6 +3512,41 @@ resource "google_compute_instance_template" "foobar4" { `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplateConfidentialInstanceConfigEnableTdx(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_instance_template" "foobar5" { + name = "tf-test-instance5-template-%s" + machine_type = "c3-standard-4" + + disk { + source_image = data.google_compute_image.my_image3.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, confidentialInstanceType) +} +{{- end }} + func testAccComputeInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3976,7 +4081,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 7308ff48735d..1645c003fed7 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -20,6 
+20,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" @@ -101,6 +102,60 @@ func TestMinCpuPlatformDiffSuppress(t *testing.T) { } } +func TestCheckForCommonAliasIp(t *testing.T) { + type testCase struct { + old, new []*compute.AliasIpRange + expected []*compute.AliasIpRange + } + + testCases := []testCase{ + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.2.0/24"}, + }, + expected: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + }, + }, + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + {IpCidrRange: "10.0.2.0/24"}, + }, + expected: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + }, + }, + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "192.168.0.0/24"}, + {IpCidrRange: "172.17.0.0/24"}, + }, + expected: []*compute.AliasIpRange{}, + }, + } + + for _, tc := range testCases { + oldInterface := &compute.NetworkInterface{AliasIpRanges: tc.old} + newInterface := &compute.NetworkInterface{AliasIpRanges: tc.new} + result := tpgcompute.CheckForCommonAliasIp(oldInterface, newInterface) + assert.Equal(t, tc.expected, result) + } +} + func computeInstanceImportStep(zone, instanceName string, additionalImportIgnores []string) resource.TestStep { // metadata is only read into state if set in the config // importing doesn't know whether metadata.startup_script vs 
metadata_startup_script is set in the config, @@ -1820,7 +1875,7 @@ func TestAccComputeInstance_secondaryAliasIpRange(t *testing.T) { testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-secondary", "172.16.0.0/24"), ), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_secondaryAliasIpRangeUpdate(networkName, subnetName, instanceName), Check: resource.ComposeTestCheckFunc( @@ -1828,7 +1883,51 @@ func TestAccComputeInstance_secondaryAliasIpRange(t *testing.T) { testAccCheckComputeInstanceHasAliasIpRange(&instance, "", "10.0.1.0/24"), ), }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), + }, + }) +} + +func TestAccComputeInstance_aliasIpRangeCommonAddresses(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_secondaryAliasIpRangeTwoAliasIps(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, 
"google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.2.0/24"), + ), + }, computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddress(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.3.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddressDifferentRanges(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-secondary", "172.16.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.3.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), }, }) } @@ -1927,11 +2026,22 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceConfidentialInstanceConfigEnableTdx(instanceName, "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, 
"google_compute_instance.foobar5", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + ), + }, + {{- end }} }, }) } func TestAccComputeInstance_confidentialHyperDiskBootDisk(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() kms := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-bootstrap-hyperdisk-key1") @@ -2563,15 +2673,72 @@ func TestAccComputeInstance_subnetworkUpdate(t *testing.T) { { Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_subnetworkUpdateTwo(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", 
"network_interface.0.alias_ip_range.1.subnetwork_range_name"}), + }, + }) +} + +func TestAccComputeInstance_subnetworkProjectMustMatchError(t *testing.T) { + t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnetworkProjectExpectError(suffix, instanceName), + ExpectError: regexp.MustCompile("must match subnetwork_project"), + }, + }, + }) +} + +func TestAccComputeInstance_networkIpUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkIpUpdate(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.3"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_networkIpUpdateByHand(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.4"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_networkIpUpdateWithComputeAddress(suffix, 
instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.5"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), }, }) } @@ -3964,7 +4131,7 @@ func testAccCheckComputeInstanceScratchDisk(instance *compute.Instance, interfac i, deviceName, disk.DeviceName) } } - + i++ } } @@ -7177,6 +7344,162 @@ resource "google_compute_instance" "foobar" { `, network, subnet, instance) } +func testAccComputeInstance_secondaryAliasIpRangeTwoAliasIps(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.2.0/24" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddress(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" 
+ project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.3.0/24" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddressDifferentRanges(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + 
network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-secondary" + ip_cidr_range = "172.16.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.3.0/24" + } + } +} +`, network, subnet, instance) +} + func testAccComputeInstance_hostname(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -7839,6 +8162,42 @@ resource "google_compute_instance" "foobar6" { `, instance, minCpuPlatform, confidentialInstanceType, instance, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceConfidentialInstanceConfigEnableTdx(instance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_instance" "foobar5" { + name = "%s" + machine_type = "c3-standard-4" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image3.self_link + } + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, instance, confidentialInstanceType) +} +{{- end }} + func testAccComputeInstance_attributionLabelCreate(instance, add, strategy string) string { return fmt.Sprintf(` provider "google" { @@ -8352,6 +8711,183 @@ func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string `, suffix, suffix, suffix, suffix, instance) } +func testAccComputeInstance_subnetworkProjectExpectError(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + 
auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + subnetwork_project = "placeholder" + } + } +`, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdate(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = "10.3.0.3" + } + } +`, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdateByHand(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" 
+ project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = "10.3.0.4" + } + } +`, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdateWithComputeAddress(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + 
} + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = google_compute_address.inst-test-address.address + } + } +`, suffix, suffix, suffix, instance) +} + func testAccComputeInstance_queueCountSet(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -8640,7 +9176,7 @@ resource "google_compute_instance" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -8994,7 +9530,7 @@ resource "google_compute_subnetwork" "subnet2" { stack_type = "IPV4_ONLY" network = google_compute_network.net2.id } - + resource "google_compute_subnetwork" "subnet-ipv62" { region = "europe-west1" name = "tf-test-subnet-ip62-%s" @@ -9009,7 +9545,7 @@ resource "google_compute_address" "normal-address2" { region = "europe-west1" name = "tf-test-addr-normal2-%s" } - + resource "google_compute_address" "ipv6-address2" { region = "europe-west1" name = "tf-test-addr-ipv62-%s" @@ -9166,7 +9702,7 @@ resource "google_compute_subnetwork" "subnet2" { stack_type = "IPV4_ONLY" network = google_compute_network.net2.id } - + resource "google_compute_subnetwork" "subnet-ipv62" { region = "europe-west1" name = "tf-test-subnet-ip62-%s" @@ -9181,7 +9717,7 @@ resource "google_compute_address" "normal-address2" { region = "europe-west1" name = "tf-test-addr-normal2-%s" } - + resource "google_compute_address" "ipv6-address2" { region = "europe-west1" name = "tf-test-addr-ipv62-%s" @@ -9722,6 +10258,8 @@ resource "google_compute_instance" "foobar" { } func TestAccComputeInstance_bootDisk_storagePoolSpecified(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() instanceName := fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) @@ -9817,3 +10355,79 @@ resource "google_compute_instance" "foobar" { } `, instanceName, zone, storagePoolUrl) } + +func 
TestAccComputeInstance_bootAndAttachedDisk_interface(t *testing.T) { + t.Parallel() + + instanceName1 := fmt.Sprintf("tf-test-vm1-%s", acctest.RandString(t, 10)) + diskName1 := fmt.Sprintf("tf-test-disk1-%s", acctest.RandString(t, 10)) + instanceName2 := fmt.Sprintf("tf-test-vm2-%s", acctest.RandString(t, 10)) + diskName2 := fmt.Sprintf("tf-test-disk2-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName1, diskName1, envvar.GetTestZoneFromEnv(), "h3-standard-88", "NVME", false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", "NVME"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "h3-standard-88"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName1, []string{"desired_status","allow_stopping_for_update"}), + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName2, diskName2, envvar.GetTestZoneFromEnv(), "n2-standard-8", "SCSI", true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", "SCSI"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "n2-standard-8"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName2, []string{"desired_status","allow_stopping_for_update"}), + }, + }) +} + +func testAccComputeInstance_bootAndAttachedDisk_interface(instanceName, diskName, zone, machineType, bootDiskInterface string, allowStoppingForUpdate bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2204-lts" + project = "ubuntu-os-cloud" +} + +data "google_project" "project" {} + +resource 
"google_compute_disk" "foorbarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type= "%s" + zone = "%s" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + type = "pd-balanced" + size = 500 + } + interface = "%s" + + } + + attached_disk { + source = google_compute_disk.foorbarattach.self_link + } + + network_interface { + network = "default" + } + allow_stopping_for_update = %t + desired_status = "RUNNING" + +} +`, diskName, instanceName, machineType, zone, bootDiskInterface, allowStoppingForUpdate) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go index 30c7e2c11691..cfaaebaa5840 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go @@ -66,6 +66,8 @@ func TestAccComputeNetworkFirewallPolicyRule_update(t *testing.T) { } func TestAccComputeNetworkFirewallPolicyRule_multipleRules(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl index ddd4243dd38c..5a2eed79d6b4 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl @@ -262,22 +262,22 @@ func TestAccComputeRegionBackendService_withBackendAndIAP(t *testing.T) { ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - { - Config: testAccComputeRegionBackendService_ilbBasicwithIAP(backendName, checkName), + { + Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), }, { ResourceName: "google_compute_region_backend_service.foobar", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, }, { - Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), + Config: testAccComputeRegionBackendService_ilbBasicwithIAP(backendName, checkName), }, { ResourceName: "google_compute_region_backend_service.foobar", ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, }, }, }) @@ -589,7 +589,8 @@ resource "google_compute_region_backend_service" "foobar" { health_checks = [google_compute_health_check.zero.self_link] region = "us-central1" - protocol = "%s" + protocol = "%s" + connection_draining_timeout_sec = 0 failover_policy { # Disable connection drain on failover cannot be set when the protocol is UDP drop_traffic_if_unhealthy = "%s" @@ -615,7 +616,8 @@ resource "google_compute_region_backend_service" "foobar" { health_checks = [google_compute_health_check.zero.self_link] region = "us-central1" - protocol = "%s" + protocol = "%s" + connection_draining_timeout_sec = 0 failover_policy { # Disable connection drain on failover cannot be set when the protocol is UDP drop_traffic_if_unhealthy = "%s" @@ -703,6 +705,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group + balancing_mode = "CONNECTION" {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} failover = true } @@ -772,6 +775,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = 
google_compute_instance_group_manager.foobar.instance_group + balancing_mode = "CONNECTION" {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} failover = true } @@ -874,6 +878,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group capacity_scaler = 1.0 + balancing_mode = "CONNECTION" } health_checks = [google_compute_health_check.default.self_link] @@ -1042,6 +1047,7 @@ resource "google_compute_region_backend_service" "foobar" { } iap { + enabled = true oauth2_client_id = "test" oauth2_client_secret = "test" } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl index 3439bb4abb7a..1c99f210953e 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -76,11 +76,11 @@ func ResourceComputeRegionInstanceTemplate() *schema.Resource { Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -858,6 +858,7 @@ be from 0 to 999,999,999 inclusive.`, Description: `Defines whether the instance should have confidential compute enabled. 
Field will be deprecated in a future release.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, + {{- if eq $.TargetVersionName "ga" }} "confidential_instance_type": { Type: schema.TypeString, Optional: true, @@ -868,6 +869,19 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, + {{- else }} + "confidential_instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + {{- end }} }, }, }, @@ -955,7 +969,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template, - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1129,7 +1143,12 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index d22cdd5968b0..06c9ada78409 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -714,6 +714,15 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnableTdx(acctest.RandString(t, 10), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar5", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "TDX"), + ), + }, + {{- end }} }, }) } @@ -2973,6 +2982,42 @@ resource "google_compute_region_instance_template" "foobar4" { `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func 
testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnableTdx(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_region_instance_template" "foobar5" { + name = "tf-test-instance-template-%s" + machine_type = "c3-standard-4" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image3.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, confidentialInstanceType) +} +{{- end }} + func testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3519,7 +3564,7 @@ resource "google_compute_region_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl new file mode 100644 index 000000000000..29631d2844ed --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl @@ -0,0 +1,992 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAccComputeRegionTargetHttpsProxy_update(t *testing.T) { + t.Parallel() + + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_basic1(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic2(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic3(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_basic1(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar1.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar1.self_link] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + 
http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic2(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + 
url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = [ + google_compute_region_ssl_certificate.foobar1.self_link, + google_compute_region_ssl_certificate.foobar2.self_link, + ] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = 
google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic3(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar2.self_link] + ssl_policy = google_compute_region_ssl_policy.foobar.self_link +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = 
"httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_policy" "foobar" { + name = "sslproxy-test-%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" + region = "us-central1" +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id, id) +} + +func TestAccComputeRegionTargetHttpsProxy_addSslPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + 
"resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: 
https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region 
= "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + 
region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + ssl_policy = google_compute_region_ssl_policy.default.id +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_policy" "default" { + project = "%{project_id}" + region = "us-central1" + name = "ssl-policy-%{resource_suffix}" + + profile = "RESTRICTED" + min_tls_version = "TLS_1_2" +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = 
"tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccComputeRegionTargetHttpsProxy_addServerTlsPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = 
google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = 
google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + provider = google-beta + + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + provider = google-beta + + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + provider = google-beta + + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withServerTlsPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` + +data "google_project" "project" { + provider = google-beta + project_id = "%{project_id}" +} + +resource "google_compute_forwarding_rule" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = 
google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + server_tls_policy = google_network_security_server_tls_policy.default.id +} + +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + + project = "%{project_id}" + location = "us-central1" + name = "trust-config-%{resource_suffix}" + + trust_stores { + 
trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource "google_network_security_server_tls_policy" "default" { + provider = google-beta + + project = "%{project_id}" + location = "us-central1" + name = "tls-policy-%{resource_suffix}" + allow_open = "false" + mtls_policy { + client_validation_mode = "REJECT_INVALID" + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/us-central1/trustConfigs/${google_certificate_manager_trust_config.default.name}" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_url_map" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + provider = google-beta + + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + provider = google-beta + + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + provider = google-beta + + name = 
"tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl index 220c69f01509..2f298fdbc6ec 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl @@ -188,21 +188,85 @@ func TestAccComputeSubnetwork_secondaryIpRanges(t *testing.T) { ), }, { - Config: testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName), + Config: testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), - testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), ), }, + }, + }) +} + +func TestAccComputeSubnetwork_secondaryIpRanges_sendEmpty(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + // Start without secondary_ip_range at all { - Config: 
testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + ), + }, + // Add one secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, "true"), Check: resource.ComposeTestCheckFunc( testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Remove it with send_secondary_ip_range_if_empty = true + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Apply two secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_double(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + // Remove both with send_secondary_ip_range_if_empty = true + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, 
"google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), ), }, + // Apply one secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Check removing without send_secondary_ip_range_if_empty produces no diff (normal computed behavior) + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "false"), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, }, }) } @@ -604,7 +668,7 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" `, cnName, subnetworkName) } -func testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName string) string { +func testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, sendEmpty string) string { return fmt.Sprintf(` resource "google_compute_network" "custom-test" { name = "%s" @@ -616,9 +680,59 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" ip_cidr_range = "10.2.0.0/16" region = "us-central1" network = google_compute_network.custom-test.self_link - secondary_ip_range = [] + send_secondary_ip_range_if_empty = "%s" } -`, cnName, subnetworkName) +`, cnName, subnetworkName, sendEmpty) +} + +func testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, sendEmpty string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + 
+resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + send_secondary_ip_range_if_empty = "%s" +} +`, cnName, subnetworkName, sendEmpty) +} + +func testAccComputeSubnetwork_sendEmpty_double(cnName, subnetworkName, sendEmpty string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + send_secondary_ip_range_if_empty = "%s" +} +`, cnName, subnetworkName, sendEmpty) } func testAccComputeSubnetwork_flowLogs(cnName, subnetworkName string) string { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl index 4d037cfe92f9..4a32212c9ff3 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl @@ -17,8 +17,10 @@ import ( ) const ( - canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" - canonicalCertificateMapTemplate = 
"//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" + canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" + canonicalSslPolicyTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslPolicies/%s" + canonicalCertificateMapTemplate = "//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" + canonicalServerTlsPolicyTemplate = "//networksecurity.googleapis.com/projects/%s/locations/global/serverTlsPolicies/%s" ) func TestAccComputeTargetHttpsProxy_update(t *testing.T) { @@ -39,9 +41,10 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) { t, "google_compute_target_https_proxy.foobar", &proxy), testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasServerTlsPolicy(t, "tf-test-server-tls-policy-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasSslPolicy(t, "tf-test-httpsproxy-sslpolicy-"+resourceSuffix, &proxy), ), }, - { Config: testAccComputeTargetHttpsProxy_basic2(resourceSuffix), Check: resource.ComposeTestCheckFunc( @@ -50,6 +53,8 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) { testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert2-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasSslPolicy(t, "tf-test-httpsproxy-sslpolicy2-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), ), }, }, @@ -80,6 +85,45 @@ func TestAccComputeTargetHttpsProxy_certificateMap(t *testing.T) { }) } +func TestAccComputeTargetHttpsProxyServerTlsPolicy_update(t 
*testing.T) { + t.Parallel() + + var proxy compute.TargetHttpsProxy + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), + ), + }, + { + Config: testAccComputeTargetHttpsProxyWithServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasServerTlsPolicy(t, "tf-test-server-tls-policy-"+resourceSuffix, &proxy), + ), + }, + { + Config: testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), + ), + }, + }, + }) +} + func testAccCheckComputeTargetHttpsProxyExists(t *testing.T, n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -115,6 +159,7 @@ func testAccComputeTargetHttpsProxyDescription(description string, proxy *comput if proxy.Description != description { return fmt.Errorf("Wrong description: expected '%s' got '%s'", description, proxy.Description) } + return nil } } @@ -130,7 +175,43 @@ func testAccComputeTargetHttpsProxyHasSslCertificate(t *testing.T, cert string, } } - return fmt.Errorf("Ssl certificate not found: expected'%s'", certUrl) + return fmt.Errorf("Ssl 
certificate not found: expected '%s'", certUrl) + } +} + +func testAccComputeTargetHttpsProxyHasSslPolicy(t *testing.T, sslPolicy string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + sslPolicyUrl := fmt.Sprintf(canonicalSslPolicyTemplate, config.Project, sslPolicy) + + if tpgresource.ConvertSelfLinkToV1(proxy.SslPolicy) == sslPolicyUrl { + return nil + } + + return fmt.Errorf("Ssl Policy not found: expected '%s'", sslPolicyUrl) + } +} + +func testAccComputeTargetHttpsProxyHasServerTlsPolicy(t *testing.T, policy string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + serverTlsPolicyUrl := fmt.Sprintf(canonicalServerTlsPolicyTemplate, config.Project, policy) + + if tpgresource.ConvertSelfLinkToV1(proxy.ServerTlsPolicy) == serverTlsPolicyUrl { + return nil + } + + return fmt.Errorf("Server Tls Policy not found: expected '%s'", serverTlsPolicyUrl) + } +} + +func testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t *testing.T, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + if proxy.ServerTlsPolicy != "" { + return fmt.Errorf("Server Tls Policy found: expected 'null'") + } + + return nil } } @@ -143,18 +224,21 @@ func testAccComputeTargetHttpsProxyHasCertificateMap(t *testing.T, certificateMa return nil } - return fmt.Errorf("certificate map not found: expected'%s'", certificateMapUrl) + return fmt.Errorf("certificate map not found: expected '%s'", certificateMapUrl) } } func testAccComputeTargetHttpsProxy_basic1(id string) string { return fmt.Sprintf(` +data "google_project" "project" {} + resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "tf-test-httpsproxy-%s" - url_map = google_compute_url_map.foobar.self_link - ssl_certificates = 
[google_compute_ssl_certificate.foobar1.self_link] - ssl_policy = google_compute_ssl_policy.foobar.self_link + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar1.self_link] + ssl_policy = google_compute_ssl_policy.foobar.self_link + server_tls_policy = google_network_security_server_tls_policy.server_tls_policy.id } resource "google_compute_backend_service" "foobar" { @@ -192,7 +276,7 @@ resource "google_compute_url_map" "foobar" { } resource "google_compute_ssl_policy" "foobar" { - name = "tf-test-sslproxy-%s" + name = "tf-test-httpsproxy-sslpolicy-%s" description = "my-description" min_tls_version = "TLS_1_2" profile = "MODERN" @@ -211,7 +295,25 @@ resource "google_compute_ssl_certificate" "foobar2" { private_key = file("test-fixtures/test.key") certificate = file("test-fixtures/test.crt") } -`, id, id, id, id, id, id, id) + +resource "google_certificate_manager_trust_config" "trust_config" { + name = "tf-test-trust-config-%s" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } +} + +resource "google_network_security_server_tls_policy" "server_tls_policy" { + name = "tf-test-server-tls-policy-%s" + + mtls_policy { + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.trust_config.name}" + client_validation_mode = "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" + } +} +`, id, id, id, id, id, id, id, id, id) } func testAccComputeTargetHttpsProxy_basic2(id string) string { @@ -224,8 +326,10 @@ resource "google_compute_target_https_proxy" "foobar" { google_compute_ssl_certificate.foobar1.self_link, google_compute_ssl_certificate.foobar2.self_link, ] - quic_override = "ENABLE" - tls_early_data = "STRICT" + ssl_policy = google_compute_ssl_policy.foobar2.self_link + 
quic_override = "ENABLE" + tls_early_data = "STRICT" + server_tls_policy = null } resource "google_compute_backend_service" "foobar" { @@ -262,8 +366,8 @@ resource "google_compute_url_map" "foobar" { } } -resource "google_compute_ssl_policy" "foobar" { - name = "tf-test-sslproxy-%s" +resource "google_compute_ssl_policy" "foobar2" { + name = "tf-test-httpsproxy-sslpolicy2-%s" description = "my-description" min_tls_version = "TLS_1_2" profile = "MODERN" @@ -288,9 +392,9 @@ resource "google_compute_ssl_certificate" "foobar2" { func testAccComputeTargetHttpsProxy_certificateMap(id string) string { return fmt.Sprintf(` resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "tf-test-httpsproxy-%s" - url_map = google_compute_url_map.foobar.self_link + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map.id}" } @@ -339,6 +443,102 @@ resource "google_certificate_manager_dns_authorization" "instance" { name = "tf-test-dnsauthz-%s" domain = "mysite.com" } - `, id, id, id, id, id, id, id, id) } + +func testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(id string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar.self_link] +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + 
check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_compute_ssl_certificate" "foobar" { + name = "tf-test-httpsproxy-cert-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id) +} + +func testAccComputeTargetHttpsProxyWithServerTlsPolicy(id string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar.self_link] + server_tls_policy = google_network_security_server_tls_policy.server_tls_policy.id +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_compute_ssl_certificate" "foobar" { + name = "tf-test-httpsproxy-cert-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_certificate_manager_trust_config" "trust_config" { + name = "tf-test-trust-config-%s" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } +} + +resource "google_network_security_server_tls_policy" "server_tls_policy" { + name = 
"tf-test-server-tls-policy-%s" + + mtls_policy { + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.trust_config.name}" + client_validation_mode = "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" + } + + lifecycle { + create_before_destroy = true + } +} +`, id, id, id, id, id, id, id) +} diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl index 08a6419af17c..c5e1425a1f0a 100644 --- a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -77,6 +77,18 @@ func schemaContainerdConfig() *schema.Schema { } } +// Note: this is a bool internally, but implementing as an enum internally to +// make it easier to accept API level defaults. +func schemaInsecureKubeletReadonlyPortEnabled() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`.", + ValidateFunc: validation.StringInSlice([]string{"FALSE","TRUE"}, false), + } +} + func schemaLoggingVariant() *schema.Schema { return &schema.Schema{ Type: schema.TypeString, @@ -138,9 +150,6 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Computed: true, ForceNew: true, - // Legacy config mode allows removing GPU's from an existing resource - // See https://www.terraform.io/docs/configuration/attr-as-blocks.html - ConfigMode: schema.SchemaConfigModeAttr, Description: `List of the type and count of accelerator cards attached to the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -163,7 +172,6 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Computed: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for auto installation of GPU driver.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -188,7 +196,6 @@ func schemaNodeConfig() *schema.Schema { MaxItems: 1, Optional: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for GPU sharing.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -601,6 +608,7 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, }, + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "pod_pids_limit": { Type: schema.TypeInt, Optional: true, @@ -772,8 +780,24 @@ func schemaNodeConfig() *schema.Schema { } } +// Separate since this currently only supports a single value -- a subset of +// the overall NodeKubeletConfig +func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "insecure_kubelet_readonly_port_enabled": 
schemaInsecureKubeletReadonlyPortEnabled(), + }, + }, + } +} + func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { - configs := configured.([]interface{}) + configs := configured.([]interface{}) if len(configs) == 0 || configs[0] == nil { return nil } @@ -781,6 +805,12 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau nodeConfigDefaults := &container.NodeConfigDefaults{} nodeConfigDefaults.ContainerdConfig = expandContainerdConfig(config["containerd_config"]) + if v, ok := config["insecure_kubelet_readonly_port_enabled"]; ok { + nodeConfigDefaults.NodeKubeletConfig = &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(v), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + } + } if variant, ok := config["logging_variant"]; ok { nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ VariantConfig: &container.LoggingVariantConfig{ @@ -789,14 +819,14 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau } } {{- if ne $.TargetVersionName "ga" }} - if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { - gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) nodeConfigDefaults.GcfsConfig = &container.GcfsConfig{ Enabled: gcfsConfig["enabled"].(bool), } } {{- end }} - return nodeConfigDefaults + return nodeConfigDefaults } func expandNodeConfig(v interface{}) *container.NodeConfig { @@ -1138,6 +1168,13 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf return wmc } +func expandInsecureKubeletReadonlyPortEnabled(v interface{}) bool { + if v == "TRUE" { + return true + } + return false +} + func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if v == nil { return nil @@ -1158,6 +1195,10 @@ 
func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) } + if insecureKubeletReadonlyPortEnabled, ok := cfg["insecure_kubelet_readonly_port_enabled"]; ok { + kConfig.InsecureKubeletReadonlyPortEnabled = expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled) + kConfig.ForceSendFields = append(kConfig.ForceSendFields, "InsecureKubeletReadonlyPortEnabled") + } if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { kConfig.PodPidsLimit = int64(podPidsLimit.(int)) } @@ -1366,6 +1407,8 @@ func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]int result[0]["containerd_config"] = flattenContainerdConfig(c.ContainerdConfig) + result[0]["insecure_kubelet_readonly_port_enabled"] = flattenInsecureKubeletReadonlyPortEnabled(c.NodeKubeletConfig) + result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) {{ if ne $.TargetVersionName `ga` -}} @@ -1557,6 +1600,14 @@ func flattenSecondaryBootDisks(c []*container.SecondaryBootDisk) []map[string]in return result } +func flattenInsecureKubeletReadonlyPortEnabled(c *container.NodeKubeletConfig) string { + // Convert bool from the API to the enum values used internally + if c != nil && c.InsecureKubeletReadonlyPortEnabled { + return "TRUE" + } + return "FALSE" +} + func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { variant := "DEFAULT" if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { @@ -1706,10 +1757,21 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "cpu_cfs_quota": c.CpuCfsQuota, - "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, - "cpu_manager_policy": c.CpuManagerPolicy, - "pod_pids_limit": c.PodPidsLimit, + "cpu_cfs_quota": c.CpuCfsQuota, + "cpu_cfs_quota_period": 
c.CpuCfsQuotaPeriod, + "cpu_manager_policy": c.CpuManagerPolicy, + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), + "pod_pids_limit": c.PodPidsLimit, + }) + } + return result +} + +func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), }) } return result diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl index 84078431c4e5..9707d9febc5b 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -161,6 +161,7 @@ func clusterSchemaNodePoolDefaults() *schema.Schema { {{- if ne $.TargetVersionName "ga" }} "gcfs_config": schemaGcfsConfig(false), {{- end }} + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "logging_variant": schemaLoggingVariant(), }, }, @@ -217,6 +218,7 @@ func ResourceContainerCluster() *schema.Resource { containerClusterSurgeSettingsCustomizeDiff, containerClusterEnableK8sBetaApisCustomizeDiff, containerClusterNodeVersionCustomizeDiff, + tpgresource.SetDiffForLabelsWithCustomizedName("resource_labels"), ), Timeouts: &schema.ResourceTimeout{ @@ -1289,20 +1291,9 @@ func ResourceContainerCluster() *schema.Resource { Description: `Whether or not the advanced datapath metrics are enabled.`, }, "enable_relay": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not Relay is enabled.`, - Default: false, - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.relay_mode"}, - }, - "relay_mode": { - Type: schema.TypeString, - 
Optional: true, - Computed: true, - Deprecated: "Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field.", - Description: `Mode used to make Relay available.`, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.enable_relay"}, + Type: schema.TypeBool, + Required: true, + Description: `Whether or not Relay is enabled.`, }, }, }, @@ -1513,6 +1504,7 @@ func ResourceContainerCluster() *schema.Resource { Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(), "network_tags": { Type: schema.TypeList, Optional: true, @@ -1563,7 +1555,6 @@ func ResourceContainerCluster() *schema.Resource { }, }, {{- end }} -{{- if ne $.TargetVersionName "ga" }} "secret_manager_config": { Type: schema.TypeList, Optional: true, @@ -1580,7 +1571,6 @@ func ResourceContainerCluster() *schema.Resource { }, }, }, -{{- end }} "project": { Type: schema.TypeString, @@ -1821,7 +1811,22 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "label_fingerprint": { @@ -1960,12 +1965,13 @@ func ResourceContainerCluster() *schema.Resource { "channel": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE", "EXTENDED"}, false), Description: `The selected release channel. Accepted values are: * UNSPECIFIED: Not set. * RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. * REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. -* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, +* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky. +* EXTENDED: GKE provides extended support for Kubernetes minor versions through the Extended channel. 
With this channel, you can stay on a minor version for up to 24 months.`, }, }, }, @@ -2235,12 +2241,6 @@ func ResourceContainerCluster() *schema.Resource { // One quirk with this approach is that configs with mixed count=0 and count>0 accelerator blocks will // show a confusing diff if one of there are config changes that result in a legitimate diff as the count=0 // blocks will not be in state. -// -// This could also be modelled by setting `guest_accelerator = []` in the config. However since the -// previous syntax requires that schema.SchemaConfigModeAttr is set on the field it is advisable that -// we have a work around for removing guest accelerators. Also Terraform 0.11 cannot use dynamic blocks -// so this isn't a solution for module authors who want to dynamically omit guest accelerators -// See https://github.com/hashicorp/terraform-provider-google/issues/3786 func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { old, new := diff.GetChange("node_config.0.guest_accelerator") oList := old.([]interface{}) @@ -2352,9 +2352,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er {{- if ne $.TargetVersionName "ga" }} PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), {{- end }} -{{- if ne $.TargetVersionName "ga" }} - SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), -{{- end }} + SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), Autopilot: &container.Autopilot{ @@ -2384,7 +2382,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er MasterAuth: expandMasterAuth(d.Get("master_auth")), NotificationConfig: expandNotificationConfig(d.Get("notification_config")), ConfidentialNodes: 
expandConfidentialNodes(d.Get("confidential_nodes")), - ResourceLabels: tpgresource.ExpandStringMap(d, "resource_labels"), + ResourceLabels: tpgresource.ExpandStringMap(d, "effective_labels"), NodePoolAutoConfig: expandNodePoolAutoConfig(d.Get("node_pool_auto_config")), {{- if ne $.TargetVersionName "ga" }} ProtectConfig: expandProtectConfig(d.Get("protect_config")), @@ -2996,14 +2994,20 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("cluster_telemetry", flattenClusterTelemetry(cluster.ClusterTelemetry)); err != nil { return err } +{{- end }} if err := d.Set("secret_manager_config", flattenSecretManagerConfig(cluster.SecretManagerConfig)); err != nil { return err } -{{- end }} - if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { - return fmt.Errorf("Error setting resource_labels: %s", err) + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "resource_labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", cluster.ResourceLabels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) } if err := d.Set("label_fingerprint", cluster.LabelFingerprint); err != nil { return fmt.Errorf("Error setting label_fingerprint: %s", err) @@ -3824,6 +3828,60 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) } + + if d.HasChange("node_config.0.kubelet_config") { + + defaultPool := "default-pool" + + timeout := d.Timeout(schema.TimeoutCreate) + + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err + } + + // Acquire write-lock on nodepool. 
+ npLockKey := nodePoolInfo.nodePoolLockKey(defaultPool) + + // Note: probably long term this should be handled broadly for all the + // items in kubelet_config in a simpler / DRYer way. + // See b/361634104 + if d.HasChange("node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled") { + it := d.Get("node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled").(string) + + // While we're getting the value from the deprecated field in + // node_config.kubelet_config, the actual setting that needs to be updated + // is on the default nodepool. + req := &container.UpdateNodePoolRequest{ + Name: defaultPool, + KubeletConfig: &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(it), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + }, + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(defaultPool), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, + "updating GKE node pool insecure_kubelet_readonly_port_enabled", userAgent, timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: default-pool setting for insecure_kubelet_readonly_port_enabled updated to %s", d.Id(), it) + } + } } if d.HasChange("notification_config") { @@ -3986,7 +4044,6 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} if d.HasChange("secret_manager_config") { c := d.Get("secret_manager_config") req := 
&container.UpdateClusterRequest{ @@ -4013,7 +4070,6 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } log.Printf("[INFO] GKE cluster %s secret manager csi add-on has been updated", d.Id()) } -{{- end }} if d.HasChange("workload_identity_config") { // Because GKE uses a non-RESTful update function, when removing the @@ -4095,8 +4151,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s monitoring config has been updated", d.Id()) } - if d.HasChange("resource_labels") { - resourceLabels := d.Get("resource_labels").(map[string]interface{}) + if d.HasChange("effective_labels") { + resourceLabels := d.Get("effective_labels").(map[string]interface{}) labelFingerprint := d.Get("label_fingerprint").(string) req := &container.SetLabelsRequest{ ResourceLabels: tpgresource.ConvertStringMap(resourceLabels), @@ -4250,6 +4306,28 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled"); ok { + insecureKubeletReadonlyPortEnabled := v.(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeKubeletConfig: &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired node pool insecure kubelet readonly port configuration defaults.") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool insecure_kubelet_readonly_port_enabled default has been updated", d.Id()) + } + } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") { if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok { loggingVariant := v.(string) @@ -4325,6 +4403,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_auto_config.0.node_kubelet_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigKubeletConfig: expandKubeletConfig( + d.Get("node_pool_auto_config.0.node_kubelet_config"), + ), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config node_kubelet_config parameters") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config node_kubelet_config parameters have been updated", d.Id()) + } + if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") { tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{}) @@ -4876,6 +4972,7 @@ func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *c ResourceLimits: resourceLimits, AutoscalingProfile: config["autoscaling_profile"].(string), AutoprovisioningNodePoolDefaults: expandAutoProvisioningDefaults(config["auto_provisioning_defaults"], d), + AutoprovisioningLocations: tpgresource.ConvertStringArr(config["auto_provisioning_locations"].([]interface{})), } } @@ -5410,7 +5507,6 @@ func expandPodSecurityPolicyConfig(configured interface{}) *container.PodSecurit } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} func expandSecretManagerConfig(configured interface{}) *container.SecretManagerConfig { l := 
configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -5423,7 +5519,6 @@ func expandSecretManagerConfig(configured interface{}) *container.SecretManagerC ForceSendFields: []string{"Enabled"}, } } -{{- end }} func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint { if v == nil { @@ -5581,21 +5676,10 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig if v, ok := config["advanced_datapath_observability_config"]; ok && len(v.([]interface{})) > 0 { advanced_datapath_observability_config := v.([]interface{})[0].(map[string]interface{}) - mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), - } - - enable_relay := advanced_datapath_observability_config["enable_relay"].(bool) - relay_mode := advanced_datapath_observability_config["relay_mode"].(string) - if enable_relay { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - } else if relay_mode == "INTERNAL_VPC_LB" || relay_mode == "EXTERNAL_LB" { - mc.AdvancedDatapathObservabilityConfig.RelayMode = relay_mode - } else { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - mc.AdvancedDatapathObservabilityConfig.RelayMode = "DISABLED" - mc.AdvancedDatapathObservabilityConfig.ForceSendFields = []string{"EnableRelay"} + EnableRelay: advanced_datapath_observability_config["enable_relay"].(bool), + ForceSendFields: []string{"EnableRelay"}, } } @@ -5671,6 +5755,10 @@ func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoCon npac := &container.NodePoolAutoConfig{} config := l[0].(map[string]interface{}) + if v, ok := config["node_kubelet_config"]; ok { + npac.NodeKubeletConfig = expandKubeletConfig(v) + } + if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) } @@ -6187,6 +6275,7 @@ func flattenClusterAutoscaling(a 
*container.ClusterAutoscaling) []map[string]int r["resource_limits"] = resourceLimits r["enabled"] = true r["auto_provisioning_defaults"] = flattenAutoProvisioningDefaults(a.AutoprovisioningNodePoolDefaults) + r["auto_provisioning_locations"] = a.AutoprovisioningLocations } else { r["enabled"] = false } @@ -6308,7 +6397,6 @@ func flattenPodSecurityPolicyConfig(c *container.PodSecurityPolicyConfig) []map[ {{ end }} -{{ if ne $.TargetVersionName `ga` -}} func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]interface{} { if c == nil { return []map[string]interface{}{ @@ -6324,7 +6412,6 @@ func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]i } } -{{ end }} func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []map[string]interface{} { if c == nil { @@ -6488,29 +6575,10 @@ func flattenAdvancedDatapathObservabilityConfig(c *container.AdvancedDatapathObs return nil } - if c.EnableRelay { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "enable_relay": c.EnableRelay, - }, - } - } - - if c.RelayMode == "INTERNAL_VPC_LB" || c.RelayMode == "EXTERNAL_LB" { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "relay_mode": c.RelayMode, - }, - } - } - return []map[string]interface{}{ { "enable_metrics": c.EnableMetrics, - "enable_relay": false, - "relay_mode": "DISABLED", + "enable_relay": c.EnableRelay, }, } } @@ -6529,6 +6597,9 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int } result := make(map[string]interface{}) + if c.NodeKubeletConfig != nil { + result["node_kubelet_config"] = flattenNodePoolAutoConfigNodeKubeletConfig(c.NodeKubeletConfig) + } if c.NetworkTags != nil { result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags) } @@ -6773,7 +6844,6 @@ func podSecurityPolicyCfgSuppress(k, old, new string, r *schema.ResourceData) bo } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} func 
SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { if k == "secret_manager_config.#" && old == "1" && new == "0" { if v, ok := r.GetOk("secret_manager_config"); ok { @@ -6787,7 +6857,6 @@ func SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { } return false } -{{- end }} func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.ResourceData) bool { // if network_policy configuration is empty, we store it as populated and enabled=false, and diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl index e823197d5234..52311225f650 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl @@ -559,6 +559,13 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, }, + "auto_provisioning_locations": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP.`, + }, {{- if ne $.TargetVersionName "ga" }} "autoscaling_profile": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl index c630cfe47a4b..b46574d58170 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl @@ -7,6 +7,7 @@ import ( "regexp" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -917,6 +918,26 @@ func TestAccContainerCluster_withReleaseChannelEnabledDefaultVersion(t *testing. ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, + { + Config: testAccContainerCluster_withReleaseChannelEnabledDefaultVersion(clusterName, "EXTENDED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "EXTENDED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, { Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "UNSPECIFIED", networkName, subnetworkName), }, @@ -946,7 +967,7 @@ func TestAccContainerCluster_withInvalidReleaseChannel(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "CANARY", networkName, subnetworkName), - ExpectError: regexp.MustCompile(`expected release_channel\.0\.channel to be one of \["?UNSPECIFIED"? "?RAPID"? "?REGULAR"? "?STABLE"?\], got CANARY`), + ExpectError: regexp.MustCompile(`expected release_channel\.0\.channel to be one of \["?UNSPECIFIED"? "?RAPID"? "?REGULAR"? "?STABLE"? 
"?EXTENDED"?\], got CANARY`), }, }, }) @@ -1514,6 +1535,146 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }) } +// This is for node_config.kubelet_config, which affects the default node-pool +// (default-pool) when created via the google_container_cluster resource +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfigUpdates(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, "FALSE"), + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", 
acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(clusterName, nodePoolName, networkName, subnetworkName, "TRUE"), + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +// This is for `node_pool_defaults.node_config_defaults` - the default settings +// for newly created nodepools +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdates(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + // Test API default (no value set in config) first + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdateBaseline(clusterName, networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "FALSE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + + func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -3086,6 +3247,52 @@ func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) { }) } +func TestAccContainerCluster_withAutopilotKubeletConfig(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", 
randomSuffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilotKubeletConfigBaseline(clusterName), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "FALSE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "TRUE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + + func TestAccContainerCluster_withAutopilotResourceManagerTags(t *testing.T) { t.Parallel() @@ -3261,7 +3468,6 @@ func TestAccContainerCluster_withIdentityServiceConfig(t *testing.T) { }) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { t.Parallel() @@ -3312,7 +3518,6 @@ func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { }, }) } -{{- end }} func TestAccContainerCluster_withLoggingConfig(t *testing.T) { t.Parallel() @@ -3401,24 +3606,6 @@ func TestAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityCo ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, 
- { - Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(clusterName), - }, - { - ResourceName: "google_container_cluster.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, - }, - { - Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(clusterName), - }, - { - ResourceName: "google_container_cluster.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, - }, }, }) } @@ -3957,6 +4144,60 @@ func TestAccContainerCluster_autoprovisioningDefaultsManagement(t *testing.T) { }) } +func TestAccContainerCluster_autoprovisioningLocations(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName, []string{"us-central1-a", "us-central1-f"}), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.enabled", "true"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.0", "us-central1-a"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.1", "us-central1-f"), + ), + }, + { + 
ResourceName: "google_container_cluster.with_autoprovisioning_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName, []string{"us-central1-b", "us-central1-c"}), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.enabled", "true"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.0", "us-central1-b"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.1", "us-central1-c"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + // This resource originally cleaned up the dangling cluster directly, but now // taints it, having Terraform clean it up during the next apply. This test // name is now inexact, but is being preserved to maintain the test history. 
@@ -6417,26 +6658,31 @@ resource "google_container_cluster" "with_node_config" { `, clusterName, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_in_node_config" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_in_node_config" { name = "%s" location = "us-central1-f" initial_node_count = 1 node_config { - logging_variant = "%s" + kubelet_config { + # Must be set when kubelet_config is, but causes permadrift unless set to + # undocumented empty value + cpu_manager_policy = "" + insecure_kubelet_readonly_port_enabled = "%s" + } } deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, loggingVariant, networkName, subnetworkName) +`, clusterName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(clusterName, nodePoolName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_in_node_pool" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_in_node_pool" { name = "%s" location = "us-central1-f" @@ -6444,74 +6690,147 @@ resource "google_container_cluster" "with_logging_variant_in_node_pool" { name = "%s" initial_node_count = 1 node_config { - logging_variant = "%s" + kubelet_config { + cpu_manager_policy = "static" + insecure_kubelet_readonly_port_enabled = "%s" + } } } 
deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, nodePoolName, loggingVariant, networkName, subnetworkName) +`, clusterName, nodePoolName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdateBaseline(clusterName, networkName, subnetworkName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_node_pool_default" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_node_pool_update" { name = "%s" location = "us-central1-f" initial_node_count = 1 - node_pool_defaults { - node_config_defaults { - logging_variant = "%s" - } - } deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, loggingVariant, networkName, subnetworkName) +`, clusterName, networkName, subnetworkName) } -func testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName string, nvEnabled bool) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_advanced_machine_features_in_node_pool" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_node_pool_update" { name = "%s" location = "us-central1-f" + initial_node_count = 1 - node_pool { - name = "%s" - initial_node_count = 1 - node_config { - machine_type = "c2-standard-4" - advanced_machine_features { - threads_per_core = 1 - enable_nested_virtualization = "%t" - } + node_pool_defaults { + node_config_defaults { + insecure_kubelet_readonly_port_enabled = "%s" } } deletion_protection = false network = "%s" subnetwork 
= "%s" } -`, clusterName, nodePoolName, nvEnabled, networkName, subnetworkName) +`, clusterName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -{{ if ne $.TargetVersionName `ga` -}} -func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "with_node_pool_defaults" { +func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_config" { name = "%s" location = "us-central1-f" initial_node_count = 1 - node_pool_defaults { - node_config_defaults { - gcfs_config { - enabled = "%s" - } - } - } + node_config { + logging_variant = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_node_pool_default" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + node_config_defaults { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, 
clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName string, nvEnabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_advanced_machine_features_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = "%t" + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, nvEnabled, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_defaults" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + node_config_defaults { + gcfs_config { + enabled = "%s" + } + } + } deletion_protection = false network = "%s" subnetwork = "%s" @@ -6952,6 +7271,46 @@ resource "google_container_cluster" "with_autoprovisioning_management" { `, clusterName, autoUpgrade, autoRepair, networkName, subnetworkName) } +func testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName string, locations []string) string { + var autoprovisionLocationsStr string + for i := 0; i < len(locations); i++ { + autoprovisionLocationsStr += fmt.Sprintf("\"%s\",", locations[i]) + } + var apl string + if len(autoprovisionLocationsStr) > 0 { + apl = fmt.Sprintf(` + auto_provisioning_locations = [%s] + `, autoprovisionLocationsStr) + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_autoprovisioning_locations" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + cluster_autoscaling { + 
enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + %s + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, apl, networkName, subnetworkName) +} + func testAccContainerCluster_backendRef(cluster, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "my-backend-service" { @@ -9360,7 +9719,6 @@ resource "google_container_cluster" "primary" { `, name, networkName, subnetworkName) } -{{ if ne $.TargetVersionName `ga` -}} func testAccContainerCluster_withSecretManagerConfigEnabled(name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { @@ -9393,7 +9751,6 @@ resource "google_container_cluster" "primary" { } `, name, networkName, subnetworkName) } -{{- end }} func testAccContainerCluster_withLoggingConfigEnabled(name, networkName, subnetworkName string) string { return fmt.Sprintf(` @@ -9607,56 +9964,6 @@ resource "google_container_cluster" "primary" { `, name, name) } -func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-nw" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "services-range" - ip_cidr_range = "192.168.1.0/24" - } - - secondary_ip_range { - range_name = "pod-ranges" - ip_cidr_range = "192.168.64.0/22" - } -} - -resource "google_container_cluster" "primary" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - datapath_provider = 
"ADVANCED_DATAPATH" - - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - ip_allocation_policy { - cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name - } - - monitoring_config { - enable_components = [] - advanced_datapath_observability_config { - enable_metrics = true - relay_mode = "INTERNAL_VPC_LB" - } - } - deletion_protection = false -} -`, name, name) -} - func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabled(name string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { @@ -9707,56 +10014,6 @@ resource "google_container_cluster" "primary" { `, name, name) } -func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-nw" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "services-range" - ip_cidr_range = "192.168.1.0/24" - } - - secondary_ip_range { - range_name = "pod-ranges" - ip_cidr_range = "192.168.64.0/22" - } -} - -resource "google_container_cluster" "primary" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - datapath_provider = "ADVANCED_DATAPATH" - - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - ip_allocation_policy { - cluster_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name - } - - monitoring_config { - enable_components = [] - advanced_datapath_observability_config { - enable_metrics = false - relay_mode = "DISABLED" - } - } - deletion_protection = false -} -`, name, name) -} - func testAccContainerCluster_withSoleTenantGroup(name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_compute_node_template" "soletenant-tmpl" { @@ -10305,6 +10562,37 @@ func testAccContainerCluster_withWorkloadALTSConfigAutopilot(projectID, name str {{ end }} +func testAccContainerCluster_withAutopilotKubeletConfigBaseline(name string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + enable_autopilot = true + deletion_protection = false + } +`, name) +} + +func testAccContainerCluster_withAutopilotKubeletConfigUpdates(name, insecureKubeletReadonlyPortEnabled string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + node_pool_auto_config { + node_kubelet_config { + insecure_kubelet_readonly_port_enabled = "%s" + } + } + + enable_autopilot = true + deletion_protection = false + } +`, name, insecureKubeletReadonlyPortEnabled) +} + func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { return fmt.Sprintf(` data "google_project" "project" { @@ -10817,6 +11105,7 @@ resource "google_container_cluster" "with_autopilot" { } func TestAccContainerCluster_privateRegistry(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -10935,6 +11224,11 @@ 
resource "google_container_cluster" "primary" { network = "%s" subnetwork = "%s" + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } node_pool_defaults { node_config_defaults { containerd_config { @@ -10970,11 +11264,6 @@ resource "google_container_cluster" "primary" { network = "%s" subnetwork = "%s" - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - ] - } node_pool_defaults { node_config_defaults { containerd_config { @@ -11102,3 +11391,158 @@ resource "google_container_cluster" "primary" { } `, secretID, clusterName, networkName, subnetworkName) } + +func TestAccContainerCluster_withProviderDefaultLabels(t *testing.T) { + // The test failed if VCR testing is enabled, because the cached provider config is used. + // With the cached provider config, any changes in the provider default labels will not be applied. + acctest.SkipIfVcr(t) + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "default_value1"), + 
resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_resourceLabelsOverridesProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.created-by", "terraform"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_moveResourceLabelToProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "0"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + 
resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "0"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "0"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "0"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccContainerCluster_withProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + resource_labels = { + created-by = "terraform" + } +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_resourceLabelsOverridesProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource 
"google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + resource_labels = { + created-by = "terraform" + default_key1 = "value1" + } +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_moveResourceLabelToProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + created-by = "terraform" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl index 00b6b19d17a3..36a609075836 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -526,10 +527,17 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100ms", networkName, subnetworkName, true, 2048), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100ms", networkName, subnetworkName, "TRUE", true, 
2048), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "true"), + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled", "TRUE"), resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.pod_pids_limit", "2048"), ), @@ -540,10 +548,17 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, false, 1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, "FALSE", false, 1024), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "false"), + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled", "FALSE"), ), }, { @@ -571,7 +586,7 @@ func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName, true, 1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName,"TRUE", false, 1024), ExpectError: regexp.MustCompile(`.*to be one of \["?static"? "?none"? 
"?"?\].*`), }, }, @@ -1292,39 +1307,6 @@ func TestAccContainerNodePool_regionalClusters(t *testing.T) { }) } -func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) - np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) - networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") - subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName), - }, - { - ResourceName: "google_container_node_pool.np", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName), - }, - { - ResourceName: "google_container_node_pool.np", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { t.Parallel() @@ -3136,7 +3118,7 @@ resource "google_container_node_pool" "with_sandbox_config" { } {{- end }} -func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName string, quota bool, podPidsLimit int) string { +func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string, quota bool, podPidsLimit int) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -3162,10 +3144,11 @@ resource "google_container_node_pool" "with_kubelet_config" { node_config { image_type = "COS_CONTAINERD" kubelet_config { - cpu_manager_policy = %q - cpu_cfs_quota = %v - 
cpu_cfs_quota_period = %q - pod_pids_limit = %d + cpu_manager_policy = %q + cpu_cfs_quota = %v + cpu_cfs_quota_period = %q + insecure_kubelet_readonly_port_enabled = "%s" + pod_pids_limit = %d } oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", @@ -3174,7 +3157,7 @@ resource "google_container_node_pool" "with_kubelet_config" { logging_variant = "DEFAULT" } } -`, cluster, networkName, subnetworkName, np, policy, quota, period, podPidsLimit) +`, cluster, networkName, subnetworkName, np, policy, quota, period, insecureKubeletReadonlyPortEnabled, podPidsLimit) } func testAccContainerNodePool_withLinuxNodeConfig(cluster, np, tcpMem, networkName, subnetworkName string) string { @@ -3752,58 +3735,6 @@ resource "google_container_node_pool" "np" { `, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-f" - initial_node_count = 3 - deletion_protection = false - network = "%s" - subnetwork = "%s" -} - -resource "google_container_node_pool" "np" { - name = "%s" - location = "us-central1-f" - cluster = google_container_cluster.cluster.name - initial_node_count = 1 - - node_config { - guest_accelerator { - count = 1 - type = "nvidia-tesla-t4" - } - machine_type = "n1-highmem-4" - } -} -`, cluster, networkName, subnetworkName, np) -} - -func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-f" - initial_node_count = 3 - deletion_protection = false - network = "%s" - subnetwork = "%s" -} - -resource "google_container_node_pool" "np" { - name = "%s" - location = "us-central1-f" - cluster = google_container_cluster.cluster.name - initial_node_count = 1 - - node_config { - guest_accelerator = [] - 
} -} -`, cluster, networkName, subnetworkName, np) -} - func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { @@ -4873,6 +4804,7 @@ resource "google_container_node_pool" "np" { } func TestAccContainerNodePool_defaultDriverInstallation(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -4897,13 +4829,17 @@ func TestAccContainerNodePool_defaultDriverInstallation(t *testing.T) { func testAccContainerNodePool_defaultDriverInstallation(cluster, np string) string { return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false - min_master_version = "1.30.1-gke.1329003" + min_master_version = data.google_container_engine_versions.central1a.release_channel_latest_version["RAPID"] release_channel { channel = "RAPID" } @@ -4931,4 +4867,4 @@ resource "google_container_node_pool" "np" { } } `, cluster, np) -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl index 0ae3a078f41b..a8d095fe46f7 100644 --- a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl @@ -1349,7 +1349,7 @@ resource "google_dataflow_flex_template_job" "flex_job_kms" { kms_key_name = "%s" } -`, topicName, bucket, crypto_key, job) +`, topicName, bucket, job, crypto_key) } func testAccDataflowFlexTemplateJob_additionalExperiments(job, bucket, topicName string, experiments []string) string { diff 
--git a/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl index f650c90f298b..35c1c39f2b07 100644 --- a/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl @@ -94,7 +94,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "tf-test-secret%{random_suffix}" replication { auto {} diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go index 20012fab7365..962b8f521239 100644 --- a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go @@ -105,6 +105,8 @@ func TestAccDataprocCluster_basic(t *testing.T) { } func TestAccDataprocVirtualCluster_basic(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() var cluster dataproc.Cluster diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go index 640d82c9e1e5..8e9c3aed7b54 100644 --- a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go @@ -3,9 +3,9 @@ package dialogflow_test import ( "testing" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) 
func TestAccDialogflowAgent_update(t *testing.T) { diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl index 69fee4e86421..f1ce98b8067b 100644 --- a/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl @@ -328,6 +328,15 @@ func TestAccDNSRecordSet_routingPolicy(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackupMultipleNoLbType(networkName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), }, @@ -1065,6 +1074,127 @@ resource "google_dns_record_set" "foobar" { `, networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) } +func testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackupMultipleNoLbType(networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_subnetwork" "proxy_subnet" { + name = "%s" + ip_cidr_range = "10.100.0.0/24" + region = "us-central1" + purpose = "INTERNAL_HTTPS_LOAD_BALANCER" + role = "ACTIVE" + network = google_compute_network.default.id +} + 
+resource "google_compute_region_health_check" "health_check" { + name = "%s" + region = "us-central1" + + http_health_check { + port = 80 + } +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" + load_balancing_scheme = "INTERNAL_MANAGED" + protocol = "HTTP" + health_checks = [google_compute_region_health_check.health_check.id] +} + +resource "google_compute_region_url_map" "url_map" { + name = "%s" + region = "us-central1" + default_service = google_compute_region_backend_service.backend.id +} + +resource "google_compute_region_target_http_proxy" "http_proxy" { + name = "%s" + region = "us-central1" + url_map = google_compute_region_url_map.url_map.id +} + +resource "google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_http_proxy.http_proxy.id + port_range = "80" + allow_global_access = true + network = google_compute_network.default.name + ip_protocol = "TCP" +} + +resource "google_compute_forwarding_rule" "duplicate" { + name = "%s" + region = "us-central1" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_http_proxy.http_proxy.id + port_range = "80" + allow_global_access = true + network = google_compute_network.default.name + ip_protocol = "TCP" +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + primary_backup { + trickle_ratio = 0.1 + enable_geo_fencing_for_backups = true + + primary { + internal_load_balancers { + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + + internal_load_balancers { + ip_address = google_compute_forwarding_rule.duplicate.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.duplicate.project + region = google_compute_forwarding_rule.duplicate.region + } + } + + backup_geo { + location = "us-west1" + rrdatas = ["1.2.3.4"] + } + + backup_geo { + location = "asia-east1" + rrdatas = ["5.6.7.8"] + } + } + } +} +`, networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, forwardingRuleName+"-2", zoneName, zoneName, zoneName, ttl) +} + func testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { return fmt.Sprintf(` resource "google_compute_network" "default" { diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl index b8c2fc402273..dc799efdc85d 100644 --- a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl @@ -16,7 +16,7 @@ func TestAccDataSourceGoogleFirebaseAndroidAppConfig(t *testing.T) { context := map[string]interface{}{ 
"project_id": envvar.GetTestProjectFromEnv(), - "package_name": "android.package.app" + acctest.RandString(t, 5), + "package_name": "android.package.app" + acctest.RandString(t, 5), "display_name": "tf-test Display Name AndroidAppConfig DataSource", } diff --git a/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl index 0efe16dc20f4..b3f60174a717 100644 --- a/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl @@ -158,11 +158,13 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { version = "1.18.2" config_sync { source_format = "hierarchy" + enabled = true git { sync_repo = "https://github.com/GoogleCloudPlatform/magic-modules" secret_type = "none" } } + management = "MANAGEMENT_AUTOMATIC" } } diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl index cb2517c469f4..310b805dcaa2 100644 --- a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl @@ -492,10 +492,53 @@ func TestAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(t *testing.T) ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementEnableAutomaticManagementUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementRemovalUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + 
}, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementAutomaticManagement(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, }, }) } +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementAutomaticManagement(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + management = "MANAGEMENT_AUTOMATIC" + config_sync { + enabled = true + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(context map[string]interface{}) string { return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` resource "google_gke_hub_feature" "feature" { @@ -531,6 +574,7 @@ resource "google_gke_hub_feature" "feature" { fleet_default_member_config { configmanagement { version = "1.16.1" + management = "MANAGEMENT_MANUAL" config_sync { enabled = true prevent_drift = true @@ -551,6 +595,45 @@ resource "google_gke_hub_feature" "feature" { `, context) } +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementEnableAutomaticManagementUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + version = "1.16.1" + management = "MANAGEMENT_AUTOMATIC" + config_sync { + prevent_drift = true + source_format = "unstructured" + oci { + sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" + policy_dir = "/acm/nonprod-root/" + secret_type = 
"gcpserviceaccount" + sync_wait_secs = "15" + gcp_service_account_email = "gke-cluster@gke-foo-nonprod.iam.gserviceaccount.com" + } + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementRemovalUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + func TestAccGKEHubFeature_Clusterupgrade(t *testing.T) { // VCR fails to handle batched project services acctest.SkipIfVcr(t) diff --git a/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go index 3725784c4660..c1227b201afa 100644 --- a/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go +++ b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go @@ -76,7 +76,7 @@ func TestAccIAM2DenyPolicy_iamDenyPolicyFolderParent(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "parent"}, }, - { + { Config: testAccIAM2DenyPolicy_iamDenyPolicyFolderUpdate(context), }, { diff --git a/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl b/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl new file mode 100644 index 000000000000..8d3b03899014 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl @@ -0,0 +1,1309 @@ +package kms_test + +import ( + "context" + "fmt" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/kms" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestCryptoKeyIdParsing(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + ImportId string + ExpectedError bool + ExpectedTerraformId string + ExpectedCryptoKeyId string + Config *transport_tpg.Config + }{ + "id is in project/location/keyRingName/cryptoKeyName format": { + ImportId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + }, + "id is in domain:project/location/keyRingName/cryptoKeyName format": { + ImportId: "example.com:test-project/us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "example.com:test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/example.com:test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + }, + "id contains name that is longer than 63 characters": { + ImportId: "test-project/us-central1/test-key-ring/can-you-believe-that-this-cryptokey-name-is-this-extravagantly-long", + ExpectedError: true, + }, + "id is in location/keyRingName/cryptoKeyName format": { + ImportId: "us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + Config: &transport_tpg.Config{Project: 
"test-project"}, + }, + "id is in location/keyRingName/cryptoKeyName format without project in config": { + ImportId: "us-central1/test-key-ring/test-key-name", + ExpectedError: true, + Config: &transport_tpg.Config{Project: ""}, + }, + } + + for tn, tc := range cases { + cryptoKeyId, err := kms.ParseKmsCryptoKeyId(tc.ImportId, tc.Config) + + if tc.ExpectedError && err == nil { + t.Fatalf("bad: %s, expected an error", tn) + } + + if err != nil { + if tc.ExpectedError { + continue + } + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if cryptoKeyId.TerraformId() != tc.ExpectedTerraformId { + t.Fatalf("bad: %s, expected Terraform ID to be `%s` but is `%s`", tn, tc.ExpectedTerraformId, cryptoKeyId.TerraformId()) + } + + if cryptoKeyId.CryptoKeyId() != tc.ExpectedCryptoKeyId { + t.Fatalf("bad: %s, expected CryptoKey ID to be `%s` but is `%s`", tn, tc.ExpectedCryptoKeyId, cryptoKeyId.CryptoKeyId()) + } + } +} + +func TestCryptoKeyStateUpgradeV0(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Attributes map[string]interface{} + Expected map[string]string + Meta interface{} + }{ + "change key_ring from terraform id fmt to link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "my-project/my-location/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: &transport_tpg.Config{}, + }, + "key_ring link fmt stays as link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: &transport_tpg.Config{}, + }, + "key_ring without project to link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "my-location/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: 
&transport_tpg.Config{ + Project: "my-project", + }, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + actual, err := kms.ResourceKMSCryptoKeyUpgradeV0(context.Background(), tc.Attributes, tc.Meta) + + if err != nil { + t.Error(err) + } + + for k, v := range tc.Expected { + if actual[k] != v { + t.Errorf("expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + k, v, k, actual[k], actual) + } + } + }) + } +} + +func TestAccKmsCryptoKey_basic(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_kms_crypto_key.crypto_key", "primary.0.name"), + ), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Test importing with a short id + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", projectId, location, keyRingName, cryptoKeyName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_rotation(t *testing.T) { + // when rotation is set, next rotation time is set using time.Now + acctest.SkipIfVcr(t) + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + rotationPeriod := "100000s" + updatedRotationPeriod := "7776000s" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedRotationPeriod), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_rotationRemoved(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + // Use a 
separate TestStep rather than a CheckDestroy because we need the project to still exist. + { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_template(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + algorithm := "EC_SIGN_P256_SHA256" + updatedAlgorithm := "EC_SIGN_P384_SHA384" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedAlgorithm), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_destroyDuration(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_destroyDuration(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsCryptoKey_keyAccessJustificationsPolicy(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + allowedAccessReason := "CUSTOMER_INITIATED_SUPPORT" + updatedAllowedAccessReason := "GOOGLE_INITIATED_SERVICE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowedAccessReason), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedAllowedAccessReason), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a 
CheckDestroy because we need the project to still exist. + { + Config: testGoogleKmsCryptoKey_removedBeta(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} +{{- end }} + +func TestAccKmsCryptoKey_importOnly(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_importOnly(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"skip_initial_version_creation", "labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey resource was removed from state, +// even though the server-side resource was not removed. +func testAccCheckGoogleKmsCryptoKeyWasRemovedFromState(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[resourceName] + + if ok { + return fmt.Errorf("Resource was not removed from state: %s", resourceName) + } + + return nil + } +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey resource's CryptoKeyVersion +// sub-resources were scheduled to be destroyed, rendering the key itself inoperable. 
+func testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t *testing.T, projectId, location, keyRingName, cryptoKeyName string) resource.TestCheckFunc { + return func(_ *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + gcpResourceUri := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", projectId, location, keyRingName, cryptoKeyName) + + response, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions.List(gcpResourceUri).Do() + + if err != nil { + return fmt.Errorf("Unexpected failure to list versions: %s", err) + } + + versions := response.CryptoKeyVersions + + for _, v := range versions { + if v.State != "DESTROY_SCHEDULED" && v.State != "DESTROYED" { + return fmt.Errorf("CryptoKey %s should have no versions, but version %s has state %s", cryptoKeyName, v.Name, v.State) + } + } + + return nil + } +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey autorotation +// was disabled to prevent more versions of the key from being created. 
+func testAccCheckGoogleKmsCryptoKeyRotationDisabled(t *testing.T, projectId, location, keyRingName, cryptoKeyName string) resource.TestCheckFunc { + return func(_ *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + gcpResourceUri := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", projectId, location, keyRingName, cryptoKeyName) + + response, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.CryptoKeys.Get(gcpResourceUri).Do() + if err != nil { + return fmt.Errorf("Unexpected failure while verifying 'deleted' crypto key: %s", err) + } + + if response.NextRotationTime != "" { + return fmt.Errorf("Expected empty nextRotationTime for 'deleted' crypto key, got %s", response.NextRotationTime) + } + if response.RotationPeriod != "" { + return fmt.Errorf("Expected empty RotationPeriod for 'deleted' crypto key, got %s", response.RotationPeriod) + } + + return nil + } +} + +func TestAccKmsCryptoKeyVersion_basic(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + }, + }) +} + +func 
TestAccKmsCryptoKeyVersionWithSymmetricHSM(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_skipInitialVersion(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_skipInitialVersion(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccKmsCryptoKeyVersion_patch(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + state := "DISABLED" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_patchInitialize(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_patch("true", projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_patch("false", projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state), + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_externalProtectionLevelOptions(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyUri := "data.google_secret_manager_secret_version.key_uri.secret_data" + updatedKeyUri := "data.google_secret_manager_secret_version.key_uri_updated.secret_data" + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedKeyUri), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(t *testing.T) { + // This test relies on manual steps to set up the EkmConnection used for the + // CryptoKeyVersion creation, which means we can't spin up a temporary project. + // We also can't use bootstrapped keys because that would defeat the purpose of + // this key creation test, so we skip this test for VCR to avoid KMS resource + // accumulation in the TF test project (since KMS resources can't be deleted). 
+ acctest.SkipIfVcr(t) + t.Parallel() + + projectId := envvar.GetTestProjectFromEnv() + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + ekmConnectionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyPath := "data.google_secret_manager_secret_version.key_path.secret_data" + updatedKeyPath := "data.google_secret_manager_secret_version.key_path_updated.secret_data" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, keyPath), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, updatedKeyPath), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +// This test runs in its own project, otherwise the test project would start to get filled +// with undeletable resources +func testGoogleKmsCryptoKey_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = 
google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + rotation_period = "%s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod) +} + +func testGoogleKmsCryptoKey_rotationRemoved(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func 
testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ASYMMETRIC_SIGN" + + version_template { + algorithm = "%s" + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm) +} + +func testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testGoogleKmsCryptoKey_removedBeta(projectId, projectOrg, projectBillingAccount, keyRingName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + provider = google-beta + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + provider = google-beta + project = 
google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName) +} +{{- end }} + +func testGoogleKmsCryptoKey_destroyDuration(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + destroy_scheduled_duration = "129600s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + provider = google-beta + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + provider = google-beta + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" 
"crypto_key" { + provider = google-beta + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + key_access_justifications_policy { + allowed_access_reasons = ["%s"] + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason) +} +{{- end }} + +func testGoogleKmsCryptoKey_importOnly(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + skip_initial_version_creation = true + import_only = true +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource 
"google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + version_template { + algorithm = "GOOGLE_SYMMETRIC_ENCRYPTION" + protection_level = "HSM" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} 
+`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_skipInitialVersion(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + skip_initial_version_creation = true +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} +func testGoogleKmsCryptoKeyVersion_patchInitialize(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + lifecycle { + prevent_destroy = true 
+ } + state = "ENABLED" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_patch(preventDestroy, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + lifecycle { + prevent_destroy = %s + } + state = "%s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, preventDestroy, state) +} + +func testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + + version_template { + algorithm = "EXTERNAL_SYMMETRIC_ENCRYPTION" + protection_level = 
"EXTERNAL" + } + + labels = { + key = "value" + } + skip_initial_version_creation = true +} + +data "google_secret_manager_secret_version" "key_uri" { + secret = "external-full-key-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "key_uri_updated" { + secret = "external-full-key-uri-update-test" + project = "315636579862" +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + external_protection_level_options { + external_key_uri = %s + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri) +} + +// EkmConnection setup and creation is based off of resource_kms_ekm_connection_test.go +func testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, keyPath string) string { + return fmt.Sprintf(` +data "google_project" "vpc-project" { + project_id = "cloud-ekm-refekm-playground" +} +data "google_project" "project" { + project_id = "%s" +} + +data "google_secret_manager_secret_version" "raw_der" { + secret = "playground-cert" + project = "315636579862" +} +data "google_secret_manager_secret_version" "hostname" { + secret = "external-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "servicedirectoryservice" { + secret = "external-servicedirectoryservice" + project = "315636579862" +} + +resource "google_project_iam_member" "add_sdviewer" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.viewer" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_project_iam_member" "add_pscAuthorizedService" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.pscAuthorizedService" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} + +resource 
"google_kms_ekm_connection" "example-ekmconnection" { + name = "%s" + location = "us-central1" + key_management_mode = "MANUAL" + service_resolvers { + service_directory_service = data.google_secret_manager_secret_version.servicedirectoryservice.secret_data + hostname = data.google_secret_manager_secret_version.hostname.secret_data + server_certificates { + raw_der = data.google_secret_manager_secret_version.raw_der.secret_data + } + } + depends_on = [ + google_project_iam_member.add_pscAuthorizedService, + google_project_iam_member.add_sdviewer + ] +} + +resource "google_kms_key_ring" "key_ring" { + project = data.google_project.project.project_id + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + + version_template { + algorithm = "EXTERNAL_SYMMETRIC_ENCRYPTION" + protection_level = "EXTERNAL_VPC" + } + + labels = { + key = "value" + } + crypto_key_backend = google_kms_ekm_connection.example-ekmconnection.id + skip_initial_version_creation = true +} + +data "google_secret_manager_secret_version" "key_path" { + secret = "external-keypath" + project = "315636579862" +} +data "google_secret_manager_secret_version" "key_path_updated" { + secret = "external-keypath-update-test" + project = "315636579862" +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + external_protection_level_options { + ekm_connection_key_path = %s + } +} +`, projectId, ekmConnectionName, keyRingName, cryptoKeyName, keyPath) +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl index e32616174019..45106b1dbdec 100644 --- a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl +++ 
b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl @@ -10,6 +10,7 @@ import ( ) func TestAccNetworkSecurityClientTlsPolicy_update(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() clientTlsPolicyName := fmt.Sprintf("tf-test-client-tls-policy-%s", acctest.RandString(t, 10)) diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl index 916f326ac9d0..f2c0987c06ce 100644 --- a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl @@ -74,6 +74,7 @@ resource "google_network_services_tcp_route" "foobar" { weight = 1 } original_destination = false + idle_timeout = "60s" } } } @@ -111,6 +112,7 @@ resource "google_network_services_tcp_route" "foobar" { weight = 1 } original_destination = false + idle_timeout = "120s" } } } diff --git a/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl index 0ffa5fc7ee63..740f2f5dde37 100644 --- a/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl @@ -1,3 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + + package parallelstore_test {{ if ne $.TargetVersionName `ga` -}} @@ -5,8 +23,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - - "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/acctest" ) func TestAccParallelstoreInstance_parallelstoreInstanceBasicExample_update(t *testing.T) { @@ -53,6 +70,8 @@ resource "google_parallelstore_instance" "instance" { capacity_gib = 12000 network = google_compute_network.network.name reserved_ip_range = google_compute_global_address.private_ip_alloc.name + file_stripe_level = "FILE_STRIPE_LEVEL_MIN" + directory_stripe_level = "DIRECTORY_STRIPE_LEVEL_MIN" labels = { test = "value" } diff --git a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl index 6009bc618f23..960e6cce3250 100644 --- a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl @@ -23,7 +23,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: 
"REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -33,7 +33,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -82,7 +82,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -92,7 +92,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 2 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + 
Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -102,7 +102,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 0 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -112,7 +112,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -131,7 +131,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with shard count 3 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: 
"MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -141,7 +141,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // update shard count to 5 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -151,7 +151,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -174,6 +174,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: "MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "volatile-ttl", }}), @@ -190,6 +195,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: "MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "allkeys-lru", 
"maxmemory-clients": "90%", @@ -203,7 +213,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { }, { // remove all redis configs - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, @@ -259,6 +269,11 @@ type ClusterParams struct { redisConfigs map[string]string zoneDistributionMode string zone string + maintenanceDay string + maintenanceHours int + maintenanceMinutes int + maintenanceSeconds int + maintenanceNanos int } func createOrUpdateRedisCluster(params *ClusterParams) string { @@ -277,6 +292,23 @@ func createOrUpdateRedisCluster(params *ClusterParams) string { `, params.zoneDistributionMode, params.zone) } + maintenancePolicyBlock := `` + if params.maintenanceDay != "" { + maintenancePolicyBlock = fmt.Sprintf(` + maintenance_policy { + weekly_maintenance_window { + day = "%s" + start_time { + hours = %d + minutes = %d + seconds = %d + nanos = %d + } + } + } + `, params.maintenanceDay, params.maintenanceHours, params.maintenanceMinutes, params.maintenanceSeconds, params.maintenanceNanos) + } + return fmt.Sprintf(` resource "google_redis_cluster" "test" { provider = google-beta @@ -292,6 +324,7 @@ resource "google_redis_cluster" "test" { redis_configs = { %s } + %s %s depends_on = [ google_network_connectivity_service_connection_policy.default @@ -323,7 +356,7 @@ resource "google_compute_network" "producer_net" { name = "%s" auto_create_subnetworks = false } -`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, params.name, params.name, params.name) +`, params.name, params.replicaCount, params.shardCount, params.nodeType, 
params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, maintenancePolicyBlock, params.name, params.name, params.name) } {{ end }} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go index c0a89bace550..8f4ecf70057b 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go @@ -3,10 +3,9 @@ package resourcemanager_test import ( "fmt" "regexp" - "testing" - "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) @@ -267,7 +266,7 @@ func TestAccProjectIamBinding_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociateBindingBasic(pid, org, role, "admin@hashicorptest.com"), + Config: testAccProjectAssociateBindingBasic(pid, org, role, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go index 019e75c5029c..b02cb66743c5 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go @@ -3,10 +3,9 @@ package resourcemanager_test import ( "fmt" "regexp" - 
"testing" - "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) @@ -183,7 +182,7 @@ func TestAccProjectIamMember_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociateMemberBasic(pid, org, role, "admin@hashicorptest.com"), + Config: testAccProjectAssociateMemberBasic(pid, org, role, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for member \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go index 9b923080d453..2324eca039f6 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go @@ -55,7 +55,7 @@ func TestAccProjectIamPolicy_emptyMembers(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { @@ -72,7 +72,7 @@ func TestAccProjectIamPolicy_expanded(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: 
[]resource.TestStep{ { @@ -92,7 +92,7 @@ func TestAccProjectIamPolicy_basicAuditConfig(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ // Create a new project @@ -122,7 +122,7 @@ func TestAccProjectIamPolicy_expandedAuditConfig(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { @@ -141,7 +141,7 @@ func TestAccProjectIamPolicy_withCondition(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ // Create a new project @@ -176,7 +176,7 @@ func TestAccProjectIamPolicy_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociatePolicyBasic(pid, org, "admin@hashicorptest.com"), + Config: testAccProjectAssociatePolicyBasic(pid, org, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for bindings\\.1\\.members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl 
b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl index d1a304e96938..ff1fd2a8fb7e 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl @@ -1,22 +1,24 @@ package resourcemanager import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/terraform-provider-google/google/verify" - - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" + "fmt" + "log" + "strings" + "time" + {{- if ne $.TargetVersionName "ga" }} + "regexp" + {{- end }} + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" ) // These services can only be enabled as a side-effect of enabling other services, @@ -332,11 +334,11 @@ func disableServiceUsageProjectService(service, project string, d *schema.Resour ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageServiceBeingActivated}, }) if err != nil { - {{- if not (eq $.TargetVersionName "ga") }} + {{- if ne $.TargetVersionName "ga" }} 
if res, _ := regexp.MatchString("COMMON_SU_SERVICE_HAS_USAGE", err.Error()); res { - return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, strings.Replace(err.Error(), "check_if_service_has_usage=SKIP", "check_if_service_has_usage_on_destroy=false", -1)) + return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, strings.Replace(err.Error(), "check_if_service_has_usage=SKIP", "check_if_service_has_usage_on_destroy=false", -1)) } - {{- end }} + {{- end }} return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err) } return nil diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl index 57d13174696f..c5fdaded7489 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl @@ -37,21 +37,21 @@ func TestAccProjectService_basic(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { ResourceName: "google_project_service.test2", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "project", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: 
[]string{"disable_on_destroy", "project"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "project", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "project"}, + {{- end }} }, // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. { @@ -99,11 +99,11 @@ func TestAccProjectService_disableDependentServices(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { Config: testAccProjectService_dependencyRemoved(services, pid, org, billingId), @@ -116,11 +116,11 @@ func TestAccProjectService_disableDependentServices(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { Config: testAccProjectService_dependencyRemoved(services, pid, org, billingId), @@ -387,6 +387,7 @@ resource "google_project_service" "test" { func testAccProjectService_checkUsage(service string, pid, org string, checkIfServiceHasUsage 
string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { + provider = google-beta project_id = "%s" name = "%s" org_id = "%s" @@ -394,6 +395,7 @@ resource "google_project" "acceptance" { } resource "google_project_service" "test" { + provider = google-beta project = google_project.acceptance.project_id service = "%s" diff --git a/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl new file mode 100644 index 000000000000..d85d72dd999a --- /dev/null +++ b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl @@ -0,0 +1,2553 @@ +package sql + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +// Match fully-qualified or relative URLs +const privateNetworkLinkRegex = "^(?:http(?:s)?://.+/)?projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: 
true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, +} + +var sqlDatabaseFlagSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Required: true, + Description: `Value of the flag.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the flag.`, + }, + }, +} + +var ( + backupConfigurationKeys = []string{ + "settings.0.backup_configuration.0.binary_log_enabled", + "settings.0.backup_configuration.0.enabled", + "settings.0.backup_configuration.0.start_time", + "settings.0.backup_configuration.0.location", + "settings.0.backup_configuration.0.point_in_time_recovery_enabled", + "settings.0.backup_configuration.0.backup_retention_settings", + "settings.0.backup_configuration.0.transaction_log_retention_days", + } + + ipConfigurationKeys = []string{ + "settings.0.ip_configuration.0.authorized_networks", + "settings.0.ip_configuration.0.ipv4_enabled", + "settings.0.ip_configuration.0.private_network", + "settings.0.ip_configuration.0.allocated_ip_range", + "settings.0.ip_configuration.0.enable_private_path_for_google_cloud_services", + "settings.0.ip_configuration.0.psc_config", + "settings.0.ip_configuration.0.ssl_mode", + "settings.0.ip_configuration.0.server_ca_mode", + } + + maintenanceWindowKeys = []string{ + "settings.0.maintenance_window.0.day", + "settings.0.maintenance_window.0.hour", + "settings.0.maintenance_window.0.update_track", + } + + replicaConfigurationKeys = []string{ + "replica_configuration.0.ca_certificate", + "replica_configuration.0.client_certificate", + "replica_configuration.0.client_key", + "replica_configuration.0.connect_retry_interval", + "replica_configuration.0.dump_file_path", + "replica_configuration.0.failover_target", + "replica_configuration.0.master_heartbeat_period", + "replica_configuration.0.password", + "replica_configuration.0.ssl_cipher", + "replica_configuration.0.username", + 
"replica_configuration.0.verify_server_certificate", + } + + insightsConfigKeys = []string{ + "settings.0.insights_config.0.query_insights_enabled", + "settings.0.insights_config.0.query_string_length", + "settings.0.insights_config.0.record_application_tags", + "settings.0.insights_config.0.record_client_address", + "settings.0.insights_config.0.query_plans_per_minute", + } + + sqlServerAuditConfigurationKeys = []string{ + "settings.0.sql_server_audit_config.0.bucket", + "settings.0.sql_server_audit_config.0.retention_interval", + "settings.0.sql_server_audit_config.0.upload_interval", + } +) + +func ResourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlDatabaseInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + customdiff.ForceNewIfChange("settings.0.disk_size", compute.IsDiskShrinkage), + customdiff.ForceNewIfChange("master_instance_name", isMasterInstanceNameSet), + customdiff.IfValueChange("instance_type", isReplicaPromoteRequested, checkPromoteConfigurationsAndUpdateDiff), + privateNetworkCustomizeDiff, + pitrSupportDbCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The region the instance will sit in. Note, Cloud SQL is not available in all regions. A valid region must be provided to use this resource. 
If a region is not provided in the resource definition, the provider region will be used instead, but this will be an apply-time error for instances if the provider region is not supported with Cloud SQL. If you choose not to provide the region argument for this resource, make sure you understand this.`, + }, + "deletion_protection": { + Type: schema.TypeBool, + Default: true, + Optional: true, + Description: `Used to block Terraform from deleting a SQL Instance. Defaults to true.`, + }, + "settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"settings", "clone"}, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeInt, + Computed: true, + Description: `Used to make sure changes to the settings block are atomic.`, + }, + "tier": { + Type: schema.TypeString, + Required: true, + Description: `The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. See the Custom Machine Type Documentation to learn about specifying custom machine types.`, + }, + "edition": { + Type: schema.TypeString, + Optional: true, + Default: "ENTERPRISE", + ValidateFunc: validation.StringInSlice([]string{"ENTERPRISE", "ENTERPRISE_PLUS"}, false), + Description: `The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.`, + }, + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of threads per physical core. 
Can be 1 or 2.`, + }, + }, + }, + }, + "data_cache_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Data cache configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_cache_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether data cache is enabled for the instance.`, + }, + }, + }, + }, + "activation_policy": { + Type: schema.TypeString, + Optional: true, + Default: "ALWAYS", + Description: `This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND.`, + }, + "active_directory_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + Description: `Domain name of the Active Directory for SQL Server (e.g., mydomain.com).`, + }, + }, + }, + }, + "deny_maintenance_period": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_date": { + Type: schema.TypeString, + Required: true, + Description: `End date before which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "start_date": { + Type: schema.TypeString, + Required: true, + Description: `Start date after which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "time": { + Type: schema.TypeString, + Required: true, + Description: `Time in UTC when the "deny maintenance period" starts on start_date and ends on end_date. 
The time is in format: HH:mm:SS, i.e., 00:00:00`, + }, + }, + }, + }, + "sql_server_audit_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `The name of the destination bucket (e.g., gs://mybucket).`, + }, + "retention_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"..`, + }, + "upload_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, + "time_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The time_zone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format.`, + }, + "availability_type": { + Type: schema.TypeString, + Optional: true, + Default: "ZONAL", + ValidateFunc: validation.StringInSlice([]string{"REGIONAL", "ZONAL"}, false), + Description: `The availability type of the Cloud SQL instance, high availability +(REGIONAL) or single zone (ZONAL). For all instances, ensure that +settings.backup_configuration.enabled is set to true. +For MySQL instances, ensure that settings.backup_configuration.binary_log_enabled is set to true. +For Postgres instances, ensure that settings.backup_configuration.point_in_time_recovery_enabled +is set to true. 
Defaults to ZONAL.`, + }, + "backup_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_log_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if backup configuration is enabled.`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + // start_time is randomly assigned if not set + Computed: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `HH:MM format time indicating when backup configuration starts.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `Location of the backup configuration.`, + }, + "point_in_time_recovery_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if Point-in-time recovery is enabled.`, + }, + "transaction_log_retention_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `The number of days of transaction logs we retain for point in time restore, from 1-7. 
(For PostgreSQL Enterprise Plus instances, from 1 to 35.)`, + }, + "backup_retention_settings": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retained_backups": { + Type: schema.TypeInt, + Required: true, + Description: `Number of backups to retain.`, + }, + "retention_unit": { + Type: schema.TypeString, + Optional: true, + Default: "COUNT", + Description: `The unit that 'retainedBackups' represents. Defaults to COUNT`, + }, + }, + }, + }, + }, + }, + }, + "collation": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of server instance collation.`, + }, + "database_flags": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseFlagSchemaElem), + Elem: sqlDatabaseFlagSchemaElem, + }, + "disk_autoresize": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Enables auto-resizing of the storage size. Defaults to true.`, + }, + "disk_autoresize_limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: `The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit.`, + }, + "enable_google_ml_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Vertex AI Integration.`, + }, + "enable_dataplex_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Dataplex Integration.`, + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + // Default is likely 10gb, but it is undocumented and may change. + Computed: true, + Description: `The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. 
The minimum value is 10GB.`, + }, + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "PD_SSD", + ForceNew: true, + DiffSuppressFunc: caseDiffDashSuppress, + Description: `The type of data disk: PD_SSD or PD_HDD. Defaults to PD_SSD.`, + }, + "ip_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + AtLeastOneOf: ipConfigurationKeys, + }, + "ipv4_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4_enabled must be enabled or a private_network must be configured.`, + }, + "private_network": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.OrEmpty(verify.ValidateRegexp(privateNetworkLinkRegex)), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + AtLeastOneOf: ipConfigurationKeys, + Description: `The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. This setting can be updated, but it cannot be removed after it is set.`, + }, + "allocated_ip_range": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`, + }, + "enable_private_path_for_google_cloud_services": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported.`, + }, + "psc_config": { + Type: schema.TypeSet, + Optional: true, + Description: `PSC settings for a Cloud SQL instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "psc_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether PSC connectivity is enabled for this instance.`, + }, + "allowed_consumer_projects": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: `List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. 
Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric).`, + }, + }, + }, + }, + "ssl_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), + Description: `Specify how SSL connection should be enforced in DB connections.`, + AtLeastOneOf: ipConfigurationKeys, + }, + "server_ca_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"CA_MODE_UNSPECIFIED", "GOOGLE_MANAGED_INTERNAL_CA", "GOOGLE_MANAGED_CAS_CA"}, false), + Description: `Specify how the server certificate's Certificate Authority is hosted.`, + AtLeastOneOf: ipConfigurationKeys, + }, + }, + }, + }, + "location_preference": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `A Google App Engine application whose zone to remain in. 
Must be in the same region as this instance.`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `The preferred compute engine zone.`, + }, + "secondary_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The preferred Compute Engine zone for the secondary/failover`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 7), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Day of week (1-7), starting on Monday`, + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Hour of day (0-23), ignored if day not set`, + }, + "update_track": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: maintenanceWindowKeys, + Description: `Receive updates after one week (canary) or after two weeks (stable) or after five weeks (week5) of notification.`, + }, + }, + }, + Description: `Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. 
The maintenance window is specified in UTC time.`, + }, + "pricing_plan": { + Type: schema.TypeString, + Optional: true, + Default: "PER_USE", + Description: `Pricing plan for this instance, can only be PER_USE.`, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value user label pairs to assign to the instance.`, + }, + "insights_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_insights_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights feature is enabled.`, + }, + "query_string_length": { + Type: schema.TypeInt, + Optional: true, + Default: 1024, + ValidateFunc: validation.IntBetween(256, 4500), + AtLeastOneOf: insightsConfigKeys, + Description: `Maximum query length stored in bytes. Between 256 and 4500. Default to 1024.`, + }, + "record_application_tags": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record application tags from query when enabled.`, + }, + "record_client_address": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record client address when enabled.`, + }, + "query_plans_per_minute": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 20), + AtLeastOneOf: insightsConfigKeys, + Description: `Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. 
Default to 5.`, + }, + }, + }, + Description: `Configuration of Query Insights.`, + }, + "password_validation_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_length": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Minimum number of characters allowed.`, + }, + "complexity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"COMPLEXITY_DEFAULT", "COMPLEXITY_UNSPECIFIED"}, false), + Description: `Password complexity.`, + }, + "reuse_interval": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Number of previous passwords that cannot be reused.`, + }, + "disallow_username_substring": { + Type: schema.TypeBool, + Optional: true, + Description: `Disallow username as a part of the password.`, + }, + "password_change_interval": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL.`, + }, + "enable_password_policy": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the password policy is enabled or not.`, + }, + }, + }, + }, + "connector_enforcement": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"NOT_REQUIRED", "REQUIRED"}, false), + Description: `Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected.`, + }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Configuration to protect against accidental instance deletion.`, + }, + }, + }, + Description: `The settings to use for the database. 
The configuration is detailed below.`, + }, + + "connection_name": { + Type: schema.TypeString, + Computed: true, + Description: `The connection name of the instance to be used in connection strings. For example, when connecting with Cloud SQL Proxy.`, + }, + "maintenance_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Maintenance version.`, + DiffSuppressFunc: maintenanceVersionDiffSuppress, + }, + "available_maintenance_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `Available Maintenance versions.`, + }, + "database_version": { + Type: schema.TypeString, + Required: true, + Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, + }, + + "encryption_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "root_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: `Initial root password. Required for MS SQL Server.`, + }, + "ip_address": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "first_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The first IPv4 address of any type assigned. 
This is to support accessing the first address in the list in a terraform output when the resource is configured with a count.`, + }, + + "public_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "private_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of the instance. If the name is left blank, Terraform will randomly generate one when the instance is first created. This is done because after a name is used, it cannot be reused for up to one week.`, + }, + + "master_instance_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name of the instance that will act as the master in the replication setup. Note, this requires the master to have binary_log_enabled set, as well as existing backups.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The type of the instance. 
The valid values are:- 'SQL_INSTANCE_TYPE_UNSPECIFIED', 'CLOUD_SQL_INSTANCE', 'ON_PREMISES_INSTANCE' and 'READ_REPLICA_INSTANCE'.`, + }, + + "replica_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + // Returned from API on all replicas + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the trusted CA's x509 certificate.`, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's x509 certificate.`, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's private key. The corresponding public key in encoded in the client_certificate.`, + }, + "connect_retry_interval": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `The number of seconds between connect retries. MySQL's default is 60 seconds.`, + }, + "dump_file_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename.`, + }, + "failover_target": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance. 
Not supported for Postgres`, + }, + "master_heartbeat_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Time in ms between replication heartbeats.`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Password for the replication connection.`, + }, + "ssl_cipher": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Permissible ciphers for use in SSL encryption.`, + }, + "username": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Username for replication connection.`, + }, + "verify_server_certificate": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `True if the master's common name value is checked during the SSL handshake.`, + }, + }, + }, + Description: `The configuration for replication.`, + }, + "server_ca_cert": { + Type: schema.TypeList, + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert": { + Type: schema.TypeString, + Computed: true, + Description: `The CA Certificate used to connect to the SQL Instance via SSL.`, + }, + "common_name": { + Type: schema.TypeString, + Computed: true, + Description: `The CN valid for the CA Cert.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation time of the CA Cert.`, + }, + "expiration_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expiration time of the CA Cert.`, + }, + "sha1_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `SHA Fingerprint of the CA Cert.`, + }, + }, + }, + }, + "service_account_email_address": { + Type: schema.TypeString, + Computed: true, + Description: 
`The service account email address assigned to the instance.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + "psc_service_attachment_link": { + Type: schema.TypeString, + Computed: true, + Description: `The link to service attachment of PSC instance.`, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + Description: `The dns name of the instance.`, + }, + "restore_backup_context": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_run_id": { + Type: schema.TypeInt, + Required: true, + Description: `The ID of the backup run to restore from.`, + }, + "instance_id": { + Type: schema.TypeString, + Optional: true, + Description: `The ID of the instance that the backup was taken from.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Description: `The full project ID of the source instance.`, + }, + }, + }, + }, + "clone": { + Type: schema.TypeList, + Optional: true, + Computed: false, + AtLeastOneOf: []string{"settings", "clone"}, + Description: `Configuration for creating a new instance as a clone of another instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_instance_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the instance from which the point in time should be restored.`, + }, + "point_in_time": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), + Description: `The timestamp of the point in time that should be restored.`, + }, + "preferred_zone": { + Type: schema.TypeString, + Optional: true, + Description: `(Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.`, + }, + "database_names": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `(SQL Server only, use with point_in_time) clone only the specified databases from the source instance. Clone all databases if empty.`, + }, + "allocated_ip_range": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +// Makes private_network ForceNew if it is changing from set to nil. The API returns an error +// if this change is attempted in-place. +func privateNetworkCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange("settings.0.ip_configuration.0.private_network") + + if old != "" && new == "" { + if err := d.ForceNew("settings.0.ip_configuration.0.private_network"); err != nil { + return err + } + } + + return nil +} + +// helper function to see if string within list contains a particular substring +func stringContainsSlice(arr []string, str string) bool { + for _, i := range arr { + if strings.Contains(str, i) { + return true + } + } + return false +} + +// Point in time recovery for MySQL database instances needs binary_log_enabled set to true and +// not point_in_time_recovery_enabled, which is confusing to users. This checks for +// point_in_time_recovery_enabled being set to a non-PostgreSQL and non-SQLServer database instances and suggests +// binary_log_enabled. 
+func pitrSupportDbCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + pitr := diff.Get("settings.0.backup_configuration.0.point_in_time_recovery_enabled").(bool) + dbVersion := diff.Get("database_version").(string) + dbVersionPitrValid := []string{"POSTGRES", "SQLSERVER"} + if pitr && !stringContainsSlice(dbVersionPitrValid, dbVersion) { + return fmt.Errorf("point_in_time_recovery_enabled is only available for the following %v. You may want to consider using binary_log_enabled instead and remove point_in_time_recovery_enabled (removing point_in_time_recovery_enabled and adding binary_log_enabled will enable pitr for MYSQL)", dbVersionPitrValid) + } + return nil +} + +func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else { + name = id.UniqueId() + } + + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + // SQL Instances that fail to create are expensive- see https://github.com/hashicorp/terraform-provider-google/issues/7154 + // We can fail fast to stop instance names from getting reserved. 
+ network := d.Get("settings.0.ip_configuration.0.private_network").(string) + if network != "" { + err = sqlDatabaseInstanceServiceNetworkPrecheck(d, config, userAgent, network) + if err != nil { + return err + } + } + + databaseVersion := d.Get("database_version").(string) + + instance := &sqladmin.DatabaseInstance{ + Name: name, + Region: region, + DatabaseVersion: databaseVersion, + MasterInstanceName: d.Get("master_instance_name").(string), + ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})), + } + + cloneContext, cloneSource := expandCloneContext(d.Get("clone").([]interface{})) + + s, ok := d.GetOk("settings") + desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion) + if ok { + instance.Settings = desiredSettings + } + + if _, ok := d.GetOk("maintenance_version"); ok { + instance.MaintenanceVersion = d.Get("maintenance_version").(string) + } + + if _, ok := d.GetOk("instance_type"); ok { + instance.InstanceType = d.Get("instance_type").(string) + } + + instance.RootPassword = d.Get("root_password").(string) + + // Modifying a replica during Create can cause problems if the master is + // modified at the same time. Lock the master until we're done in order + // to prevent that. + if !sqlDatabaseIsMaster(d) { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance.MasterInstanceName)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) + } + + if k, ok := d.GetOk("encryption_key_name"); ok { + instance.DiskEncryptionConfiguration = &sqladmin.DiskEncryptionConfiguration{ + KmsKeyName: k.(string), + } + } + + var patchData *sqladmin.DatabaseInstance + + // BinaryLogging can be enabled on replica instances but only after creation. 
+ if instance.MasterInstanceName != "" && instance.Settings != nil && instance.Settings.BackupConfiguration != nil && instance.Settings.BackupConfiguration.BinaryLogEnabled { + settingsCopy := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion) + bc := settingsCopy.BackupConfiguration + patchData = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{BackupConfiguration: bc}} + + instance.Settings.BackupConfiguration.BinaryLogEnabled = false + } + + var op *sqladmin.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + if cloneContext != nil { + cloneContext.DestinationInstanceName = name + cloneReq := sqladmin.InstancesCloneRequest{CloneContext: cloneContext} + op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, cloneSource, &cloneReq).Do() + } else { + op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do() + } + return operr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = SqlAdminOperationWaitTime(config, op, project, "Create Instance", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + d.SetId("") + return err + } + + // If a default root user was created with a wildcard ('%') hostname, delete it. Note it + // appears to only be created for certain types of databases, like MySQL. + // Users in a replica instance are inherited from the master instance and should be left alone. 
+ // This deletion is done immediately after the instance is created, in order to minimize the + // risk of it being left on the instance, which would present a security concern. + if sqlDatabaseIsMaster(d) { + var users *sqladmin.UsersListResponse + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) + } + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do() + if err == nil { + err = SqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(schema.TimeoutCreate)) + } + return err + }, + }) + if err != nil { + return fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) + } + } + } + } + + // patch any fields that need to be sent postcreation + if patchData != nil { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, 
d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + + // Refresh settings from read as they may have defaulted from the API + s = d.Get("settings") + // If we've created an instance as a clone, we need to update it to set any user defined settings + if len(s.([]interface{})) != 0 && cloneContext != nil && desiredSettings != nil { + instanceUpdate := &sqladmin.DatabaseInstance{ + Settings: desiredSettings, + } + _settings := s.([]interface{})[0].(map[string]interface{}) + instanceUpdate.Settings.SettingsVersion = int64(_settings["version"].(int)) + var op *sqladmin.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + // Refresh the state of the instance after updating the settings + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Perform a backup restore if the backup context exists + if r, ok := d.GetOk("restore_backup_context"); ok { + err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, name, r) + if err != nil { + return err + } + } + + return nil +} + +// Available fields for settings vary between database versions. 
+func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion string) *sqladmin.Settings { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _settings := configured[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + // Version is unset in Create but is set during update + SettingsVersion: int64(_settings["version"].(int)), + DataCacheConfig: expandDataCacheConfig(_settings["data_cache_config"].([]interface{})), + Tier: _settings["tier"].(string), + Edition: _settings["edition"].(string), + AdvancedMachineFeatures: expandSqlServerAdvancedMachineFeatures(_settings["advanced_machine_features"].([]interface{})), + ForceSendFields: []string{"StorageAutoResize", "EnableGoogleMlIntegration", "EnableDataplexIntegration"}, + ActivationPolicy: _settings["activation_policy"].(string), + ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), + DenyMaintenancePeriods: expandDenyMaintenancePeriod(_settings["deny_maintenance_period"].([]interface{})), + SqlServerAuditConfig: expandSqlServerAuditConfig(_settings["sql_server_audit_config"].([]interface{})), + TimeZone: _settings["time_zone"].(string), + AvailabilityType: _settings["availability_type"].(string), + ConnectorEnforcement: _settings["connector_enforcement"].(string), + Collation: _settings["collation"].(string), + DataDiskSizeGb: int64(_settings["disk_size"].(int)), + DataDiskType: _settings["disk_type"].(string), + PricingPlan: _settings["pricing_plan"].(string), + DeletionProtectionEnabled: _settings["deletion_protection_enabled"].(bool), + EnableGoogleMlIntegration: _settings["enable_google_ml_integration"].(bool), + EnableDataplexIntegration: _settings["enable_dataplex_integration"].(bool), + UserLabels: tpgresource.ConvertStringMap(_settings["user_labels"].(map[string]interface{})), + BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), + DatabaseFlags: 
expandDatabaseFlags(_settings["database_flags"].(*schema.Set).List()), + IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{}), databaseVersion), + LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), + MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), + InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), + PasswordValidationPolicy: expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})), + } + + resize := _settings["disk_autoresize"].(bool) + settings.StorageAutoResize = &resize + settings.StorageAutoResizeLimit = int64(_settings["disk_autoresize_limit"].(int)) + + return settings +} + +func expandReplicaConfiguration(configured []interface{}) *sqladmin.ReplicaConfiguration { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _replicaConfiguration := configured[0].(map[string]interface{}) + return &sqladmin.ReplicaConfiguration{ + FailoverTarget: _replicaConfiguration["failover_target"].(bool), + + // MysqlReplicaConfiguration has been flattened in the TF schema, so + // we'll keep it flat here instead of another expand method. 
+ MysqlReplicaConfiguration: &sqladmin.MySqlReplicaConfiguration{ + CaCertificate: _replicaConfiguration["ca_certificate"].(string), + ClientCertificate: _replicaConfiguration["client_certificate"].(string), + ClientKey: _replicaConfiguration["client_key"].(string), + ConnectRetryInterval: int64(_replicaConfiguration["connect_retry_interval"].(int)), + DumpFilePath: _replicaConfiguration["dump_file_path"].(string), + MasterHeartbeatPeriod: int64(_replicaConfiguration["master_heartbeat_period"].(int)), + Password: _replicaConfiguration["password"].(string), + SslCipher: _replicaConfiguration["ssl_cipher"].(string), + Username: _replicaConfiguration["username"].(string), + VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool), + }, + } +} + +func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, string) { + if len(configured) == 0 || configured[0] == nil { + return nil, "" + } + + _cloneConfiguration := configured[0].(map[string]interface{}) + + databaseNames := []string{} + rawDatabaseNames := _cloneConfiguration["database_names"].([]interface{}) + for _, db := range rawDatabaseNames { + databaseNames = append(databaseNames, db.(string)) + } + + return &sqladmin.CloneContext{ + PointInTime: _cloneConfiguration["point_in_time"].(string), + PreferredZone: _cloneConfiguration["preferred_zone"].(string), + DatabaseNames: databaseNames, + AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string), + }, _cloneConfiguration["source_instance_name"].(string) +} + +func expandMaintenanceWindow(configured []interface{}) *sqladmin.MaintenanceWindow { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + window := configured[0].(map[string]interface{}) + return &sqladmin.MaintenanceWindow{ + Day: int64(window["day"].(int)), + Hour: int64(window["hour"].(int)), + UpdateTrack: window["update_track"].(string), + ForceSendFields: []string{"Hour"}, + } +} + +func expandLocationPreference(configured 
[]interface{}) *sqladmin.LocationPreference { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _locationPreference := configured[0].(map[string]interface{}) + return &sqladmin.LocationPreference{ + FollowGaeApplication: _locationPreference["follow_gae_application"].(string), + Zone: _locationPreference["zone"].(string), + SecondaryZone: _locationPreference["secondary_zone"].(string), + } +} + +func expandIpConfiguration(configured []interface{}, databaseVersion string) *sqladmin.IpConfiguration { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _ipConfiguration := configured[0].(map[string]interface{}) + + forceSendFields := []string{"Ipv4Enabled"} + nullFields := []string{"RequireSsl"} + + if !strings.HasPrefix(databaseVersion, "SQLSERVER") { + forceSendFields = append(forceSendFields, "EnablePrivatePathForGoogleCloudServices") + } + + return &sqladmin.IpConfiguration{ + Ipv4Enabled: _ipConfiguration["ipv4_enabled"].(bool), + PrivateNetwork: _ipConfiguration["private_network"].(string), + AllocatedIpRange: _ipConfiguration["allocated_ip_range"].(string), + AuthorizedNetworks: expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()), + EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool), + ForceSendFields: forceSendFields, + NullFields: nullFields, + PscConfig: expandPscConfig(_ipConfiguration["psc_config"].(*schema.Set).List()), + SslMode: _ipConfiguration["ssl_mode"].(string), + ServerCaMode: _ipConfiguration["server_ca_mode"].(string), + } +} + +func expandPscConfig(configured []interface{}) *sqladmin.PscConfig { + for _, _pscConfig := range configured { + _entry := _pscConfig.(map[string]interface{}) + return &sqladmin.PscConfig{ + PscEnabled: _entry["psc_enabled"].(bool), + AllowedConsumerProjects: tpgresource.ConvertStringArr(_entry["allowed_consumer_projects"].(*schema.Set).List()), + } + } + + return nil +} + +func 
// expandAuthorizedNetworks converts the `authorized_networks` set elements
// into API AclEntry objects.
func expandAuthorizedNetworks(configured []interface{}) []*sqladmin.AclEntry {
	an := make([]*sqladmin.AclEntry, 0, len(configured))
	for _, _acl := range configured {
		_entry := _acl.(map[string]interface{})
		an = append(an, &sqladmin.AclEntry{
			ExpirationTime: _entry["expiration_time"].(string),
			Name:           _entry["name"].(string),
			Value:          _entry["value"].(string),
		})
	}

	return an
}

// expandDatabaseFlags converts the `database_flags` list into API objects,
// skipping nil entries.
func expandDatabaseFlags(configured []interface{}) []*sqladmin.DatabaseFlags {
	databaseFlags := make([]*sqladmin.DatabaseFlags, 0, len(configured))
	for _, _flag := range configured {
		if _flag == nil {
			continue
		}
		_entry := _flag.(map[string]interface{})

		databaseFlags = append(databaseFlags, &sqladmin.DatabaseFlags{
			Name:  _entry["name"].(string),
			Value: _entry["value"].(string),
		})
	}
	return databaseFlags
}

// expandDataCacheConfig converts the single-element `data_cache_config` block
// into the corresponding API object. Returns nil when the block is absent.
func expandDataCacheConfig(configured interface{}) *sqladmin.DataCacheConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &sqladmin.DataCacheConfig{
		DataCacheEnabled: config["data_cache_enabled"].(bool),
	}
}

// expandBackupConfiguration converts the single-element `backup_configuration`
// block into the corresponding API object. Returns nil when absent.
func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfiguration {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_backupConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.BackupConfiguration{
		BinaryLogEnabled:            _backupConfiguration["binary_log_enabled"].(bool),
		BackupRetentionSettings:     expandBackupRetentionSettings(_backupConfiguration["backup_retention_settings"]),
		Enabled:                     _backupConfiguration["enabled"].(bool),
		StartTime:                   _backupConfiguration["start_time"].(string),
		Location:                    _backupConfiguration["location"].(string),
		TransactionLogRetentionDays: int64(_backupConfiguration["transaction_log_retention_days"].(int)),
		PointInTimeRecoveryEnabled:  _backupConfiguration["point_in_time_recovery_enabled"].(bool),
		// Booleans are force-sent so explicit `false` values are not dropped
		// as zero values by the generated API client.
		ForceSendFields: []string{"BinaryLogEnabled", "Enabled", "PointInTimeRecoveryEnabled"},
	}
}

// expandBackupRetentionSettings converts the nested
// `backup_retention_settings` block into the corresponding API object.
func expandBackupRetentionSettings(configured interface{}) *sqladmin.BackupRetentionSettings {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &sqladmin.BackupRetentionSettings{
		RetainedBackups: int64(config["retained_backups"].(int)),
		RetentionUnit:   config["retention_unit"].(string),
	}
}

// expandActiveDirectoryConfig converts the single-element
// `active_directory_config` block into the corresponding API object.
func expandActiveDirectoryConfig(configured interface{}) *sqladmin.SqlActiveDirectoryConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.SqlActiveDirectoryConfig{
		Domain: config["domain"].(string),
	}
}

// expandDenyMaintenancePeriod converts the `deny_maintenance_period` list into
// API objects, skipping nil entries.
func expandDenyMaintenancePeriod(configured []interface{}) []*sqladmin.DenyMaintenancePeriod {
	denyMaintenancePeriod := make([]*sqladmin.DenyMaintenancePeriod, 0, len(configured))

	for _, _flag := range configured {
		if _flag == nil {
			continue
		}
		_entry := _flag.(map[string]interface{})

		denyMaintenancePeriod = append(denyMaintenancePeriod, &sqladmin.DenyMaintenancePeriod{
			EndDate:   _entry["end_date"].(string),
			StartDate: _entry["start_date"].(string),
			Time:      _entry["time"].(string),
		})
	}
	return denyMaintenancePeriod
}

// expandSqlServerAdvancedMachineFeatures converts the single-element
// `advanced_machine_features` block into the corresponding API object.
func expandSqlServerAdvancedMachineFeatures(configured interface{}) *sqladmin.AdvancedMachineFeatures {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.AdvancedMachineFeatures{
		ThreadsPerCore: int64(config["threads_per_core"].(int)),
	}
}

// expandSqlServerAuditConfig converts the single-element
// `sql_server_audit_config` block into the corresponding API object.
func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.SqlServerAuditConfig{
		Bucket:            config["bucket"].(string),
		RetentionInterval: config["retention_interval"].(string),
		UploadInterval:    config["upload_interval"].(string),
	}
}
+} + +func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _insightsConfig := configured[0].(map[string]interface{}) + return &sqladmin.InsightsConfig{ + QueryInsightsEnabled: _insightsConfig["query_insights_enabled"].(bool), + QueryStringLength: int64(_insightsConfig["query_string_length"].(int)), + RecordApplicationTags: _insightsConfig["record_application_tags"].(bool), + RecordClientAddress: _insightsConfig["record_client_address"].(bool), + QueryPlansPerMinute: int64(_insightsConfig["query_plans_per_minute"].(int)), + } +} + +func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.PasswordValidationPolicy { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _passwordValidationPolicy := configured[0].(map[string]interface{}) + return &sqladmin.PasswordValidationPolicy{ + MinLength: int64(_passwordValidationPolicy["min_length"].(int)), + Complexity: _passwordValidationPolicy["complexity"].(string), + ReuseInterval: int64(_passwordValidationPolicy["reuse_interval"].(int)), + DisallowUsernameSubstring: _passwordValidationPolicy["disallow_username_substring"].(bool), + PasswordChangeInterval: _passwordValidationPolicy["password_change_interval"].(string), + EnablePasswordPolicy: _passwordValidationPolicy["enable_password_policy"].(bool), + } +} + +func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + var instance *sqladmin.DatabaseInstance + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do() + return rerr + }, + 
Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) + } + + if err := d.Set("name", instance.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("region", instance.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("database_version", instance.DatabaseVersion); err != nil { + return fmt.Errorf("Error setting database_version: %s", err) + } + if err := d.Set("connection_name", instance.ConnectionName); err != nil { + return fmt.Errorf("Error setting connection_name: %s", err) + } + if err := d.Set("maintenance_version", instance.MaintenanceVersion); err != nil { + return fmt.Errorf("Error setting maintenance_version: %s", err) + } + if err := d.Set("available_maintenance_versions", instance.AvailableMaintenanceVersions); err != nil { + return fmt.Errorf("Error setting available_maintenance_version: %s", err) + } + if err := d.Set("service_account_email_address", instance.ServiceAccountEmailAddress); err != nil { + return fmt.Errorf("Error setting service_account_email_address: %s", err) + } + if err := d.Set("instance_type", instance.InstanceType); err != nil { + return fmt.Errorf("Error setting instance_type: %s", err) + } + if err := d.Set("settings", flattenSettings(instance.Settings, d)); err != nil { + log.Printf("[WARN] Failed to set SQL Database Instance Settings") + } + + if instance.DiskEncryptionConfiguration != nil { + if err := d.Set("encryption_key_name", instance.DiskEncryptionConfiguration.KmsKeyName); err != nil { + return fmt.Errorf("Error setting encryption_key_name: %s", err) + } + } + + if err := d.Set("replica_configuration", flattenReplicaConfiguration(instance.ReplicaConfiguration, d)); err != nil { + log.Printf("[WARN] 
Failed to set SQL Database Instance Replica Configuration") + } + ipAddresses := flattenIpAddresses(instance.IpAddresses) + if err := d.Set("ip_address", ipAddresses); err != nil { + log.Printf("[WARN] Failed to set SQL Database Instance IP Addresses") + } + + if len(ipAddresses) > 0 { + if err := d.Set("first_ip_address", ipAddresses[0]["ip_address"]); err != nil { + return fmt.Errorf("Error setting first_ip_address: %s", err) + } + } + + publicIpAddress := "" + privateIpAddress := "" + for _, ip := range instance.IpAddresses { + if publicIpAddress == "" && ip.Type == "PRIMARY" { + publicIpAddress = ip.IpAddress + } + + if privateIpAddress == "" && ip.Type == "PRIVATE" { + privateIpAddress = ip.IpAddress + } + } + + if err := d.Set("public_ip_address", publicIpAddress); err != nil { + return fmt.Errorf("Error setting public_ip_address: %s", err) + } + if err := d.Set("private_ip_address", privateIpAddress); err != nil { + return fmt.Errorf("Error setting private_ip_address: %s", err) + } + + if err := d.Set("server_ca_cert", flattenServerCaCerts([]*sqladmin.SslCert{instance.ServerCaCert})); err != nil { + log.Printf("[WARN] Failed to set SQL Database CA Certificate") + } + + if err := d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")); err != nil { + return fmt.Errorf("Error setting master_instance_name: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("self_link", instance.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("psc_service_attachment_link", instance.PscServiceAttachmentLink); err != nil { + return fmt.Errorf("Error setting psc_service_attachment_link: %s", err) + } + if err := d.Set("dns_name", instance.DnsName); err != nil { + return fmt.Errorf("Error setting dns_name: %s", err) + } + d.SetId(instance.Name) + + return nil +} + +func resourceSqlDatabaseInstanceUpdate(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + var maintenance_version string + if v, ok := d.GetOk("maintenance_version"); ok { + maintenance_version = v.(string) + } + + promoteReadReplicaRequired := false + if d.HasChange("instance_type") { + oldInstanceType, newInstanceType := d.GetChange("instance_type") + + if isReplicaPromoteRequested(nil, oldInstanceType, newInstanceType, nil) { + err = checkPromoteConfigurations(d) + if err != nil { + return err + } + + promoteReadReplicaRequired = true + } + } + + desiredSetting := d.Get("settings") + var op *sqladmin.Operation + var instance *sqladmin.DatabaseInstance + + databaseVersion := d.Get("database_version").(string) + + // Check if the activation policy is being updated. If it is being changed to ALWAYS this should be done first. 
+ if d.HasChange("settings.0.activation_policy") && d.Get("settings.0.activation_policy").(string) == "ALWAYS" { + instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{ActivationPolicy: "ALWAYS"}} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the database version is being updated, because patching database version is an atomic operation and can not be + // performed with other fields, we first patch database version before updating the rest of the fields. 
+ if d.HasChange("database_version") { + instance = &sqladmin.DatabaseInstance{DatabaseVersion: databaseVersion} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the root_password is being updated, because updating root_password is an atomic operation and can not be + // performed with other fields, we first update root password before updating the rest of the fields. 
+ if d.HasChange("root_password") { + oldPwd, newPwd := d.GetChange("root_password") + password := newPwd.(string) + dv := d.Get("database_version").(string) + name := "" + host := "" + if strings.Contains(dv, "MYSQL") { + name = "root" + host = "%" + } else if strings.Contains(dv, "POSTGRES") { + name = "postgres" + } else if strings.Contains(dv, "SQLSERVER") { + name = "sqlserver" + if len(password) == 0 { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, root password cannot be empty for SQL Server instance.") + } + } else { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, invalid database version") + } + instance := d.Get("name").(string) + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + } + + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) + var op *sqladmin.Operation + updateFunc := func() error { + op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do() + return err + } + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: updateFunc, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, failed to update root_password : %s", err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Insert User", userAgent, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, failed to update root_password : %s", err) 
+ } + } + + // Check if the maintenance version is being updated, because patching maintenance version is an atomic operation and can not be + // performed with other fields, we first patch maintenance version before updating the rest of the fields. + if d.HasChange("maintenance_version") { + instance = &sqladmin.DatabaseInstance{MaintenanceVersion: maintenance_version} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + if promoteReadReplicaRequired { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.PromoteReplica(project, d.Get("name").(string)).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Promote Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the edition is being updated, because patching edition is an atomic 
operation and can not be + // performed with other fields, we first patch edition, tier and data cache config before updating the rest of the fields. + if d.HasChange("settings.0.edition") { + edition := d.Get("settings.0.edition").(string) + tier := d.Get("settings.0.tier").(string) + dataCacheConfig := expandDataCacheConfig(d.Get("settings.0.data_cache_config").([]interface{})) + instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{Edition: edition, Tier: tier, DataCacheConfig: dataCacheConfig}} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + s := d.Get("settings") + instance = &sqladmin.DatabaseInstance{ + Settings: expandSqlDatabaseInstanceSettings(desiredSetting.([]interface{}), databaseVersion), + } + _settings := s.([]interface{})[0].(map[string]interface{}) + // Instance.Patch operation on completion updates the settings proto version by +8. As terraform does not know this it tries + // to make an update call with the proto version before patch and fails. To resolve this issue we update the setting version + // before making the update call. 
+ instance.Settings.SettingsVersion = int64(_settings["version"].(int)) + // Collation cannot be included in the update request + instance.Settings.Collation = "" + + // Lock on the master_instance_name just in case updating any replica + // settings causes operations on the master. + if v, ok := d.GetOk("master_instance_name"); ok { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) + } + + if _, ok := d.GetOk("instance_type"); ok { + instance.InstanceType = d.Get("instance_type").(string) + } + + // Database Version is required for all calls with Google ML integration enabled or it will be rejected by the API. + if d.Get("settings.0.enable_google_ml_integration").(bool) { + instance.DatabaseVersion = databaseVersion + } + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + // Perform a backup restore if the backup context exists and has changed + if r, ok := d.GetOk("restore_backup_context"); ok { + if d.HasChange("restore_backup_context") { + err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, d.Get("name").(string), r) + if err != nil { + return err + } + } + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func maintenanceVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // Ignore the database version part and 
only compare the last part of the maintenance version which represents the release date of the version. + if len(old) > 14 && len(new) > 14 && old[len(old)-14:] >= new[len(new)-14:] { + log.Printf("[DEBUG] Maintenance version in configuration [%s] is older than current maintenance version [%s] on instance. Suppressing diff", new, old) + return true + } else { + return false + } +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Check if deletion protection is enabled. + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Error, failed to delete instance because deletion_protection is set to true. Set it to false to proceed with instance deletion") + } + + // Lock on the master_instance_name just in case deleting a replica causes + // operations on the master. 
+ if v, ok := d.GetOk("master_instance_name"); ok { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) + } + + var op *sqladmin.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() + if rerr != nil { + return rerr + } + err = SqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError, IsSqlInternalError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + return nil +} + +func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSettings(settings *sqladmin.Settings, d *schema.ResourceData) []map[string]interface{} { + data := map[string]interface{}{ + "version": settings.SettingsVersion, + "tier": settings.Tier, + "edition": flattenEdition(settings.Edition), + "activation_policy": 
settings.ActivationPolicy, + "availability_type": settings.AvailabilityType, + "collation": settings.Collation, + "connector_enforcement": settings.ConnectorEnforcement, + "disk_type": settings.DataDiskType, + "disk_size": settings.DataDiskSizeGb, + "pricing_plan": settings.PricingPlan, + "user_labels": settings.UserLabels, + "password_validation_policy": settings.PasswordValidationPolicy, + "time_zone": settings.TimeZone, + "deletion_protection_enabled": settings.DeletionProtectionEnabled, + } + + if settings.ActiveDirectoryConfig != nil { + data["active_directory_config"] = flattenActiveDirectoryConfig(settings.ActiveDirectoryConfig) + } + + if settings.DenyMaintenancePeriods != nil { + data["deny_maintenance_period"] = flattenDenyMaintenancePeriod(settings.DenyMaintenancePeriods) + } + + if settings.SqlServerAuditConfig != nil { + data["sql_server_audit_config"] = flattenSqlServerAuditConfig(settings.SqlServerAuditConfig) + } + + if settings.BackupConfiguration != nil { + data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) + } + + if settings.DatabaseFlags != nil { + data["database_flags"] = flattenDatabaseFlags(settings.DatabaseFlags) + } + + if settings.IpConfiguration != nil { + data["ip_configuration"] = flattenIpConfiguration(settings.IpConfiguration, d) + } + + if settings.LocationPreference != nil { + data["location_preference"] = flattenLocationPreference(settings.LocationPreference) + } + + if settings.MaintenanceWindow != nil { + data["maintenance_window"] = flattenMaintenanceWindow(settings.MaintenanceWindow) + } + + if settings.InsightsConfig != nil { + data["insights_config"] = flattenInsightsConfig(settings.InsightsConfig) + } + + data["disk_autoresize"] = settings.StorageAutoResize + data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit + + data["enable_google_ml_integration"] = settings.EnableGoogleMlIntegration + data["enable_dataplex_integration"] = settings.EnableDataplexIntegration + + if 
settings.UserLabels != nil { + data["user_labels"] = settings.UserLabels + } + + if settings.PasswordValidationPolicy != nil { + data["password_validation_policy"] = flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) + } + + if settings.DataCacheConfig != nil { + data["data_cache_config"] = flattenDataCacheConfig(settings.DataCacheConfig) + } + + if settings.AdvancedMachineFeatures != nil { + data["advanced_machine_features"] = flattenSqlServerAdvancedMachineFeatures(settings.AdvancedMachineFeatures) + } + + return []map[string]interface{}{data} +} + +func flattenDataCacheConfig(d *sqladmin.DataCacheConfig) []map[string]interface{} { + if d == nil { + return nil + } + return []map[string]interface{}{ + { + "data_cache_enabled": d.DataCacheEnabled, + }, + } +} + +func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguration) []map[string]interface{} { + data := map[string]interface{}{ + "binary_log_enabled": backupConfiguration.BinaryLogEnabled, + "enabled": backupConfiguration.Enabled, + "start_time": backupConfiguration.StartTime, + "location": backupConfiguration.Location, + "point_in_time_recovery_enabled": backupConfiguration.PointInTimeRecoveryEnabled, + "backup_retention_settings": flattenBackupRetentionSettings(backupConfiguration.BackupRetentionSettings), + "transaction_log_retention_days": backupConfiguration.TransactionLogRetentionDays, + } + + return []map[string]interface{}{data} +} + +func flattenBackupRetentionSettings(b *sqladmin.BackupRetentionSettings) []map[string]interface{} { + if b == nil { + return nil + } + return []map[string]interface{}{ + { + "retained_backups": b.RetainedBackups, + "retention_unit": b.RetentionUnit, + }, + } +} + +func flattenActiveDirectoryConfig(sqlActiveDirectoryConfig *sqladmin.SqlActiveDirectoryConfig) []map[string]interface{} { + if sqlActiveDirectoryConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "domain": sqlActiveDirectoryConfig.Domain, + }, + } +} + 
// flattenDenyMaintenancePeriod converts API DenyMaintenancePeriod objects
// into state form.
func flattenDenyMaintenancePeriod(denyMaintenancePeriod []*sqladmin.DenyMaintenancePeriod) []map[string]interface{} {
	flags := make([]map[string]interface{}, 0, len(denyMaintenancePeriod))

	for _, flag := range denyMaintenancePeriod {
		data := map[string]interface{}{
			"end_date":   flag.EndDate,
			"start_date": flag.StartDate,
			"time":       flag.Time,
		}

		flags = append(flags, data)
	}

	return flags
}

// flattenSqlServerAdvancedMachineFeatures converts the API
// AdvancedMachineFeatures into state form; nil in, nil out.
func flattenSqlServerAdvancedMachineFeatures(advancedMachineFeatures *sqladmin.AdvancedMachineFeatures) []map[string]interface{} {
	if advancedMachineFeatures == nil {
		return nil
	}
	return []map[string]interface{}{
		{
			"threads_per_core": advancedMachineFeatures.ThreadsPerCore,
		},
	}
}

// flattenSqlServerAuditConfig converts the API SqlServerAuditConfig into
// state form; nil in, nil out.
func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} {
	if sqlServerAuditConfig == nil {
		return nil
	}
	return []map[string]interface{}{
		{
			"bucket":             sqlServerAuditConfig.Bucket,
			"retention_interval": sqlServerAuditConfig.RetentionInterval,
			"upload_interval":    sqlServerAuditConfig.UploadInterval,
		},
	}
}

// flattenDatabaseFlags converts API DatabaseFlags objects into state form.
func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} {
	flags := make([]map[string]interface{}, 0, len(databaseFlags))

	for _, flag := range databaseFlags {
		data := map[string]interface{}{
			"name":  flag.Name,
			"value": flag.Value,
		}

		flags = append(flags, data)
	}

	return flags
}

// flattenIpConfiguration converts the API IpConfiguration into state form.
// Optional sub-blocks are only set when present on the API response.
func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema.ResourceData) interface{} {
	data := map[string]interface{}{
		"ipv4_enabled":       ipConfiguration.Ipv4Enabled,
		"private_network":    ipConfiguration.PrivateNetwork,
		"allocated_ip_range": ipConfiguration.AllocatedIpRange,
		"enable_private_path_for_google_cloud_services": ipConfiguration.EnablePrivatePathForGoogleCloudServices,
		"ssl_mode":       ipConfiguration.SslMode,
		"server_ca_mode": ipConfiguration.ServerCaMode,
	}

	if ipConfiguration.AuthorizedNetworks != nil {
		data["authorized_networks"] = flattenAuthorizedNetworks(ipConfiguration.AuthorizedNetworks)
	}

	if ipConfiguration.PscConfig != nil {
		data["psc_config"] = flattenPscConfigs(ipConfiguration.PscConfig)
	}

	return []map[string]interface{}{data}
}

// flattenPscConfigs converts the API PscConfig into state form, rebuilding
// the allowed_consumer_projects set.
func flattenPscConfigs(pscConfig *sqladmin.PscConfig) interface{} {
	data := map[string]interface{}{
		"psc_enabled":               pscConfig.PscEnabled,
		"allowed_consumer_projects": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(pscConfig.AllowedConsumerProjects)),
	}

	return []map[string]interface{}{data}
}

// flattenAuthorizedNetworks converts API AclEntry objects back into the
// authorized_networks set.
func flattenAuthorizedNetworks(entries []*sqladmin.AclEntry) interface{} {
	networks := schema.NewSet(schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), []interface{}{})

	for _, entry := range entries {
		data := map[string]interface{}{
			"expiration_time": entry.ExpirationTime,
			"name":            entry.Name,
			"value":           entry.Value,
		}

		networks.Add(data)
	}

	return networks
}

// flattenLocationPreference converts the API LocationPreference into state
// form.
func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) interface{} {
	data := map[string]interface{}{
		"follow_gae_application": locationPreference.FollowGaeApplication,
		"zone":                   locationPreference.Zone,
		"secondary_zone":         locationPreference.SecondaryZone,
	}

	return []map[string]interface{}{data}
}

// flattenMaintenanceWindow converts the API MaintenanceWindow into state
// form.
func flattenMaintenanceWindow(maintenanceWindow *sqladmin.MaintenanceWindow) interface{} {
	data := map[string]interface{}{
		"day":          maintenanceWindow.Day,
		"hour":         maintenanceWindow.Hour,
		"update_track": maintenanceWindow.UpdateTrack,
	}

	return []map[string]interface{}{data}
}

// flattenReplicaConfiguration converts the API ReplicaConfiguration into
// state form, preserving write-only credential fields from prior state.
func flattenReplicaConfiguration(replicaConfiguration *sqladmin.ReplicaConfiguration, d *schema.ResourceData) []map[string]interface{} {
	rc := []map[string]interface{}{}

	if replicaConfiguration != nil {
		data := map[string]interface{}{
			"failover_target": replicaConfiguration.FailoverTarget,

			// Don't attempt to assign anything from replicaConfiguration.MysqlReplicaConfiguration,
			// since those fields are set on create and then not stored. See description at
			// https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances.
			// Instead, set them to the values they previously had so we don't set them all to zero.
			"ca_certificate":            d.Get("replica_configuration.0.ca_certificate"),
			"client_certificate":        d.Get("replica_configuration.0.client_certificate"),
			"client_key":                d.Get("replica_configuration.0.client_key"),
			"connect_retry_interval":    d.Get("replica_configuration.0.connect_retry_interval"),
			"dump_file_path":            d.Get("replica_configuration.0.dump_file_path"),
			"master_heartbeat_period":   d.Get("replica_configuration.0.master_heartbeat_period"),
			"password":                  d.Get("replica_configuration.0.password"),
			"ssl_cipher":                d.Get("replica_configuration.0.ssl_cipher"),
			"username":                  d.Get("replica_configuration.0.username"),
			"verify_server_certificate": d.Get("replica_configuration.0.verify_server_certificate"),
		}
		rc = append(rc, data)
	}

	return rc
}

// flattenIpAddresses converts API IpMapping objects into state form.
func flattenIpAddresses(ipAddresses []*sqladmin.IpMapping) []map[string]interface{} {
	var ips []map[string]interface{}

	for _, ip := range ipAddresses {
		data := map[string]interface{}{
			"ip_address":     ip.IpAddress,
			"type":           ip.Type,
			"time_to_retire": ip.TimeToRetire,
		}

		ips = append(ips, data)
	}

	return ips
}

// flattenServerCaCerts converts API SslCert objects into state form, skipping
// nil entries (the instance may have no server CA cert).
func flattenServerCaCerts(caCerts []*sqladmin.SslCert) []map[string]interface{} {
	var certs []map[string]interface{}

	for _, caCert := range caCerts {
		if caCert != nil {
			data := map[string]interface{}{
				"cert":             caCert.Cert,
				"common_name":      caCert.CommonName,
				"create_time":      caCert.CreateTime,
				"expiration_time":  caCert.ExpirationTime,
				"sha1_fingerprint": caCert.Sha1Fingerprint,
			}

			certs = append(certs, data)
		}
	}

	return certs
}

// flattenInsightsConfig converts the API InsightsConfig into state form.
func flattenInsightsConfig(insightsConfig *sqladmin.InsightsConfig) interface{} {
	data := map[string]interface{}{
		"query_insights_enabled":  insightsConfig.QueryInsightsEnabled,
		"query_string_length":     insightsConfig.QueryStringLength,
		"record_application_tags": insightsConfig.RecordApplicationTags,
		"record_client_address":   insightsConfig.RecordClientAddress,
		"query_plans_per_minute":  insightsConfig.QueryPlansPerMinute,
	}

	return []map[string]interface{}{data}
}

// flattenPasswordValidationPolicy converts the API PasswordValidationPolicy
// into state form.
func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} {
	data := map[string]interface{}{
		"min_length":                  passwordValidationPolicy.MinLength,
		"complexity":                  passwordValidationPolicy.Complexity,
		"reuse_interval":              passwordValidationPolicy.ReuseInterval,
		"disallow_username_substring": passwordValidationPolicy.DisallowUsernameSubstring,
		"password_change_interval":    passwordValidationPolicy.PasswordChangeInterval,
		"enable_password_policy":      passwordValidationPolicy.EnablePasswordPolicy,
	}
	return []map[string]interface{}{data}
}

// flattenEdition maps an unset edition to the API default "ENTERPRISE" so the
// state value is always populated.
func flattenEdition(v interface{}) string {
	if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) {
		return "ENTERPRISE"
	}

	return v.(string)
}

// instanceMutexKey builds the mutex-store key used to serialize operations on
// a single instance.
func instanceMutexKey(project, instance_name string) string {
	return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name)
}

// sqlDatabaseIsMaster returns true if the provided schema.ResourceData represents a
// master SQL Instance, and false if it is a replica.
func sqlDatabaseIsMaster(d *schema.ResourceData) bool {
	_, ok := d.GetOk("master_instance_name")
	return !ok
}

// NOTE(review): this function continues beyond the visible chunk; only its
// opening lines are shown here.
func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *transport_tpg.Config, userAgent, network string) error {
	log.Printf("[DEBUG] checking network %q for at least one service networking connection", network)
	// This call requires projects.get permissions, which may not have been granted to the Terraform actor,
	// particularly in shared VPC setups. Most will! But it's not strictly required.
+ serviceNetworkingNetworkName, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Printf("[DEBUG] retrieved googleapi error while creating sn name for %q. precheck skipped. code %v and message: %s", network, gerr.Code, gerr.Body) + return nil + } + + return err + } + + response, err := config.NewServiceNetworkingClient(userAgent).Services.Connections.List("services/servicenetworking.googleapis.com").Network(serviceNetworkingNetworkName).Do() + if err != nil { + // It is possible that the actor creating the SQL Instance might not have permissions to call servicenetworking.services.connections.list + log.Printf("[WARNING] Failed to list Service Networking of the project. Skipped Service Networking precheck.") + return nil + } + + if len(response.Connections) < 1 { + return fmt.Errorf("Error, failed to create instance because the network doesn't have at least 1 private services connection. 
Please see https://cloud.google.com/sql/docs/mysql/private-ip#network_requirements for how to create this connection.") + } + + return nil +} + +func expandRestoreBackupContext(configured []interface{}) *sqladmin.RestoreBackupContext { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _rc := configured[0].(map[string]interface{}) + return &sqladmin.RestoreBackupContext{ + BackupRunId: int64(_rc["backup_run_id"].(int)), + InstanceId: _rc["instance_id"].(string), + Project: _rc["project"].(string), + } +} + +func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *transport_tpg.Config, userAgent, project, instanceId string, r interface{}) error { + log.Printf("[DEBUG] Initiating SQL database instance backup restore") + restoreContext := r.([]interface{}) + + backupRequest := &sqladmin.InstancesRestoreBackupRequest{ + RestoreBackupContext: expandRestoreBackupContext(restoreContext), + } + + var op *sqladmin.Operation + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() + return operr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to restore instance from backup %s: %s", instanceId, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Restore Backup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return nil +} + +func caseDiffDashSuppress(_, old, new string, _ *schema.ResourceData) bool { + postReplaceNew := strings.Replace(new, "-", "_", -1) + return strings.ToUpper(postReplaceNew) == strings.ToUpper(old) +} + +func isMasterInstanceNameSet(_ context.Context, oldMasterInstanceName interface{}, newMasterInstanceName interface{}, _ interface{}) bool { + new := 
newMasterInstanceName.(string) + if new == "" { + return false + } + + return true +} + +func isReplicaPromoteRequested(_ context.Context, oldInstanceType interface{}, newInstanceType interface{}, _ interface{}) bool { + oldInstanceType = oldInstanceType.(string) + newInstanceType = newInstanceType.(string) + + if newInstanceType == "CLOUD_SQL_INSTANCE" && oldInstanceType == "READ_REPLICA_INSTANCE" { + return true + } + + return false +} + +func checkPromoteConfigurations(d *schema.ResourceData) error { + masterInstanceName := d.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := d.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + return validatePromoteConfigurations(masterInstanceName, replicaConfiguration) +} + +func checkPromoteConfigurationsAndUpdateDiff(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + masterInstanceName := diff.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := diff.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + err := validatePromoteConfigurations(masterInstanceName, replicaConfiguration) + if err != nil { + return err + } + + err = diff.SetNew("master_instance_name", nil) + if err != nil { + return err + } + + err = diff.SetNew("replica_configuration", nil) + if err != nil { + return err + } + return nil +} + +func validatePromoteConfigurations(masterInstanceName cty.Value, replicaConfigurations []cty.Value) error { + if !masterInstanceName.IsNull() { + return fmt.Errorf("Replica promote configuration check failed. Please remove master_instance_name and try again.") + } + + if len(replicaConfigurations) != 0 { + return fmt.Errorf("Replica promote configuration check failed. 
Please remove replica_configuration and try again.") + } + return nil +} diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go index 8ad06ff90f82..d8b0d19a4763 100644 --- a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go @@ -47,13 +47,23 @@ func ResourceStorageBucket() *schema.Resource { Read: schema.DefaultTimeout(4 * time.Minute), }, - SchemaVersion: 1, + SchemaVersion: 3, StateUpgraders: []schema.StateUpgrader{ { Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), Upgrade: ResourceStorageBucketStateUpgradeV0, Version: 0, }, + { + Type: resourceStorageBucketV1().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV1, + Version: 1, + }, + { + Type: resourceStorageBucketV2().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV2, + Version: 2, + }, }, Schema: map[string]*schema.Schema{ @@ -226,12 +236,6 @@ func ResourceStorageBucket() *schema.Resource { Optional: true, Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, }, - "no_age": { - Type: schema.TypeBool, - Deprecated: "`no_age` is deprecated and will be removed in a future major release. Use `send_age_if_zero` instead.", - Optional: true, - Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, - }, "with_state": { Type: schema.TypeString, Computed: true, @@ -265,7 +269,6 @@ func ResourceStorageBucket() *schema.Resource { "send_age_if_zero": { Type: schema.TypeBool, Optional: true, - Default: true, Description: `While set true, age value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the age field. 
It can be used alone or together with age.`, }, "send_days_since_noncurrent_time_if_zero": { @@ -1406,14 +1409,12 @@ func flattenBucketLifecycleRuleCondition(index int, d *schema.ResourceData, cond // are already present otherwise setting them to individual default values. if v, ok := d.GetOk(fmt.Sprintf("lifecycle_rule.%d.condition",index)); ok{ state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) - ruleCondition["no_age"] = state_condition["no_age"].(bool) ruleCondition["send_days_since_noncurrent_time_if_zero"] = state_condition["send_days_since_noncurrent_time_if_zero"].(bool) ruleCondition["send_days_since_custom_time_if_zero"] = state_condition["send_days_since_custom_time_if_zero"].(bool) ruleCondition["send_num_newer_versions_if_zero"] = state_condition["send_num_newer_versions_if_zero"].(bool) ruleCondition["send_age_if_zero"] = state_condition["send_age_if_zero"].(bool) } else { - ruleCondition["no_age"] = false - ruleCondition["send_age_if_zero"] = true + ruleCondition["send_age_if_zero"] = false ruleCondition["send_days_since_noncurrent_time_if_zero"] = false ruleCondition["send_days_since_custom_time_if_zero"] = false ruleCondition["send_num_newer_versions_if_zero"] = false @@ -1566,15 +1567,10 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi condition := conditions[0].(map[string]interface{}) transformed := &storage.BucketLifecycleRuleCondition{} - // Setting high precedence of no_age over age and send_age_if_zero. 
- // Only sets age value when no_age is not present or no_age is present and has false value - if v, ok := condition["no_age"]; !ok || !(v.(bool)) { - if v, ok := condition["age"]; ok { - age := int64(v.(int)) - u, ok := condition["send_age_if_zero"] - if age > 0 || (ok && u.(bool)) { - transformed.Age = &age - } + if v, ok := condition["age"]; ok { + age := int64(v.(int)) + if u, ok := condition["send_age_if_zero"]; age > 0 || (ok && u.(bool)) { + transformed.Age = &age } } @@ -1685,15 +1681,8 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - if v, ok := m["no_age"]; ok && v.(bool){ - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } else { - if v, ok := m["send_age_if_zero"]; ok { - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } - if v, ok := m["age"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) } if v, ok := m["days_since_custom_time"]; ok { @@ -1737,6 +1726,10 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } + if v, ok := m["send_age_if_zero"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + if v, ok := m["send_days_since_noncurrent_time_if_zero"]; ok { buf.WriteString(fmt.Sprintf("%t-", v.(bool))) } @@ -1859,8 +1852,7 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("autoclass", flattenBucketAutoclass(res.Autoclass)); err != nil { return fmt.Errorf("Error setting autoclass: %s", err) } - // lifecycle_rule contains terraform only variable no_age. - // Passing config("d") to flattener function to set no_age separately. + // Passing config("d") to flattener function to set virtual fields separately. 
if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d, res.Lifecycle)); err != nil { return fmt.Errorf("Error setting lifecycle_rule: %s", err) } diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go index 83f48c3aeb56..19f6a9feae4d 100644 --- a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go @@ -133,7 +133,7 @@ func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_basicWithAutoclass(bucketName, false), + Config: testAccStorageBucket_basicWithAutoclass(bucketName,false), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -146,7 +146,7 @@ func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_basicWithAutoclass(bucketName, true), + Config: testAccStorageBucket_basicWithAutoclass(bucketName,true), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -346,7 +346,7 @@ func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_dualLocation_rpo(bucketName, "ASYNC_TURBO"), + Config: testAccStorageBucket_dualLocation_rpo(bucketName,"ASYNC_TURBO"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "ASYNC_TURBO"), @@ -359,7 +359,7 @@ func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_dualLocation_rpo(bucketName, "DEFAULT"), + Config: 
testAccStorageBucket_dualLocation_rpo(bucketName,"DEFAULT"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "DEFAULT"), @@ -399,7 +399,7 @@ func TestAccStorageBucket_multiLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_multiLocation_rpo(bucketName, "DEFAULT"), + Config: testAccStorageBucket_multiLocation_rpo(bucketName,"DEFAULT"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "DEFAULT"), @@ -655,14 +655,13 @@ func TestAccStorageBucket_lifecycleRulesVirtualFields(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age", "lifecycle_rule.1.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.1.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.1.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.2.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.1.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.1.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.2.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate2(bucketName), Check: 
resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket, 1), testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket, 2), ), }, @@ -670,7 +669,7 @@ func TestAccStorageBucket_lifecycleRulesVirtualFields(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age", "lifecycle_rule.0.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.0.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.0.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.0.condition.0.send_age_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.0.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.0.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.0.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.0.condition.0.send_age_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), @@ -1513,7 +1512,7 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_SoftDeletePolicy(bucketName, 7776000), + Config: testAccStorageBucket_SoftDeletePolicy(bucketName,7776000), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -1528,7 +1527,7 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_SoftDeletePolicy(bucketName, 0), + Config: 
testAccStorageBucket_SoftDeletePolicy(bucketName,0), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -1804,7 +1803,7 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_dualLocation_rpo(bucketName string, rpo string) string { +func testAccStorageBucket_dualLocation_rpo(bucketName string,rpo string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -1815,10 +1814,10 @@ resource "google_storage_bucket" "bucket" { } rpo = "%s" } -`, bucketName, rpo) +`, bucketName,rpo) } -func testAccStorageBucket_multiLocation_rpo(bucketName string, rpo string) string { +func testAccStorageBucket_multiLocation_rpo(bucketName string,rpo string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -1826,7 +1825,7 @@ resource "google_storage_bucket" "bucket" { force_destroy = true rpo = "%s" } -`, bucketName, rpo) +`, bucketName,rpo) } func testAccStorageBucket_customAttributes(bucketName string) string { @@ -1868,6 +1867,7 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { + send_age_if_zero = true age = 0 } } @@ -1915,7 +1915,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - no_age = false days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = false days_since_custom_time = 0 @@ -1929,7 +1928,6 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { - no_age = true days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = true days_since_custom_time = 0 @@ -1943,6 +1941,7 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { + send_age_if_zero = true send_days_since_noncurrent_time_if_zero = true send_days_since_custom_time_if_zero = true send_num_newer_versions_if_zero = true @@ -1964,7 +1963,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - 
no_age = false days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = true days_since_custom_time = 0 @@ -1979,7 +1977,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - no_age = true send_age_if_zero = false custom_time_before = "2022-09-01" days_since_noncurrent_time = 0 @@ -2549,7 +2546,7 @@ resource "google_storage_bucket" "bucket" { } func testAccStorageBucket_SoftDeletePolicy(bucketName string, duration int) string { - return fmt.Sprintf(` + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" location = "US"