diff --git a/.github/labeler-issue.yml b/.github/labeler-issue.yml index 86318cb84b..315ef69c2e 100644 --- a/.github/labeler-issue.yml +++ b/.github/labeler-issue.yml @@ -46,6 +46,8 @@ service/Classic Infrastructure: - '((\*|-) ?`?|(data|resource) "?)(ibm_compute|ibm_cdn|ibm_firewall|ibm_lb|ibm_lbaas|ibm_network|ibm_storage|ibm_security|ibm_subnet|ibm_hardware|ibm_ipsec)' service/Cloud Databases: - '((\*|-) ?`?|(data|resource) "?)ibm_(cloudant|database)' +service/Cloud Logs: + - '((\*|-) ?`?|(data|resource) "?)ibm_logs_' service/Cloud Foundry: - '((\*|-) ?`?|(data|resource) "?)(ibm_account|ibm_org|ibm_space|ibm_service|ibm_app_domain_private|ibm_app_domain_shared|ibm_app_route)' service/CBR: diff --git a/.secrets.baseline b/.secrets.baseline index 5eb22bef30..67f80e0f9c 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "go.mod|go.sum|.*.map|^.secrets.baseline$", "lines": null }, - "generated_at": "2024-09-12T14:29:18Z", + "generated_at": "2024-10-20T14:21:53Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -526,7 +526,7 @@ "hashed_secret": "fd8bc0cb6ce2ef2fe2934f6d2d1ce1d648503740", "is_secret": false, "is_verified": false, - "line_number": 194, + "line_number": 221, "type": "Secret Keyword", "verified_result": null } @@ -571,6 +571,16 @@ "verified_result": null } ], + "examples/ibm-partner-center-sell/variables.tf": [ + { + "hashed_secret": "4fe2c50d3e572f60977cb06e8995a5d7c368758d", + "is_secret": false, + "is_verified": false, + "line_number": 262, + "type": "Hex High Entropy String", + "verified_result": null + } + ], "examples/ibm-private-dns/terraform.tfvars": [ { "hashed_secret": "55518b11e5013893f3b9f074209da1e3d4b2a6e7", @@ -786,7 +796,7 @@ "hashed_secret": "731438016c5ab94431f61820f35e3ae5f8ad6004", "is_secret": false, "is_verified": false, - "line_number": 497, + "line_number": 508, "type": "Secret Keyword", "verified_result": null }, @@ -794,15 +804,23 @@ "hashed_secret": "12da2e35d6b50c902c014f1ab9e3032650368df7", 
"is_secret": false, "is_verified": false, - "line_number": 503, + "line_number": 514, "type": "Secret Keyword", "verified_result": null }, + { + "hashed_secret": "165722fe6dd0ec0afbeefb51c8258a177497956b", + "is_secret": false, + "is_verified": false, + "line_number": 770, + "type": "Hex High Entropy String", + "verified_result": null + }, { "hashed_secret": "813274ccae5b6b509379ab56982d862f7b5969b6", "is_secret": false, "is_verified": false, - "line_number": 1287, + "line_number": 1373, "type": "Base64 High Entropy String", "verified_result": null } @@ -812,7 +830,7 @@ "hashed_secret": "9184b0c38101bf24d78b2bb0d044deb1d33696fc", "is_secret": false, "is_verified": false, - "line_number": 137, + "line_number": 140, "type": "Secret Keyword", "verified_result": null }, @@ -820,7 +838,7 @@ "hashed_secret": "c427f185ddcb2440be9b77c8e45f1cd487a2e790", "is_secret": false, "is_verified": false, - "line_number": 1494, + "line_number": 1514, "type": "Base64 High Entropy String", "verified_result": null }, @@ -828,7 +846,7 @@ "hashed_secret": "1f7e33de15e22de9d2eaf502df284ed25ca40018", "is_secret": false, "is_verified": false, - "line_number": 1561, + "line_number": 1581, "type": "Secret Keyword", "verified_result": null }, @@ -836,7 +854,7 @@ "hashed_secret": "1f614c2eb6b3da22d89bd1b9fd47d7cb7c8fc670", "is_secret": false, "is_verified": false, - "line_number": 3494, + "line_number": 3550, "type": "Secret Keyword", "verified_result": null }, @@ -844,7 +862,7 @@ "hashed_secret": "7abfce65b8504403afc25c9790f358d513dfbcc6", "is_secret": false, "is_verified": false, - "line_number": 3507, + "line_number": 3563, "type": "Secret Keyword", "verified_result": null }, @@ -852,7 +870,7 @@ "hashed_secret": "0c2d85bf9a9b1579b16f220a4ea8c3d62b2e24b1", "is_secret": false, "is_verified": false, - "line_number": 3548, + "line_number": 3604, "type": "Secret Keyword", "verified_result": null } @@ -872,7 +890,7 @@ "hashed_secret": "da8cae6284528565678de15e03d461e23fe22538", "is_secret": false, 
"is_verified": false, - "line_number": 1903, + "line_number": 2026, "type": "Secret Keyword", "verified_result": null }, @@ -880,7 +898,7 @@ "hashed_secret": "1a0334cfa65f4be58b9d914b8e96e9d9478bfbac", "is_secret": false, "is_verified": false, - "line_number": 3293, + "line_number": 3481, "type": "Secret Keyword", "verified_result": null } @@ -890,7 +908,7 @@ "hashed_secret": "c8b6f5ef11b9223ac35a5663975a466ebe7ebba9", "is_secret": false, "is_verified": false, - "line_number": 2190, + "line_number": 2256, "type": "Secret Keyword", "verified_result": null }, @@ -898,7 +916,7 @@ "hashed_secret": "8abf4899c01104241510ba87685ad4de76b0c437", "is_secret": false, "is_verified": false, - "line_number": 2196, + "line_number": 2262, "type": "Secret Keyword", "verified_result": null } @@ -1180,7 +1198,17 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 415, + "line_number": 425, + "type": "Secret Keyword", + "verified_result": null + } + ], + "ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_test.go": [ + { + "hashed_secret": "731538be37450012c5fe92b7e86eff4959083a36", + "is_secret": false, + "is_verified": false, + "line_number": 961, "type": "Secret Keyword", "verified_result": null } @@ -1190,7 +1218,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 233, + "line_number": 243, "type": "Secret Keyword", "verified_result": null }, @@ -1198,7 +1226,15 @@ "hashed_secret": "17f5f58d3d8d9871c52ab032989df3d810d2443e", "is_secret": false, "is_verified": false, - "line_number": 392, + "line_number": 439, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", + "is_secret": false, + "is_verified": false, + "line_number": 441, "type": "Secret Keyword", "verified_result": null } @@ -1208,7 +1244,17 @@ "hashed_secret": 
"3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 465, + "line_number": 508, + "type": "Secret Keyword", + "verified_result": null + } + ], + "ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_test.go": [ + { + "hashed_secret": "731538be37450012c5fe92b7e86eff4959083a36", + "is_secret": false, + "is_verified": false, + "line_number": 967, "type": "Secret Keyword", "verified_result": null } @@ -1218,7 +1264,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 88, + "line_number": 205, "type": "Secret Keyword", "verified_result": null }, @@ -1226,7 +1272,15 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 483, + "line_number": 592, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b5366a2d2ac98dae978423083f8b09e5cddc705d", + "is_secret": false, + "is_verified": false, + "line_number": 975, "type": "Secret Keyword", "verified_result": null } @@ -1894,7 +1948,7 @@ "hashed_secret": "a99bf28e18370eb20e9cc79a1e7f8c379075f69c", "is_secret": false, "is_verified": false, - "line_number": 658, + "line_number": 652, "type": "Secret Keyword", "verified_result": null }, @@ -1902,7 +1956,7 @@ "hashed_secret": "b5366a2d2ac98dae978423083f8b09e5cddc705d", "is_secret": false, "is_verified": false, - "line_number": 726, + "line_number": 720, "type": "Secret Keyword", "verified_result": null } @@ -1992,7 +2046,7 @@ "hashed_secret": "884a58e4c2c5d195d3876787bdc63af6c5af2924", "is_secret": false, "is_verified": false, - "line_number": 647, + "line_number": 740, "type": "Secret Keyword", "verified_result": null } @@ -2002,7 +2056,7 @@ "hashed_secret": "884a58e4c2c5d195d3876787bdc63af6c5af2924", "is_secret": false, "is_verified": false, - "line_number": 1692, + "line_number": 1720, "type": "Secret Keyword", "verified_result": null } @@ -2032,7 +2086,7 
@@ "hashed_secret": "b02fa7fd7ca08b5dc86c2548e40f8a21171ef977", "is_secret": false, "is_verified": false, - "line_number": 373, + "line_number": 375, "type": "Secret Keyword", "verified_result": null } @@ -2060,7 +2114,7 @@ "hashed_secret": "deab23f996709b4e3d14e5499d1cc2de677bfaa8", "is_secret": false, "is_verified": false, - "line_number": 1365, + "line_number": 1366, "type": "Secret Keyword", "verified_result": null }, @@ -2068,7 +2122,7 @@ "hashed_secret": "20a25bac21219ffff1904bde871ded4027eca2f8", "is_secret": false, "is_verified": false, - "line_number": 1954, + "line_number": 1960, "type": "Secret Keyword", "verified_result": null }, @@ -2076,7 +2130,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 1973, + "line_number": 1979, "type": "Secret Keyword", "verified_result": null } @@ -2086,7 +2140,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 189, + "line_number": 190, "type": "Secret Keyword", "verified_result": null } @@ -2096,7 +2150,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 760, + "line_number": 771, "type": "Secret Keyword", "verified_result": null } @@ -2106,7 +2160,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 808, + "line_number": 819, "type": "Secret Keyword", "verified_result": null } @@ -2116,7 +2170,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 207, + "line_number": 209, "type": "Secret Keyword", "verified_result": null } @@ -2126,7 +2180,7 @@ "hashed_secret": "8cbbbfad0206e5953901f679b0d26d583c4f5ffe", "is_secret": false, "is_verified": false, - "line_number": 252, + "line_number": 253, "type": "Secret Keyword", "verified_result": null }, @@ -2134,7 +2188,7 
@@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 317, + "line_number": 318, "type": "Secret Keyword", "verified_result": null } @@ -2144,7 +2198,7 @@ "hashed_secret": "5667b8489a17faa9ef54941db31ed762be280bec", "is_secret": false, "is_verified": false, - "line_number": 145, + "line_number": 147, "type": "Secret Keyword", "verified_result": null }, @@ -2152,7 +2206,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 177, + "line_number": 179, "type": "Secret Keyword", "verified_result": null } @@ -2162,7 +2216,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 211, + "line_number": 213, "type": "Secret Keyword", "verified_result": null } @@ -2172,7 +2226,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 144, + "line_number": 145, "type": "Secret Keyword", "verified_result": null } @@ -2190,7 +2244,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 649, + "line_number": 650, "type": "Secret Keyword", "verified_result": null } @@ -2200,7 +2254,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 222, + "line_number": 224, "type": "Secret Keyword", "verified_result": null } @@ -2210,7 +2264,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 277, + "line_number": 280, "type": "Secret Keyword", "verified_result": null }, @@ -2218,7 +2272,7 @@ "hashed_secret": "44cdfc3615970ada14420caaaa5c5745fca06002", "is_secret": false, "is_verified": false, - "line_number": 295, + "line_number": 299, "type": "Secret Keyword", "verified_result": null } @@ -2594,7 +2648,7 @@ 
"hashed_secret": "b02fa7fd7ca08b5dc86c2548e40f8a21171ef977", "is_secret": false, "is_verified": false, - "line_number": 266, + "line_number": 276, "type": "Secret Keyword", "verified_result": null }, @@ -2602,7 +2656,7 @@ "hashed_secret": "d4c3d66fd0c38547a3c7a4c6bdc29c36911bc030", "is_secret": false, "is_verified": false, - "line_number": 304, + "line_number": 314, "type": "Secret Keyword", "verified_result": null } @@ -2854,7 +2908,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 1168, + "line_number": 1173, "type": "Secret Keyword", "verified_result": null } @@ -2914,7 +2968,7 @@ "hashed_secret": "1018de48014135565e13b4b33d5d34cde9d5c23b", "is_secret": false, "is_verified": false, - "line_number": 186, + "line_number": 188, "type": "Hex High Entropy String", "verified_result": null } @@ -2924,7 +2978,7 @@ "hashed_secret": "18697a00f52cfe022bb910a8a7af9d509114f997", "is_secret": false, "is_verified": false, - "line_number": 112, + "line_number": 114, "type": "Hex High Entropy String", "verified_result": null }, @@ -2932,7 +2986,7 @@ "hashed_secret": "1018de48014135565e13b4b33d5d34cde9d5c23b", "is_secret": false, "is_verified": false, - "line_number": 241, + "line_number": 243, "type": "Hex High Entropy String", "verified_result": null } @@ -2942,7 +2996,7 @@ "hashed_secret": "1018de48014135565e13b4b33d5d34cde9d5c23b", "is_secret": false, "is_verified": false, - "line_number": 314, + "line_number": 332, "type": "Hex High Entropy String", "verified_result": null } @@ -2952,7 +3006,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 45, + "line_number": 46, "type": "Secret Keyword", "verified_result": null }, @@ -2960,7 +3014,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 313, + "line_number": 336, "type": "Secret Keyword", "verified_result": 
null }, @@ -2968,7 +3022,7 @@ "hashed_secret": "a31a653edc5743836dc805b057d29822f06c7878", "is_secret": false, "is_verified": false, - "line_number": 463, + "line_number": 487, "type": "Secret Keyword", "verified_result": null }, @@ -2976,14 +3030,14 @@ "hashed_secret": "b5366a2d2ac98dae978423083f8b09e5cddc705d", "is_secret": false, "is_verified": false, - "line_number": 594, + "line_number": 618, "type": "Secret Keyword", "verified_result": null } ], "ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker_test.go": [ { - "hashed_secret": "7d85295c8215b574e490cc3498084e9c859eec69", + "hashed_secret": "af88cc3e27e9712c4ce850ba1c0f9c844e6c2876", "is_secret": false, "is_verified": false, "line_number": 73, @@ -3136,7 +3190,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 320, + "line_number": 321, "type": "Secret Keyword", "verified_result": null } @@ -3252,15 +3306,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 53, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 110, + "line_number": 54, "type": "Secret Keyword", "verified_result": null } @@ -3280,15 +3326,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 185, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 312, + "line_number": 189, "type": "Secret Keyword", "verified_result": null } @@ -3298,15 +3336,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 143, - "type": "Secret Keyword", - "verified_result": null - }, - { - 
"hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 291, + "line_number": 148, "type": "Secret Keyword", "verified_result": null } @@ -3339,14 +3369,6 @@ "line_number": 189, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 331, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/data_source_ibm_sm_private_certificate.go": [ @@ -3357,14 +3379,6 @@ "line_number": 230, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 409, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/data_source_ibm_sm_private_certificate_configuration_intermediate_ca.go": [ @@ -3380,7 +3394,7 @@ "hashed_secret": "3ad6a2f4e68613da801ef1ddc1baf6d5b25607b2", "is_secret": false, "is_verified": false, - "line_number": 484, + "line_number": 511, "type": "Secret Keyword", "verified_result": null } @@ -3398,7 +3412,7 @@ "hashed_secret": "9beb31de125498074813c6f31c0e4df3e54a5489", "is_secret": false, "is_verified": false, - "line_number": 535, + "line_number": 575, "type": "Secret Keyword", "verified_result": null } @@ -3411,14 +3425,6 @@ "line_number": 299, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 482, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/data_source_ibm_sm_public_certificate_configuration_ca_lets_encrypt.go": [ @@ -3426,15 +3432,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 48, - "type": "Secret Keyword", - "verified_result": 
null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 107, + "line_number": 49, "type": "Secret Keyword", "verified_result": null } @@ -3454,15 +3452,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 53, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 116, + "line_number": 54, "type": "Secret Keyword", "verified_result": null } @@ -3472,15 +3462,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 58, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 119, + "line_number": 59, "type": "Secret Keyword", "verified_result": null } @@ -3528,7 +3510,7 @@ "hashed_secret": "d39b250468d54ca72b44af6ba25479701dd451c1", "is_secret": false, "is_verified": false, - "line_number": 877, + "line_number": 884, "type": "Secret Keyword", "verified_result": null } @@ -3541,14 +3523,6 @@ "line_number": 201, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 392, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/data_source_ibm_sm_service_credentials_secret_metadata.go": [ @@ -3569,14 +3543,6 @@ "line_number": 186, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 314, - "type": "Secret Keyword", - "verified_result": null } ], 
"ibm/service/secretsmanager/resource_ibm_sm_iam_credentials_configuration.go": [ @@ -3584,15 +3550,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 40, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 152, + "line_number": 41, "type": "Secret Keyword", "verified_result": null } @@ -3612,7 +3570,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 202, + "line_number": 209, "type": "Secret Keyword", "verified_result": null }, @@ -3620,15 +3578,7 @@ "hashed_secret": "108b310facc1a193833fc2971fd83081f775ea0c", "is_secret": false, "is_verified": false, - "line_number": 393, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 396, + "line_number": 435, "type": "Secret Keyword", "verified_result": null } @@ -3652,19 +3602,11 @@ "type": "Secret Keyword", "verified_result": null }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 418, - "type": "Secret Keyword", - "verified_result": null - }, { "hashed_secret": "9beb31de125498074813c6f31c0e4df3e54a5489", "is_secret": false, "is_verified": false, - "line_number": 634, + "line_number": 678, "type": "Secret Keyword", "verified_result": null } @@ -3695,14 +3637,6 @@ "line_number": 297, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 538, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/resource_ibm_sm_private_certificate_configuration_intermediate_ca.go": [ 
@@ -3718,7 +3652,7 @@ "hashed_secret": "9beb31de125498074813c6f31c0e4df3e54a5489", "is_secret": false, "is_verified": false, - "line_number": 923, + "line_number": 974, "type": "Secret Keyword", "verified_result": null } @@ -3736,7 +3670,7 @@ "hashed_secret": "9beb31de125498074813c6f31c0e4df3e54a5489", "is_secret": false, "is_verified": false, - "line_number": 847, + "line_number": 898, "type": "Secret Keyword", "verified_result": null } @@ -3749,14 +3683,6 @@ "line_number": 396, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 636, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/resource_ibm_sm_public_certificate_action_validate_manual_dns_test.go": [ @@ -3774,15 +3700,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 44, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 152, + "line_number": 45, "type": "Secret Keyword", "verified_result": null } @@ -3802,15 +3720,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 41, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 190, + "line_number": 42, "type": "Secret Keyword", "verified_result": null }, @@ -3818,7 +3728,7 @@ "hashed_secret": "3475f51e796d0881fb3e42f690c66ab3ecb217a1", "is_secret": false, "is_verified": false, - "line_number": 237, + "line_number": 255, "type": "Secret Keyword", "verified_result": null } @@ -3828,15 +3738,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, 
"is_verified": false, - "line_number": 47, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 193, + "line_number": 48, "type": "Secret Keyword", "verified_result": null }, @@ -3844,7 +3746,7 @@ "hashed_secret": "6b52786f527ade6e28a0b59df6b1367713f8851e", "is_secret": false, "is_verified": false, - "line_number": 240, + "line_number": 258, "type": "Secret Keyword", "verified_result": null } @@ -3877,14 +3779,6 @@ "line_number": 189, "type": "Secret Keyword", "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 448, - "type": "Secret Keyword", - "verified_result": null } ], "ibm/service/secretsmanager/resource_ibm_sm_username_password_secret.go": [ @@ -3892,15 +3786,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 123, - "type": "Secret Keyword", - "verified_result": null - }, - { - "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", - "is_secret": false, - "is_verified": false, - "line_number": 390, + "line_number": 122, "type": "Secret Keyword", "verified_result": null } @@ -3978,7 +3864,7 @@ "hashed_secret": "f855f5027fd8fdb2df3f6a6f1cf858fffcbedb0c", "is_secret": false, "is_verified": false, - "line_number": 96457, + "line_number": 96469, "type": "Secret Keyword", "verified_result": null }, @@ -3986,7 +3872,7 @@ "hashed_secret": "5fb0fa884132a8724a8d7cba55853737e442adbd", "is_secret": false, "is_verified": false, - "line_number": 119081, + "line_number": 119093, "type": "Secret Keyword", "verified_result": null }, @@ -3994,7 +3880,7 @@ "hashed_secret": "1e5c2f367f02e47a8c160cda1cd9d91decbac441", "is_secret": false, "is_verified": false, - "line_number": 151289, + "line_number": 151301, "type": "Secret Keyword", "verified_result": null } @@ 
-4059,6 +3945,26 @@ "verified_result": null } ], + "website/docs/d/is_private_path_service_gateway_account_policies.html.markdown": [ + { + "hashed_secret": "165722fe6dd0ec0afbeefb51c8258a177497956b", + "is_secret": false, + "is_verified": false, + "line_number": 44, + "type": "Hex High Entropy String", + "verified_result": null + } + ], + "website/docs/d/is_private_path_service_gateway_account_policy.html.markdown": [ + { + "hashed_secret": "165722fe6dd0ec0afbeefb51c8258a177497956b", + "is_secret": false, + "is_verified": false, + "line_number": 40, + "type": "Hex High Entropy String", + "verified_result": null + } + ], "website/docs/d/sm_public_certificate_configuration_ca_lets_encrypt.html.markdown": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", @@ -4617,6 +4523,36 @@ "verified_result": null } ], + "website/docs/r/is_private_path_service_gateway_account_policy.html.markdown": [ + { + "hashed_secret": "165722fe6dd0ec0afbeefb51c8258a177497956b", + "is_secret": false, + "is_verified": false, + "line_number": 28, + "type": "Hex High Entropy String", + "verified_result": null + } + ], + "website/docs/r/is_private_path_service_gateway_endpoint_gateway_binding_operations.html.markdown": [ + { + "hashed_secret": "354fe46ea7cceda3813bfa9d7541d0922f1c45d0", + "is_secret": false, + "is_verified": false, + "line_number": 27, + "type": "Hex High Entropy String", + "verified_result": null + } + ], + "website/docs/r/is_private_path_service_gateway_revoke_account.html.markdown": [ + { + "hashed_secret": "354fe46ea7cceda3813bfa9d7541d0922f1c45d0", + "is_secret": false, + "is_verified": false, + "line_number": 26, + "type": "Hex High Entropy String", + "verified_result": null + } + ], "website/docs/r/lb_vpx.html.markdown": [ { "hashed_secret": "7f9e9d60560fbad72688c82e68cf42157a61bcad", @@ -4681,13 +4617,13 @@ "verified_result": null } ], - "website/docs/r/onboarding_resource_broker.html.markdown": [ + "website/docs/r/onboarding_registration.html.markdown": [ { 
- "hashed_secret": "3634241a55dbf1e8b0d7d4b7b8c3af1a45f1fcae", + "hashed_secret": "4fe2c50d3e572f60977cb06e8995a5d7c368758d", "is_secret": false, "is_verified": false, "line_number": 19, - "type": "Secret Keyword", + "type": "Hex High Entropy String", "verified_result": null } ], @@ -4780,7 +4716,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 82, + "line_number": 83, "type": "Secret Keyword", "verified_result": null }, @@ -4788,7 +4724,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 84, + "line_number": 85, "type": "Secret Keyword", "verified_result": null } @@ -4798,7 +4734,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 128, + "line_number": 129, "type": "Secret Keyword", "verified_result": null }, @@ -4806,7 +4742,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 130, + "line_number": 131, "type": "Secret Keyword", "verified_result": null } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d9744e0d4..bf2ea5e508 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,129 @@ +# 1.71.0-beta0(Oct 20, 2024) +Features +* Support Power System + - **Resources** + - ibm_pi_network_interface + - ibm_pi_storage_tiers + - ibm_pi_network_address_group + - ibm_pi_network_address_group_member + - ibm_pi_network_security_group_action + - ibm_pi_network_security_group_member + - ibm_pi_network_security_group_rule + - ibm_pi_network_security_group + - **Datasources** + - ibm_pi_network_interface + - ibm_pi_network_interfaces + - ibm_pi_volume_snapshot + - ibm_pi_volume_snapshots + - ibm_pi_network_address_group + - ibm_pi_network_address_groups + - ibm_pi_network_security_group + - ibm_pi_network_security_groups +* Support Event Streams + - **Resources** + - 
ibm_event_streams_quota + - **Datasources** + - ibm_event_streams_quota +* Support VPC + - **Datasources** + - ibm_is_private_path_service_gateway + - ibm_is_private_path_service_gateway_account_policy + - ibm_is_private_path_service_gateway_account_policies + - ibm_is_private_path_service_gateways + - ibm_is_private_path_service_gateway_endpoint_gateway_binding + - ibm_is_private_path_service_gateway_endpoint_gateway_bindings + - **Resources** + - ibm_is_private_path_service_gateway_account_policy + - ibm_is_private_path_service_gateway + - ibm_is_private_path_service_gateway_revoke_account + - ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations + - ibm_is_private_path_service_gateway_operations + +Enhancements +* Granular CRN for images ([5646](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5646)) +* Granular CRN for instance resources ([5647](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5647)) +* granular CRN for volumes ([5651](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5651)) +* Add GRS for volume data sources ([5671](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5671)) +* Add GRS Attributes/Arguments to pi_volume resource ([5668](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5668)) +* Add replication_pool_map to disaster recovery data sources ([5664](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5664)) +* Add GRS ([5665](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5665)) +* Add source checksum field to image data source ([5663](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5663)) +* Add datacenter capability details ([5666](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5666)) +* Update Storage Connection ([5667](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5667)) +* Add support_systems for sap profile ([5670](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5670)) +* Add GRS attributes to pi volume group data sources 
and related ([5673](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5673)) +* Update documentation for ibm_resource_tag ([5700](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5700)) +* Add Workspace NSG ([5727](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5727)) +* Update database.html.markdown ([5717](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5717)) +* Update partnercentersell service ([5721](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5721)) +* add orphan_resource_on_delete argument to worker pool resources ([5705](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5705)) +* Tekton pipeline re-generated with latest SDK generator ([5715](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5715)) +* Added support for vpn gateway connection distribute traffic ([5733](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5733)) +* feat(PPS): Add support for private path service gateway and private path load balancer ([5712](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5712)) +* Added support for defined_performance changes in is_volume ([5694](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5694)) +* feat(routing-table-crn): UI 37487 routing table crn and SDK migration PR ([5708](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5708)) + +Bugfixes +* Fix Config Aggregator ([5723](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5723)) + +# 1.70.1 (Oct 19, 2024) +BugFixes +* Fix test bucket endpoints ([5695](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5695)) + +# 1.70.0 (Oct 06, 2024) + +Breaking Changes +* update(cloud-databases): Make Service Endpoints as Required argument([5615](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5615)) + +Features +* Support IAM Access tag + - **Resources** + - ibm_iam_access_tag +* Support VPC + - **Resources** + - ibm_is_subnet_reserved_ip_patch +* Support code Engine + - **Resources** + - 
ibm_code_engine_function +* Support Configuration Aggregator + - **Resources** + - ibm_config_aggregator_settings + - **Datasources** + - ibm_config_aggregator_configurations + - ibm_config_aggregator_settings + - ibm_config_aggregator_resource_collection_status + +Enhancements +* add service_to_service_enabled field to atracker event streams endpoint ([5605](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5605)) +* Deprecated ibm_resource_access_tag and replaced it with ibm_iam_access_tag ([5567](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5567)) +* SM New error formatting + crypto_key label name changes ([5562](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5562)) +* Deprecating alias and bindings APIs and schema items ([5612](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5612)) +* update(cloud-databases): Make Service Endpoints Required and send warning when public endpoints are in use ([5402](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5402)) +* discourage use of ibm_container_bind_service ([5588](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5588)) +* updated resources, data source and documentation for Slack Direct destination support ([5615](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5615)) +* IAM Policy Assignment: S2S Policy Assignments ET and AG ([5624](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5624)) +* fix for context and history parameter of template assignment ([5640](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5640)) +* Updating to latest schematics sdk and updating sch_agent resource to enable force deletion by default ([5660](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5660)) +* add support for 4th zone ([5644](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5644)) +* add support for CRN attribute for Power Network ([5649](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5649)) +* add support for CRN attribute 
for Power Shared Processor ([5650](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5650)) +* add support for CRN attribute for Power Snapshot ([5650](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5650)) +* Secrets Manager IAM credentials secrets with S2S ([5678](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5678)) +* fix: update error diagnostic reporting in existing Event Streams code ([5685](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5685)) +* feat(Catalog Management): support new offering version limit ([5688](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5688)) +* sch Agent resource updated to accommodate agent destroy resources ([5691](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5691)) +* feat(tekton): add support for fork feature ([5693](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5693)) + +BugFixes +* The ibm_resource_tag do not detach if tag_type is access or service ([5566](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5566)) +* fix(is_share): added empty check and moved source_share_crn outside ([5632](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5632)) +* Add test bucket endpoints ([5636](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5636)) +* The ibm_resource_tag does not check the response on api tags calls ([5641](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5641)) +* Fix incorrect handling of trusted profile static CRN identities ([5681](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5681)) +* Fix multibyte character and optional attributes fix for Cloud logs ([5679](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5679)) +* fix(lb-pool-member): Add a nil check before accessing member weight ([5682](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5682)) +* Fix unicode characters for name fields in cloud logs resources 
([5698](https://github.com/IBM-Cloud/terraform-provider-ibm/issues/5698)) + # 1.70.0-beta0(Sep 22, 2024) Breaking Changes diff --git a/examples/ibm-configuration-aggregator/README.md b/examples/ibm-configuration-aggregator/README.md new file mode 100644 index 0000000000..c861f83215 --- /dev/null +++ b/examples/ibm-configuration-aggregator/README.md @@ -0,0 +1,128 @@ +# Examples for Configuration Aggregator + +These examples illustrate how to use the resources and data sources associated with Configuration Aggregator. + +The following resources are supported: +* ibm_config_aggregator_settings + +The following data sources are supported: +* ibm_config_aggregator_configurations +* ibm_config_aggregator_settings +* ibm_config_aggregator_resource_collection_status + +## Usage + +To run this example, execute the following commands: + +```bash +$ terraform init +$ terraform plan +$ terraform apply +``` + +Run `terraform destroy` when you don't need these resources. + +## Configuration Aggregator resources + +### Resource: ibm_config_aggregator_settings + +```hcl +resource "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + resource_collection_enabled = var.config_aggregator_settings_resource_collection_enabled + trusted_profile_id = var.config_aggregator_settings_trusted_profile_id + regions = var.config_aggregator_settings_regions + additional_scope = var.config_aggregator_settings_additional_scope +} +``` + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| resource_collection_enabled | The field denoting if the resource collection is enabled. | `bool` | false | +| trusted_profile_id | The trusted profile id that provides Reader access to the App Configuration instance to collect resource metadata. | `string` | false | +| regions | The list of regions across which the resource collection is enabled. 
| `list(string)` | false | +| additional_scope | The additional scope that enables resource collection for Enterprise accounts. | `list()` | false | + +## Configuration Aggregator data sources + +### Data source: ibm_config_aggregator_configurations + +```hcl +data "ibm_config_aggregator_configurations" "config_aggregator_configurations_instance" { + config_type = var.config_aggregator_configurations_config_type + service_name = var.config_aggregator_configurations_service_name + resource_group_id = var.config_aggregator_configurations_resource_group_id + location = var.config_aggregator_configurations_location + resource_crn = var.config_aggregator_configurations_resource_crn +} +``` + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| config_type | The type of resource configuration that are to be retrieved. | `string` | false | +| service_name | The name of the IBM Cloud service for which resources are to be retrieved. | `string` | false | +| resource_group_id | The resource group id of the resources. | `string` | false | +| location | The location or region in which the resources are created. | `string` | false | +| resource_crn | The crn of the resource. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| prev | The reference to the previous page of entries. | +| configs | Array of resource configurations. | + +### Data source: ibm_config_aggregator_settings + +```hcl +data "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { +} +``` + +#### Outputs + +| Name | Description | +|------|-------------| +| resource_collection_enabled | The field to check if the resource collection is enabled. | +| trusted_profile_id | The trusted profile ID that provides access to App Configuration instance to retrieve resource metadata. | +| last_updated | The last time the settings were updated. | +| regions | Regions for which the resource collection is enabled. 
| +| additional_scope | The additional scope that enables resource collection for Enterprise accounts. | + +### Data source: ibm_config_aggregator_resource_collection_status + +```hcl +data "ibm_config_aggregator_resource_collection_status" "config_aggregator_resource_collection_status_instance" { +} +``` + +#### Outputs + +| Name | Description | +|------|-------------| +| last_config_refresh_time | The timestamp at which the configuration was last refreshed. | +| status | Status of the resource collection. | + +## Assumptions + +1. TODO + +## Notes + +1. TODO + +## Requirements + +| Name | Version | +|------|---------| +| terraform | ~> 0.12 | + +## Providers + +| Name | Version | +|------|---------| +| ibm | 1.13.1 | diff --git a/examples/ibm-configuration-aggregator/main.tf b/examples/ibm-configuration-aggregator/main.tf new file mode 100644 index 0000000000..c558ba48d7 --- /dev/null +++ b/examples/ibm-configuration-aggregator/main.tf @@ -0,0 +1,28 @@ +provider "ibm" { + ibmcloud_api_key = var.ibmcloud_api_key +} + +resource "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + instance_id=var.instance_id + region =var.region + resource_collection_enabled = var.config_aggregator_settings_resource_collection_enabled + trusted_profile_id = var.config_aggregator_settings_trusted_profile_id + resource_collection_regions = var.config_aggregator_settings_regions +} + +data "ibm_config_aggregator_configurations" "example" { + instance_id=var.instance_id + region =var.region + +} + + +data "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + instance_id=var.instance_id + region =var.region +} + +data "ibm_config_aggregator_resource_collection_status" "config_aggregator_resource_collection_status_instance" { + instance_id=var.instance_id + region =var.region +} \ No newline at end of file diff --git a/examples/ibm-configuration-aggregator/outputs.tf b/examples/ibm-configuration-aggregator/outputs.tf new file mode 100644 index 
0000000000..1589b9d663 --- /dev/null +++ b/examples/ibm-configuration-aggregator/outputs.tf @@ -0,0 +1,49 @@ +// This output allows config_aggregator_settings data to be referenced by other resources and the terraform CLI +// Modify this output if only certain data should be exposed +locals { + entries = [ + for config in data.ibm_config_aggregator_configurations.example.configs : { + about = { + account_id = config.about.account_id + config_type = config.about.config_type + last_config_refresh_time = config.about.last_config_refresh_time + location = config.about.location + resource_crn = config.about.resource_crn + resource_group_id = config.about.resource_group_id + resource_name = config.about.resource_name + service_name = config.about.service_name + tags={} + } + config = jsondecode(config.config) + } + ] +} + +output "ibm_config_aggregator_configurations" { + value = { + configs=local.entries + } +} +output "config_aggregator_settings" { + value = { + additional_scope = [] + regions = ["all"] + resource_collection_enabled = ibm_config_aggregator_settings.config_aggregator_settings_instance.resource_collection_enabled + trusted_profile_id = ibm_config_aggregator_settings.config_aggregator_settings_instance.trusted_profile_id + } +} + +output "aggregator_settings" { + value = { + additional_scope = [] + regions = ["all"] + resource_collection_enabled = ibm_config_aggregator_settings.config_aggregator_settings_instance.resource_collection_enabled + trusted_profile_id = ibm_config_aggregator_settings.config_aggregator_settings_instance.trusted_profile_id + } +} + +output "config_aggregator_resource_collection_status"{ + value={ + status=data.ibm_config_aggregator_resource_collection_status.config_aggregator_resource_collection_status_instance.status + } +} \ No newline at end of file diff --git a/examples/ibm-configuration-aggregator/variables.tf b/examples/ibm-configuration-aggregator/variables.tf new file mode 100644 index 0000000000..61bed75697 --- /dev/null 
+++ b/examples/ibm-configuration-aggregator/variables.tf @@ -0,0 +1,58 @@ +variable "ibmcloud_api_key" { + description = "IBM Cloud API key" + type = string +} + +variable "region"{ + description="Region of the Config Aggregator instance" + type=string +} + +variable "instance_id"{ + description="Config Aggregator Instance ID" + type=string +} + +// Resource arguments for config_aggregator_settings +variable "config_aggregator_settings_resource_collection_enabled" { + description = "The field denoting if the resource collection is enabled." + type = bool +} +variable "config_aggregator_settings_trusted_profile_id" { + description = "The trusted profile id that provides Reader access to the App Configuration instance to collect resource metadata." + type = string +} +variable "config_aggregator_settings_regions" { + description = "The list of regions across which the resource collection is enabled." + type = list(string) + default = ["all"] +} + +// Data source arguments for config_aggregator_configurations +variable "config_aggregator_configurations_config_type" { + description = "The type of resource configuration that are to be retrieved." + type = string + default = "placeholder" +} +variable "config_aggregator_configurations_service_name" { + description = "The name of the IBM Cloud service for which resources are to be retrieved." + type = string + default = "placeholder" +} +variable "config_aggregator_configurations_resource_group_id" { + description = "The resource group id of the resources." + type = string + default = "placeholder" +} +variable "config_aggregator_configurations_location" { + description = "The location or region in which the resources are created." + type = string + default = "placeholder" +} +variable "config_aggregator_configurations_resource_crn" { + description = "The crn of the resource." 
+ type = string + default = "placeholder" +} + + diff --git a/examples/ibm-configuration-aggregator/versions.tf b/examples/ibm-configuration-aggregator/versions.tf new file mode 100644 index 0000000000..3883805970 --- /dev/null +++ b/examples/ibm-configuration-aggregator/versions.tf @@ -0,0 +1,8 @@ +terraform { +required_version = ">=1.0.0, <2.0" + required_providers { + ibm = { + source = "registry.terraform.io/ibm-cloud/ibm" + } + } +} \ No newline at end of file diff --git a/examples/ibm-event-streams/README.md b/examples/ibm-event-streams/README.md index 2e3dab13b1..99282dadb0 100644 --- a/examples/ibm-event-streams/README.md +++ b/examples/ibm-event-streams/README.md @@ -160,7 +160,34 @@ resource "ibm_resource_tag" "tag_example_on_es" { } ``` -#### Scenario 6: Connect to an existing Event Streams instance and its topics. +#### Scenario 6: Set default and user quotas on an existing Event Streams instance. + +This code sets the default quota to 32768 bytes/second for producers and 16384 bytes/second for consumers. +It sets a quota for user `iam-ServiceId-00001111-2222-3333-4444-555566667777` to 65536 bytes/second for producers and no limit (-1) for consumers. +For more information on quotas, see [Setting Kafka quotas](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-enabling_kafka_quotas). 
+ +```terraform +data "ibm_resource_instance" "es_instance_6" { + name = "terraform-integration-6" + resource_group_id = data.ibm_resource_group.group.id +} + +resource "ibm_event_streams_quota" "default_quota" { + resource_instance_id = data.ibm_resource_instance.es_instance_6.id + entity = "default" + producer_byte_rate = 32768 + consumer_byte_rate = 16384 +} + +resource "ibm_event_streams_quota" "user00001111_quota" { + resource_instance_id = data.ibm_resource_instance.es_instance_6.id + entity = "iam-ServiceId-00001111-2222-3333-4444-555566667777" + producer_byte_rate = 65536 + consumer_byte_rate = -1 +} +``` + +#### Scenario 7: Connect to an existing Event Streams instance and its topics. This scenario uses a fictitious `"kafka_consumer_app"` resource to demonstrate how a consumer application could be configured. The resource uses three configuration properties: @@ -177,22 +204,22 @@ The topic names can be provided as strings, or can be taken from topic data sour ```terraform # Use an existing instance -data "ibm_resource_instance" "es_instance_6" { - name = "terraform-integration-6" +data "ibm_resource_instance" "es_instance_7" { + name = "terraform-integration-7" resource_group_id = data.ibm_resource_group.group.id } # Use an existing topic on that instance -data "ibm_event_streams_topic" "es_topic_6" { - resource_instance_id = data.ibm_resource_instance.es_instance_6.id +data "ibm_event_streams_topic" "es_topic_7" { + resource_instance_id = data.ibm_resource_instance.es_instance_7.id name = "my-es-topic" } # The FICTITIOUS consumer application, configured with brokers, API key, and topics resource "kafka_consumer_app" "es_kafka_app" { - bootstrap_server = lookup(data.ibm_resource_instance.es_instance_4.extensions, "kafka_brokers_sasl", []) + bootstrap_server = lookup(data.ibm_resource_instance.es_instance_7.extensions, "kafka_brokers_sasl", []) apikey = var.es_reader_api_key - topics = [data.ibm_event_streams_topic.es_topic_4.name] + topics = 
[data.ibm_event_streams_topic.es_topic_7.name] } ``` diff --git a/examples/ibm-partner-center-sell/README.md b/examples/ibm-partner-center-sell/README.md index 0fe30efee7..1e90b9b2f1 100644 --- a/examples/ibm-partner-center-sell/README.md +++ b/examples/ibm-partner-center-sell/README.md @@ -50,8 +50,8 @@ resource "ibm_onboarding_resource_broker" "onboarding_resource_broker_instance" |------|-------------|------|---------| | ibmcloud\_api\_key | IBM Cloud API key | `string` | true | | env | The environment to fetch this object from. | `string` | false | -| auth_username | The authentication username to reach the broker. | `string` | true | -| auth_password | The authentication password to reach the broker. | `string` | true | +| auth_username | The authentication username to reach the broker. | `string` | false | +| auth_password | The authentication password to reach the broker. | `string` | false | | auth_scheme | The supported authentication scheme for the broker. | `string` | true | | resource_group_crn | The cloud resource name of the resource group. | `string` | false | | state | The state of the broker. 
| `string` | false | @@ -81,9 +81,9 @@ resource "ibm_onboarding_resource_broker" "onboarding_resource_broker_instance" ```hcl resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_instance" { - product_id = var.onboarding_catalog_deployment_product_id - catalog_product_id = var.onboarding_catalog_deployment_catalog_product_id - catalog_plan_id = var.onboarding_catalog_deployment_catalog_plan_id + product_id = ibm_onboarding_product.onboarding_product_instance.id + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id + catalog_plan_id = ibm_onboarding_catalog_plan.onboarding_catalog_plan_instance.onboarding_catalog_plan_id env = var.onboarding_catalog_deployment_env name = var.onboarding_catalog_deployment_name active = var.onboarding_catalog_deployment_active @@ -125,8 +125,8 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst ```hcl resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { - product_id = var.onboarding_catalog_plan_product_id - catalog_product_id = var.onboarding_catalog_plan_catalog_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id env = var.onboarding_catalog_plan_env name = var.onboarding_catalog_plan_name active = var.onboarding_catalog_plan_active @@ -167,7 +167,7 @@ resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { ```hcl resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" { - product_id = var.onboarding_catalog_product_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id env = var.onboarding_catalog_product_env name = var.onboarding_catalog_product_name active = var.onboarding_catalog_product_active @@ -209,7 +209,7 @@ resource "ibm_onboarding_catalog_product" 
"onboarding_catalog_product_instance" ```hcl resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance" { - product_id = var.onboarding_iam_registration_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id env = var.onboarding_iam_registration_env name = var.onboarding_iam_registration_name enabled = var.onboarding_iam_registration_enabled @@ -234,7 +234,7 @@ resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance | ibmcloud\_api\_key | IBM Cloud API key | `string` | true | | product_id | The unique ID of the product. | `string` | true | | env | The environment to fetch this object from. | `string` | false | -| name | The IAM registration name, which must be the programmatic name of the product. | `string` | false | +| name | The IAM registration name, which must be the programmatic name of the product. | `string` | true | | enabled | Whether the service is enabled or disabled for IAM. | `bool` | false | | service_type | The type of the service. | `string` | false | | actions | The product access management action. | `list()` | false | @@ -285,6 +285,7 @@ resource "ibm_onboarding_product" "onboarding_product_instance" { | global_catalog_offering_id | The ID of a global catalog object. | | staging_global_catalog_offering_id | The ID of a global catalog object. | | approver_resource_id | The ID of the approval workflow of your product. | +| iam_registration_id | IAM registration identifier. | ### Resource: ibm_onboarding_registration @@ -313,8 +314,6 @@ resource "ibm_onboarding_registration" "onboarding_registration_instance" { | Name | Description | |------|-------------| -| account_dra_id | The ID of the IBM Digital Platform Reseller Agreement. | -| account_dpa_id | The ID of the IBM Digital Provider Agreement. | | created_at | The time when the registration was created. | | updated_at | The time when the registration was updated. 
| diff --git a/examples/ibm-partner-center-sell/main.tf b/examples/ibm-partner-center-sell/main.tf index a94ca8db57..09af685d89 100644 --- a/examples/ibm-partner-center-sell/main.tf +++ b/examples/ibm-partner-center-sell/main.tf @@ -20,9 +20,9 @@ resource "ibm_onboarding_resource_broker" "onboarding_resource_broker_instance" // Provision onboarding_catalog_deployment resource instance resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_instance" { - product_id = var.onboarding_catalog_deployment_product_id - catalog_product_id = var.onboarding_catalog_deployment_catalog_product_id - catalog_plan_id = var.onboarding_catalog_deployment_catalog_plan_id + product_id = ibm_onboarding_product.onboarding_product_instance.id + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id + catalog_plan_id = ibm_onboarding_catalog_plan.onboarding_catalog_plan_instance.onboarding_catalog_plan_id env = var.onboarding_catalog_deployment_env name = var.onboarding_catalog_deployment_name active = var.onboarding_catalog_deployment_active @@ -58,11 +58,17 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url = "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 @@ -70,14 +76,26 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst service { rc_provisionable = true iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true + } + deployment { + broker { + name = "name" + guid = "guid" + } + location = "location" + location_url = "location_url" + target_crn = "target_crn" } } } // Provision 
onboarding_catalog_plan resource instance resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { - product_id = var.onboarding_catalog_plan_product_id - catalog_product_id = var.onboarding_catalog_plan_catalog_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id env = var.onboarding_catalog_plan_env name = var.onboarding_catalog_plan_name active = var.onboarding_catalog_plan_active @@ -113,25 +131,42 @@ resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url = "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 } + service { + rc_provisionable = true + iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true + } pricing { type = "free" origin = "global_catalog" } + plan { + allow_internal_users = true + bindable = true + } } } // Provision onboarding_catalog_product resource instance resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" { - product_id = var.onboarding_catalog_product_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id env = var.onboarding_catalog_product_env name = var.onboarding_catalog_product_name active = var.onboarding_catalog_product_active @@ -170,11 +205,17 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url 
= "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 @@ -182,6 +223,9 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" service { rc_provisionable = true iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true } other { pc { @@ -223,13 +267,21 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" } } } + composite { + composite_kind = "service" + composite_tag = "composite_tag" + children { + kind = "service" + name = "name" + } + } } } } // Provision onboarding_iam_registration resource instance resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance" { - product_id = var.onboarding_iam_registration_product_id + product_id = ibm_onboarding_product.onboarding_product_instance.id env = var.onboarding_iam_registration_env name = var.onboarding_iam_registration_name enabled = var.onboarding_iam_registration_enabled @@ -408,7 +460,7 @@ resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance zh_cn = "zh_cn" } options { - access_policy = { "key" = "inner" } + access_policy = true policy_type = [ "access" ] account_type = "enterprise" } diff --git a/examples/ibm-partner-center-sell/variables.tf b/examples/ibm-partner-center-sell/variables.tf index 60e1ae9410..f064006d09 100644 --- a/examples/ibm-partner-center-sell/variables.tf +++ b/examples/ibm-partner-center-sell/variables.tf @@ -12,7 +12,7 @@ variable "onboarding_resource_broker_env" { variable "onboarding_resource_broker_auth_username" { description = "The authentication username to reach the broker." type = string - default = "auth_username" + default = "apikey" } variable "onboarding_resource_broker_auth_password" { description = "The authentication password to reach the broker." 
@@ -22,32 +22,32 @@ variable "onboarding_resource_broker_auth_password" { variable "onboarding_resource_broker_auth_scheme" { description = "The supported authentication scheme for the broker." type = string - default = "auth_scheme" + default = "bearer" } variable "onboarding_resource_broker_resource_group_crn" { description = "The cloud resource name of the resource group." type = string - default = "resource_group_crn" + default = "crn:v1:bluemix:public:resource-controller::a/4a5c3c51b97a446fbb1d0e1ef089823b::resource-group:4fae20bd538a4a738475350dfdc1596f" } variable "onboarding_resource_broker_state" { description = "The state of the broker." type = string - default = "removed" + default = "active" } variable "onboarding_resource_broker_broker_url" { description = "The URL associated with the broker application." type = string - default = "broker_url" + default = "https://broker-url-for-my-service.com" } variable "onboarding_resource_broker_allow_context_updates" { description = "Whether the resource controller will call the broker for any context changes to the instance. Currently, the only context related change is an instance name update." type = bool - default = true + default = false } variable "onboarding_resource_broker_catalog_type" { description = "To enable the provisioning of your broker, set this parameter value to `service`." type = string - default = "catalog_type" + default = "service" } variable "onboarding_resource_broker_type" { description = "The type of the provisioning model." @@ -57,12 +57,12 @@ variable "onboarding_resource_broker_type" { variable "onboarding_resource_broker_name" { description = "The name of the broker." type = string - default = "name" + default = "brokername" } variable "onboarding_resource_broker_region" { description = "The region where the pricing plan is available." 
type = string - default = "region" + default = "global" } // Resource arguments for onboarding_catalog_deployment @@ -89,7 +89,7 @@ variable "onboarding_catalog_deployment_env" { variable "onboarding_catalog_deployment_name" { description = "The programmatic name of this deployment." type = string - default = "name" + default = "deployment-eu-de" } variable "onboarding_catalog_deployment_active" { description = "Whether the service is active." @@ -99,7 +99,7 @@ variable "onboarding_catalog_deployment_active" { variable "onboarding_catalog_deployment_disabled" { description = "Determines the global visibility for the catalog entry, and its children. If it is not enabled, all plans are disabled." type = bool - default = true + default = false } variable "onboarding_catalog_deployment_kind" { description = "The kind of the global catalog object." @@ -109,7 +109,7 @@ variable "onboarding_catalog_deployment_kind" { variable "onboarding_catalog_deployment_tags" { description = "A list of tags that carry information about your product. These tags can be used to find your product in the IBM Cloud catalog." type = list(string) - default = [ "tags" ] + default = ["eu-gb"] } // Resource arguments for onboarding_catalog_plan @@ -131,7 +131,7 @@ variable "onboarding_catalog_plan_env" { variable "onboarding_catalog_plan_name" { description = "The programmatic name of this plan." type = string - default = "name" + default = "free-plan2" } variable "onboarding_catalog_plan_active" { description = "Whether the service is active." @@ -141,7 +141,7 @@ variable "onboarding_catalog_plan_active" { variable "onboarding_catalog_plan_disabled" { description = "Determines the global visibility for the catalog entry, and its children. If it is not enabled, all plans are disabled." type = bool - default = true + default = false } variable "onboarding_catalog_plan_kind" { description = "The kind of the global catalog object." 
@@ -151,7 +151,7 @@ variable "onboarding_catalog_plan_kind" { variable "onboarding_catalog_plan_tags" { description = "A list of tags that carry information about your product. These tags can be used to find your product in the IBM Cloud catalog." type = list(string) - default = [ "tags" ] + default = ["ibm_created"] } // Resource arguments for onboarding_catalog_product @@ -168,7 +168,7 @@ variable "onboarding_catalog_product_env" { variable "onboarding_catalog_product_name" { description = "The programmatic name of this product." type = string - default = "name" + default = "1p-service-08-06" } variable "onboarding_catalog_product_active" { description = "Whether the service is active." @@ -178,7 +178,7 @@ variable "onboarding_catalog_product_active" { variable "onboarding_catalog_product_disabled" { description = "Determines the global visibility for the catalog entry, and its children. If it is not enabled, all plans are disabled." type = bool - default = true + default = false } variable "onboarding_catalog_product_kind" { description = "The kind of the global catalog object." @@ -188,7 +188,7 @@ variable "onboarding_catalog_product_kind" { variable "onboarding_catalog_product_tags" { description = "A list of tags that carry information about your product. These tags can be used to find your product in the IBM Cloud catalog." type = list(string) - default = [ "tags" ] + default = ["keyword","support_ibm"] } // Resource arguments for onboarding_iam_registration @@ -232,7 +232,7 @@ variable "onboarding_iam_registration_parent_ids" { variable "onboarding_product_type" { description = "The type of the product." type = string - default = "software" + default = "service" } variable "onboarding_product_eccn_number" { description = "The Export Control Classification Number of your product." @@ -259,12 +259,12 @@ variable "onboarding_product_tax_assessment" { variable "onboarding_registration_account_id" { description = "The ID of your account." 
type = string - default = "account_id" + default = "4a5c3c51b97a446fbb1d0e1ef089823b" } variable "onboarding_registration_company_name" { description = "The name of your company that is displayed in the IBM Cloud catalog." type = string - default = "company_name" + default = "Beautiful Company" } variable "onboarding_registration_default_private_catalog_id" { description = "The default private catalog in which products are created." diff --git a/go.mod b/go.mod index 328df1afe6..7c6df5a9a3 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,9 @@ require ( github.com/IBM/cloud-databases-go-sdk v0.7.0 github.com/IBM/cloudant-go-sdk v0.8.0 github.com/IBM/code-engine-go-sdk v0.0.0-20240808131715-b9d168602dac + github.com/IBM/configuration-aggregator-go-sdk v0.0.1 github.com/IBM/container-registry-go-sdk v1.1.0 - github.com/IBM/continuous-delivery-go-sdk v1.8.1 + github.com/IBM/continuous-delivery-go-sdk v1.8.2 github.com/IBM/event-notifications-go-admin-sdk v0.9.0 github.com/IBM/eventstreams-go-sdk v1.4.0 github.com/IBM/go-sdk-core v1.1.0 @@ -30,7 +31,7 @@ require ( github.com/IBM/logs-router-go-sdk v1.0.5 github.com/IBM/mqcloud-go-sdk v0.1.0 github.com/IBM/networking-go-sdk v0.49.0 - github.com/IBM/platform-services-go-sdk v0.69.1 + github.com/IBM/platform-services-go-sdk v0.69.2 github.com/IBM/project-go-sdk v0.3.5 github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5 github.com/IBM/sarama v1.41.2 @@ -38,8 +39,8 @@ require ( github.com/IBM/schematics-go-sdk v0.3.0 github.com/IBM/secrets-manager-go-sdk/v2 v2.0.7 github.com/IBM/vmware-go-sdk v0.1.2 - github.com/IBM/vpc-beta-go-sdk v0.6.0 - github.com/IBM/vpc-go-sdk v0.58.0 + github.com/IBM/vpc-beta-go-sdk v0.8.0 + github.com/IBM/vpc-go-sdk v0.61.0 github.com/ScaleFT/sshkeys v0.0.0-20200327173127-6142f742bca5 github.com/akamai/AkamaiOPEN-edgegrid-golang v1.2.2 github.com/akamai/AkamaiOPEN-edgegrid-golang/v5 v5.0.0 @@ -51,7 +52,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 
github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/go-version v1.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/jinzhu/copier v0.3.2 github.com/minsikl/netscaler-nitro-go v0.0.0-20170827154432-5b14ce3643e3 @@ -75,6 +76,7 @@ require ( require ( cloud.google.com/go/kms v1.10.1 // indirect cloud.google.com/go/monitoring v1.13.0 // indirect + github.com/Bowery/prompt v0.0.0-20190916142128-fa8279994f75 // indirect github.com/Logicalis/asn1 v0.0.0-20190312173541-d60463189a56 // indirect github.com/PromonLogicalis/asn1 v0.0.0-20190312173541-d60463189a56 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect @@ -93,6 +95,7 @@ require ( github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect + github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/eapache/go-resiliency v1.4.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect @@ -125,6 +128,7 @@ require ( github.com/google/gnostic v0.6.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -165,6 +169,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/govendor v1.0.9 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/kube-object-storage/lib-bucket-provisioner 
v0.0.0-20221122204822-d1a8c34382f1 // indirect github.com/leodido/go-urn v1.4.0 // indirect @@ -176,6 +181,8 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/gox v1.0.1 // indirect + github.com/mitchellh/iochan v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -209,13 +216,16 @@ require ( go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect - golang.org/x/mod v0.17.0 // indirect + golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.23.0 // indirect + golang.org/x/tools/cmd/cover v0.1.0-deprecated // indirect + golang.org/x/tools/go/vcs v0.1.0-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect diff --git a/go.sum b/go.sum index d1f9c1784c..ca9a5692a7 100644 --- a/go.sum +++ b/go.sum @@ -105,6 +105,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Bowery/prompt v0.0.0-20190916142128-fa8279994f75 h1:xGHheKK44eC6K0u5X+DZW/fRaR1LnDdqPHMZMWx5fv8= 
+github.com/Bowery/prompt v0.0.0-20190916142128-fa8279994f75/go.mod h1:4/6eNcqZ09BZ9wLK3tZOjBA1nDj+B0728nlX5YRlSmQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -134,10 +136,12 @@ github.com/IBM/cloudant-go-sdk v0.8.0 h1:XzaqZFy5fm1Q9+iK52X5zRW39SHaahT9pf5SRgV github.com/IBM/cloudant-go-sdk v0.8.0/go.mod h1:zDGBs8ideVtn9MehXbIQNI3852B68BsMtKJvq3iPn/Q= github.com/IBM/code-engine-go-sdk v0.0.0-20240808131715-b9d168602dac h1:9Y5TB9Ar2SM6JPr2kM6c9pHSdSuHMDCIcbvTa/hNTj4= github.com/IBM/code-engine-go-sdk v0.0.0-20240808131715-b9d168602dac/go.mod h1:sy4CocPPaCiS+T1znqVdw83dkoyxSMUFxkksqahUhbY= +github.com/IBM/configuration-aggregator-go-sdk v0.0.1 h1:bgJqfd39hzKqtLxgrmOZ7UgjhB2lgZ4jWqRfgqa0VTk= +github.com/IBM/configuration-aggregator-go-sdk v0.0.1/go.mod h1:iMQUJgo42cbRk1XW06lmeHzm9/Nfk5/laBscGdPnSqY= github.com/IBM/container-registry-go-sdk v1.1.0 h1:sYyknIod8R4RJZQqAheiduP6wbSTphE9Ag8ho28yXjc= github.com/IBM/container-registry-go-sdk v1.1.0/go.mod h1:4TwsCnQtVfZ4Vkapy/KPvQBKFc3VOyUZYkwRU4FTPrs= -github.com/IBM/continuous-delivery-go-sdk v1.8.1 h1:BWmp58XODXqAe3DRQE3I0Lnrwewf8HzXH1FVCBYlAa0= -github.com/IBM/continuous-delivery-go-sdk v1.8.1/go.mod h1:5umVUaXEoTP2ULARgXRBPcR3vWDCmKD66P6XgNqpzZk= +github.com/IBM/continuous-delivery-go-sdk v1.8.2 h1:L2hyoJSxgCa0/HKviHKtut1z4Mn2qGz1gnJZ43LV0o0= +github.com/IBM/continuous-delivery-go-sdk v1.8.2/go.mod h1:5umVUaXEoTP2ULARgXRBPcR3vWDCmKD66P6XgNqpzZk= github.com/IBM/event-notifications-go-admin-sdk v0.9.0 h1:eaCd+GkxhNyot+8rA9WkAQdlVYrRD20LYiXjEytFO6M= github.com/IBM/event-notifications-go-admin-sdk v0.9.0/go.mod h1:OByvqfrNVxs7G6ggv8pwQCEVw10/TBJCLh7NM3z707w= github.com/IBM/eventstreams-go-sdk v1.4.0 h1:yS/Ns29sBOe8W2tynQmz9HTKqQZ0ckse4Py5Oy/F2rM= @@ -174,8 +178,8 @@ github.com/IBM/mqcloud-go-sdk v0.1.0 
h1:fWt4uisg5GbbsfNmAxx5/6c5gQIPM+VrEsTtnimE github.com/IBM/mqcloud-go-sdk v0.1.0/go.mod h1:LesMQlKHXvdks4jqQLZH7HfATY5lvTzHuwQU5+y7b2g= github.com/IBM/networking-go-sdk v0.49.0 h1:lPS34u3C0JVrbxH+Ulua76Nwl6Frv8BEfq6LRkyvOv0= github.com/IBM/networking-go-sdk v0.49.0/go.mod h1:G9CKbmPE8gSLjN+ABh4hIZ1bMx076enl5Eekvj6zQnA= -github.com/IBM/platform-services-go-sdk v0.69.1 h1:Wb8BYVpsPIppWbOQCgF7ytm+BbSOXdWWCf9zcZ6xGA4= -github.com/IBM/platform-services-go-sdk v0.69.1/go.mod h1:ZP3zUDxR1qRdUqzFdnJOlQN0QpVYol2eOUCv4uk03Jc= +github.com/IBM/platform-services-go-sdk v0.69.2 h1:8XNI8rBZShutuybFN5v8BsWlrdUa1eF0L6nOS+lDXmI= +github.com/IBM/platform-services-go-sdk v0.69.2/go.mod h1:ZP3zUDxR1qRdUqzFdnJOlQN0QpVYol2eOUCv4uk03Jc= github.com/IBM/project-go-sdk v0.3.5 h1:L+YClFUa14foS0B/hOOY9n7sIdsT5/XQicnXOyJSpyM= github.com/IBM/project-go-sdk v0.3.5/go.mod h1:FOJM9ihQV3EEAY6YigcWiTNfVCThtdY8bLC/nhQHFvo= github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5 h1:NPUhkoOCRuv3OFWt19PmwjXGGTKlvmbuPg9fUrBUNe4= @@ -190,10 +194,10 @@ github.com/IBM/secrets-manager-go-sdk/v2 v2.0.7 h1:5lKt1rHuKaAaiZtbPfsF8dgiko/gG github.com/IBM/secrets-manager-go-sdk/v2 v2.0.7/go.mod h1:RglK3v6CPe3T1myRtQCD6z+nBygXvNJwufAon0qcZok= github.com/IBM/vmware-go-sdk v0.1.2 h1:5lKWFyInWz9e2hwGsoFTEoLa1jYkD30SReN0fQ10w9M= github.com/IBM/vmware-go-sdk v0.1.2/go.mod h1:2UGPBJju3jiv5VKKBBm9a5L6bzF/aJdKOKAzJ7HaOjA= -github.com/IBM/vpc-beta-go-sdk v0.6.0 h1:wfM3AcW3zOM3xsRtZ+EA6+sESlGUjQ6Yf4n5QQyz4uc= -github.com/IBM/vpc-beta-go-sdk v0.6.0/go.mod h1:fzHDAQIqH/5yJmYsKodKHLcqxMDT+yfH6vZjdiw8CQA= -github.com/IBM/vpc-go-sdk v0.58.0 h1:Slk1jkcV7tPnf0iECQV2Oja7W8Bom0z7k9M4fMBY4bI= -github.com/IBM/vpc-go-sdk v0.58.0/go.mod h1:swmxiYLT+OfBsBYqJWGeRd6NPmBk4u/het2PZdtzIaw= +github.com/IBM/vpc-beta-go-sdk v0.8.0 h1:cEPpv4iw3Ba5W2d0AWg3TIbKeJ8y1nPuUuibR5Jt9eE= +github.com/IBM/vpc-beta-go-sdk v0.8.0/go.mod h1:hORgIyTFRzXrZIK9IohaWmCRBBlYiDRagsufi7M6akE= +github.com/IBM/vpc-go-sdk v0.61.0 
h1:VXT8ZwOQtl15/RSInj9+Z4OQC/vhE/Owoauu128BO4M= +github.com/IBM/vpc-go-sdk v0.61.0/go.mod h1:swmxiYLT+OfBsBYqJWGeRd6NPmBk4u/het2PZdtzIaw= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Logicalis/asn1 v0.0.0-20190312173541-d60463189a56 h1:vuquMR410psHNax14XKNWa0Ae/kYgWJcXi0IFuX60N0= @@ -391,6 +395,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a h1:saTgr5tMLFnmy/yg3qDTft4rE5DY2uJ/cCxCe3q0XTU= github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a/go.mod h1:Bw9BbhOJVNR+t0jCqx2GC6zv0TGBsShs56Y3gfSCvl0= +github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA= +github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU= github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= @@ -782,6 +788,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -951,10 +959,11 @@ github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1143,6 +1152,8 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/govendor v1.0.9 h1:WOH3FcVI9eOgnIZYg96iwUwrL4eOVx+aQ66oyX2R8Yc= +github.com/kardianos/govendor v1.0.9/go.mod h1:yvmR6q9ZZ7nSF5Wvh40v0wfP+3TwwL8zYQp+itoZSVM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -1261,7 +1272,10 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -1824,8 +1838,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0 
h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2057,8 +2071,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2196,6 +2210,10 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58 golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools/cmd/cover v0.1.0-deprecated 
h1:Rwy+mWYz6loAF+LnG1jHG/JWMHRMMC2/1XX3Ejkx9lA= +golang.org/x/tools/cmd/cover v0.1.0-deprecated/go.mod h1:hMDiIvlpN1NoVgmjLjUJE9tMHyxHjFX7RuQ+rW12mSA= +golang.org/x/tools/go/vcs v0.1.0-deprecated h1:cOIJqWBl99H1dH5LWizPa+0ImeeJq3t3cJjaeOWUAL4= +golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/ibm/acctest/acctest.go b/ibm/acctest/acctest.go index 7fdb4aefea..9fe5eb861d 100644 --- a/ibm/acctest/acctest.go +++ b/ibm/acctest/acctest.go @@ -23,6 +23,7 @@ const ( ) var ( + AccountId string AppIDTenantID string AppIDTestUserEmail string BackupPolicyJobID string @@ -203,39 +204,48 @@ var ( // For Power Colo var ( - Pi_auxiliary_volume_name string - Pi_cloud_instance_id string - Pi_dhcp_id string - Pi_host_group_id string - Pi_host_id string - Pi_image string - Pi_image_bucket_access_key string - Pi_image_bucket_file_name string - Pi_image_bucket_name string - Pi_image_bucket_region string - Pi_image_bucket_secret_key string - Pi_instance_name string - Pi_key_name string - Pi_network_name string - Pi_placement_group_name string - Pi_replication_volume_name string - Pi_resource_group_id string - Pi_sap_image string - Pi_shared_processor_pool_id string - Pi_snapshot_id string - Pi_spp_placement_group_id string - Pi_target_storage_tier string - Pi_volume_clone_task_id string - Pi_volume_group_id string - Pi_volume_group_name string - Pi_volume_id string - Pi_volume_name string - Pi_volume_onboarding_id string - Pi_volume_onboarding_source_crn string - PiCloudConnectionName string - PiSAPProfileID string - PiStoragePool string - PiStorageType string + Pi_auxiliary_volume_name string + 
Pi_cloud_instance_id string + Pi_dhcp_id string + Pi_host_group_id string + Pi_host_id string + Pi_image string + Pi_image_bucket_access_key string + Pi_image_bucket_file_name string + Pi_image_bucket_name string + Pi_image_bucket_region string + Pi_image_bucket_secret_key string + Pi_image_id string + Pi_instance_name string + Pi_key_name string + Pi_network_address_group_id string + Pi_network_id string + Pi_network_interface_id string + Pi_network_name string + Pi_network_security_group_id string + Pi_network_security_group_rule_id string + Pi_placement_group_name string + Pi_remote_id string + Pi_remote_type string + Pi_replication_volume_name string + Pi_resource_group_id string + Pi_sap_image string + Pi_shared_processor_pool_id string + Pi_snapshot_id string + Pi_spp_placement_group_id string + Pi_storage_connection string + Pi_target_storage_tier string + Pi_volume_clone_task_id string + Pi_volume_group_id string + Pi_volume_group_name string + Pi_volume_id string + Pi_volume_name string + Pi_volume_onboarding_id string + Pi_volume_onboarding_source_crn string + PiCloudConnectionName string + PiSAPProfileID string + PiStoragePool string + PiStorageType string ) var ( @@ -754,6 +764,17 @@ func init() { fmt.Println("[INFO] Set the environment variable SL_CIDR_2 for testing ibm_is_subnet else it is set to default value '10.240.64.0/24'") } + AccountId = os.Getenv("IS_ACCOUNT_ID") + if AccountId == "" { + AccountId = "fee82deba12e4c0fb69c3b09d1f12345" + fmt.Println("[INFO] Set the environment variable IS_ACCOUNT_ID for testing private_path_service_gateway_account_policy else it is set to default value 'fee82deba12e4c0fb69c3b09d1f12345'") + } + + ISAddressPrefixCIDR = os.Getenv("SL_ADDRESS_PREFIX_CIDR") + if ISAddressPrefixCIDR == "" { + ISAddressPrefixCIDR = "10.120.0.0/24" + fmt.Println("[INFO] Set the environment variable SL_ADDRESS_PREFIX_CIDR for testing ibm_is_vpc_address_prefix else it is set to default value '10.120.0.0/24'") + } ISCIDR2 = 
os.Getenv("SL_CIDR_2") if ISCIDR2 == "" { ISCIDR2 = "10.240.64.0/24" @@ -1047,7 +1068,13 @@ func init() { Pi_image_bucket_region = os.Getenv("PI_IMAGE_BUCKET_REGION") if Pi_image_bucket_region == "" { Pi_image_bucket_region = "us-east" - fmt.Println("[INFO] Set the environment variable PI_IMAGE_BUCKET_REGION for testing ibm_pi_image_export resource else it is set to default value 'us-east'") + fmt.Println("[INFO] Set the environment variable PI_IMAGE_BUCKET_REGION for testing ibm_pi_image resource else it is set to default value 'us-east'") + } + + Pi_image_id = os.Getenv("PI_IMAGE_ID") + if Pi_image_id == "" { + Pi_image_id = "IBMi-72-09-2924-11" + fmt.Println("[INFO] Set the environment variable PI_IMAGE_ID for testing ibm_pi_image resource else it is set to default value 'IBMi-72-09-2924-11'") } Pi_key_name = os.Getenv("PI_KEY_NAME") @@ -1061,6 +1088,40 @@ func init() { Pi_network_name = "terraform-test-power" fmt.Println("[INFO] Set the environment variable PI_NETWORK_NAME for testing ibm_pi_network_name resource else it is set to default value 'terraform-test-power'") } + Pi_network_id = os.Getenv("PI_NETWORK_ID") + if Pi_network_id == "" { + Pi_network_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_ID for testing ibm_pi_network_interface resource else it is set to default value 'terraform-test-power'") + } + Pi_network_interface_id = os.Getenv("PI_NETWORK_INTERFACE_ID") + if Pi_network_interface_id == "" { + Pi_network_interface_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_INTERFACE_ID for testing ibm_pi_network_interface resource else it is set to default value 'terraform-test-power'") + } + + Pi_network_security_group_id = os.Getenv("PI_NETWORK_SECURITY_GROUP_ID") + if Pi_network_security_group_id == "" { + Pi_network_security_group_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_SECURITY_GROUP_ID for testing 
ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } + + Pi_network_security_group_rule_id = os.Getenv("PI_NETWORK_SECURITY_GROUP_RULE_ID") + if Pi_network_security_group_rule_id == "" { + Pi_network_security_group_rule_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_SECURITY_GROUP_RULE_ID for testing ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } + + Pi_network_security_group_id = os.Getenv("PI_NETWORK_SECURITY_GROUP_ID") + if Pi_network_security_group_id == "" { + Pi_network_security_group_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_SECURITY_GROUP_ID for testing ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } + + Pi_network_security_group_rule_id = os.Getenv("PI_NETWORK_SECURITY_GROUP_RULE_ID") + if Pi_network_security_group_rule_id == "" { + Pi_network_security_group_rule_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_SECURITY_GROUP_RULE_ID for testing ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } Pi_volume_name = os.Getenv("PI_VOLUME_NAME") if Pi_volume_name == "" { @@ -1151,21 +1212,37 @@ func init() { Pi_placement_group_name = "tf-pi-placement-group" fmt.Println("[WARN] Set the environment variable PI_PLACEMENT_GROUP_NAME for testing ibm_pi_placement_group resource else it is set to default value 'tf-pi-placement-group'") } + + Pi_remote_id = os.Getenv("PI_REMOTE_ID") + if Pi_remote_id == "" { + Pi_remote_id = "terraform-test-power" + fmt.Println("[WARN] Set the environment variable PI_REMOTE_ID for testing ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } + + Pi_remote_type = os.Getenv("PI_REMOTE_TYPE") + if Pi_remote_type == "" { + Pi_remote_type = "terraform-test-power" + 
fmt.Println("[WARN] Set the environment variable PI_REMOTE_TYPE for testing ibm_pi_network_security_group resource else it is set to default value 'terraform-test-power'") + } + Pi_spp_placement_group_id = os.Getenv("PI_SPP_PLACEMENT_GROUP_ID") if Pi_spp_placement_group_id == "" { Pi_spp_placement_group_id = "tf-pi-spp-placement-group" fmt.Println("[WARN] Set the environment variable PI_SPP_PLACEMENT_GROUP_ID for testing ibm_pi_spp_placement_group resource else it is set to default value 'tf-pi-spp-placement-group'") } + PiStoragePool = os.Getenv("PI_STORAGE_POOL") if PiStoragePool == "" { PiStoragePool = "terraform-test-power" fmt.Println("[INFO] Set the environment variable PI_STORAGE_POOL for testing ibm_pi_storage_pool_capacity else it is set to default value 'terraform-test-power'") } + PiStorageType = os.Getenv("PI_STORAGE_TYPE") if PiStorageType == "" { PiStorageType = "terraform-test-power" fmt.Println("[INFO] Set the environment variable PI_STORAGE_TYPE for testing ibm_pi_storage_type_capacity else it is set to default value 'terraform-test-power'") } + // Added for resource capture instance testing Pi_capture_storage_image_path = os.Getenv("PI_CAPTURE_STORAGE_IMAGE_PATH") if Pi_capture_storage_image_path == "" { @@ -1190,7 +1267,10 @@ func init() { Pi_shared_processor_pool_id = "tf-pi-shared-processor-pool" fmt.Println("[WARN] Set the environment variable PI_SHARED_PROCESSOR_POOL_ID for testing ibm_pi_shared_processor_pool resource else it is set to default value 'tf-pi-shared-processor-pool'") } - + Pi_storage_connection = os.Getenv("PI_STORAGE_CONNECTION") + if Pi_storage_connection == "" { + fmt.Println("[WARN] Set the environment variable PI_STORAGE_CONNECTION for testing pi_storage_connection resource else it is empty") + } Pi_target_storage_tier = os.Getenv("PI_TARGET_STORAGE_TIER") if Pi_target_storage_tier == "" { Pi_target_storage_tier = "terraform-test-tier" @@ -1213,12 +1293,17 @@ func init() { Pi_host_group_id = "" fmt.Println("[WARN] Set the 
environment variable PI_HOST_GROUP_ID for testing ibm_pi_host resource else it is set to default value ''") } - Pi_host_id = os.Getenv("PI_HOST_ID") if Pi_host_id == "" { Pi_host_id = "" fmt.Println("[WARN] Set the environment variable PI_HOST_ID for testing ibm_pi_host resource else it is set to default value ''") } + Pi_network_address_group_id = os.Getenv("PI_NETWORK_ADDRESS_GROUP_ID") + if Pi_network_address_group_id == "" { + Pi_network_address_group_id = "terraform-test-power" + fmt.Println("[INFO] Set the environment variable PI_NETWORK_ADDRESS_GROUP_ID for testing ibm_pi_network_address_group data source else it is set to default value 'terraform-test-power'") + } + WorkspaceID = os.Getenv("SCHEMATICS_WORKSPACE_ID") if WorkspaceID == "" { WorkspaceID = "us-south.workspace.tf-acc-test-schematics-state-test.392cd99f" @@ -1516,6 +1601,16 @@ func init() { fmt.Println("[INFO] Set the environment variable SATELLITE_RESOURCE_INSTANCE_ID for ibm_cos_bucket satellite location resource or datasource else tests will fail if this is not set correctly") } + ConfigAggregatorEndpoint := os.Getenv("IBMCLOUD_CONFIG_AGGREGATOR_ENDPOINT") + if ConfigAggregatorEndpoint == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_CONFIG_AGGREGATOR_ENDPOINT with a VALID SCC API ENDPOINT") + } + + ConfigAggregatorInstanceID := os.Getenv("IBMCLOUD_CONFIG_AGGREGATOR_INSTANCE_ID") + if ConfigAggregatorInstanceID == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_CONFIG_AGGREGATOR_INSTANCE_ID with a VALID SCC INSTANCE ID") + } + SccInstanceID = os.Getenv("IBMCLOUD_SCC_INSTANCE_ID") if SccInstanceID == "" { fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_INSTANCE_ID with a VALID SCC INSTANCE ID") diff --git a/ibm/conns/config.go b/ibm/conns/config.go index 83ae73f383..b502894045 100644 --- a/ibm/conns/config.go +++ b/ibm/conns/config.go @@ -93,6 +93,8 @@ import ( jwt "github.com/golang-jwt/jwt" slsession "github.com/softlayer/softlayer-go/session" 
+ "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" + bluemix "github.com/IBM-Cloud/bluemix-go" "github.com/IBM-Cloud/bluemix-go/api/account/accountv1" "github.com/IBM-Cloud/bluemix-go/api/account/accountv2" @@ -120,6 +122,7 @@ import ( "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" "github.com/IBM/continuous-delivery-go-sdk/cdtoolchainv2" "github.com/IBM/event-notifications-go-admin-sdk/eventnotificationsv1" + "github.com/IBM/eventstreams-go-sdk/pkg/adminrestv1" "github.com/IBM/eventstreams-go-sdk/pkg/schemaregistryv1" "github.com/IBM/ibm-hpcs-uko-sdk/ukov4" "github.com/IBM/logs-go-sdk/logsv0" @@ -220,6 +223,7 @@ type ClientSession interface { ContainerAPI() (containerv1.ContainerServiceAPI, error) VpcContainerAPI() (containerv2.ContainerServiceAPI, error) ContainerRegistryV1() (*containerregistryv1.ContainerRegistryV1, error) + ConfigurationAggregatorV1() (*configurationaggregatorv1.ConfigurationAggregatorV1, error) FunctionClient() (*whisk.Client, error) GlobalSearchAPI() (globalsearchv2.GlobalSearchServiceAPI, error) GlobalTaggingAPI() (globaltaggingv3.GlobalTaggingServiceAPI, error) @@ -299,6 +303,7 @@ type ClientSession interface { AtrackerV2() (*atrackerv2.AtrackerV2, error) MetricsRouterV3() (*metricsrouterv3.MetricsRouterV3, error) ESschemaRegistrySession() (*schemaregistryv1.SchemaregistryV1, error) + ESadminRestSession() (*adminrestv1.AdminrestV1, error) ContextBasedRestrictionsV1() (*contextbasedrestrictionsv1.ContextBasedRestrictionsV1, error) SecurityAndComplianceCenterV3() (*scc.SecurityAndComplianceCenterApiV3, error) CdToolchainV2() (*cdtoolchainv2.CdToolchainV2, error) @@ -327,6 +332,9 @@ type clientSession struct { accountV1ConfigErr error bmxAccountv1ServiceAPI accountv1.AccountServiceAPI + configurationAggregatorClient *configurationaggregatorv1.ConfigurationAggregatorV1 + configurationAggregatorClientErr error + bmxUserDetails *UserConfig bmxUserFetchErr error @@ -614,6 +622,9 @@ type clientSession 
struct { esSchemaRegistryClient *schemaregistryv1.SchemaregistryV1 esSchemaRegistryErr error + esAdminRestClient *adminrestv1.AdminrestV1 + esAdminRestErr error + // Security and Compliance Center (SCC) securityAndComplianceCenterClient *scc.SecurityAndComplianceCenterApiV3 securityAndComplianceCenterClientErr error @@ -667,6 +678,11 @@ func (session clientSession) PartnerCenterSellV1() (*partnercentersellv1.Partner return session.partnerCenterSellClient, session.partnerCenterSellClientErr } +// Configuration Aggregator +func (session clientSession) ConfigurationAggregatorV1() (*configurationaggregatorv1.ConfigurationAggregatorV1, error) { + return session.configurationAggregatorClient, session.configurationAggregatorClientErr +} + // AppIDAPI provides AppID Service APIs ... func (session clientSession) AppIDAPI() (*appid.AppIDManagementV4, error) { return session.appidAPI, session.appidErr @@ -1212,6 +1228,10 @@ func (session clientSession) ESschemaRegistrySession() (*schemaregistryv1.Schema return session.esSchemaRegistryClient, session.esSchemaRegistryErr } +func (session clientSession) ESadminRestSession() (*adminrestv1.AdminrestV1, error) { + return session.esAdminRestClient, session.esAdminRestErr +} + // Security and Compliance center Admin API func (session clientSession) SecurityAndComplianceCenterV3() (*scc.SecurityAndComplianceCenterApiV3, error) { return session.securityAndComplianceCenterClient, session.securityAndComplianceCenterClientErr @@ -2381,6 +2401,28 @@ func (c *Config) ClientSession() (interface{}, error) { // }) } + // Construct an instance of the 'Configuration Aggregator' service. + var configBaseURL string + configBaseURL = ContructEndpoint(fmt.Sprintf("%s.apprapp", c.Region), cloudEndpoint) + + configurationAggregatorClientOptions := &configurationaggregatorv1.ConfigurationAggregatorV1Options{ + Authenticator: authenticator, + URL: configBaseURL, + } + + // Construct the service client. 
+ session.configurationAggregatorClient, err = configurationaggregatorv1.NewConfigurationAggregatorV1(configurationAggregatorClientOptions) + if err == nil { + // Enable retries for API calls + session.configurationAggregatorClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + // Add custom header for analytics + session.configurationAggregatorClient.SetDefaultHeaders(gohttp.Header{ + "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, + }) + } else { + session.configurationAggregatorClientErr = fmt.Errorf("Error occurred while constructing 'Configuration Aggregator' service client: %q", err) + } + // CIS Service instances starts here. cisURL := ContructEndpoint("api.cis", cloudEndpoint) if c.Visibility == "private" { @@ -3325,6 +3367,20 @@ func (c *Config) ClientSession() (interface{}, error) { }) } + esAdminRestV1Options := &adminrestv1.AdminrestV1Options{ + Authenticator: authenticator, + } + session.esAdminRestClient, err = adminrestv1.NewAdminrestV1(esAdminRestV1Options) + if err != nil { + session.esAdminRestErr = fmt.Errorf("[ERROR] Error occured while configuring Event Streams admin rest: %q", err) + } + if session.esAdminRestClient != nil && session.esAdminRestClient.Service != nil { + session.esAdminRestClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) + session.esAdminRestClient.SetDefaultHeaders(gohttp.Header{ + "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, + }) + } + // Construct an "options" struct for creating the service client. 
var cdToolchainClientURL string if c.Visibility == "private" || c.Visibility == "public-and-private" { diff --git a/ibm/flex/structures.go b/ibm/flex/structures.go index 1164864179..41c407e251 100644 --- a/ibm/flex/structures.go +++ b/ibm/flex/structures.go @@ -129,6 +129,22 @@ func FlattenIntList(list []int) []interface{} { return vs } +func ExpandInt64List(input []interface{}) []int64 { + vs := make([]int64, len(input)) + for i, v := range input { + vs[i] = v.(int64) + } + return vs +} + +func FlattenInt64List(list []int64) []interface{} { + vs := make([]interface{}, len(list)) + for i, v := range list { + vs[i] = v + } + return vs +} + func NewStringSet(f schema.SchemaSetFunc, in []string) *schema.Set { var out = make([]interface{}, len(in), len(in)) for i, v := range in { @@ -3023,11 +3039,11 @@ func ResourceVolumeValidate(diff *schema.ResourceDiff) error { } } - if profile != "custom" { + if profile != "custom" && profile != "sdp" { if iops != 0 && diff.NewValueKnown("iops") && diff.HasChange("iops") { - return fmt.Errorf("VolumeError : iops is applicable for only custom volume profiles") + return fmt.Errorf("VolumeError : iops is applicable for only custom/sdp volume profiles") } - } else { + } else if profile != "sdp" { if capacity == 0 { capacity = int64(100) } diff --git a/ibm/provider/provider.go b/ibm/provider/provider.go index 1f857ee945..98d2505127 100644 --- a/ibm/provider/provider.go +++ b/ibm/provider/provider.go @@ -30,6 +30,7 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cloudfoundry" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cloudshell" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/codeengine" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/configurationaggregator" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/contextbasedrestrictions" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cos" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/database" @@ -230,12 
+231,15 @@ func Provider() *schema.Provider { }, DataSourcesMap: map[string]*schema.Resource{ - "ibm_api_gateway": apigateway.DataSourceIBMApiGateway(), - "ibm_account": cloudfoundry.DataSourceIBMAccount(), - "ibm_app": cloudfoundry.DataSourceIBMApp(), - "ibm_app_domain_private": cloudfoundry.DataSourceIBMAppDomainPrivate(), - "ibm_app_domain_shared": cloudfoundry.DataSourceIBMAppDomainShared(), - "ibm_app_route": cloudfoundry.DataSourceIBMAppRoute(), + "ibm_api_gateway": apigateway.DataSourceIBMApiGateway(), + "ibm_account": cloudfoundry.DataSourceIBMAccount(), + "ibm_app": cloudfoundry.DataSourceIBMApp(), + "ibm_app_domain_private": cloudfoundry.DataSourceIBMAppDomainPrivate(), + "ibm_app_domain_shared": cloudfoundry.DataSourceIBMAppDomainShared(), + "ibm_app_route": cloudfoundry.DataSourceIBMAppRoute(), + "ibm_config_aggregator_configurations": configurationaggregator.AddConfigurationAggregatorInstanceFields(configurationaggregator.DataSourceIbmConfigAggregatorConfigurations()), + "ibm_config_aggregator_settings": configurationaggregator.AddConfigurationAggregatorInstanceFields(configurationaggregator.DataSourceIbmConfigAggregatorSettings()), + "ibm_config_aggregator_resource_collection_status": configurationaggregator.AddConfigurationAggregatorInstanceFields(configurationaggregator.DataSourceIbmConfigAggregatorResourceCollectionStatus()), // // AppID "ibm_appid_action_url": appid.DataSourceIBMAppIDActionURL(), @@ -354,6 +358,7 @@ func Provider() *schema.Provider { "ibm_dns_secondary": classicinfrastructure.DataSourceIBMDNSSecondary(), "ibm_event_streams_topic": eventstreams.DataSourceIBMEventStreamsTopic(), "ibm_event_streams_schema": eventstreams.DataSourceIBMEventStreamsSchema(), + "ibm_event_streams_quota": eventstreams.DataSourceIBMEventStreamsQuota(), "ibm_hpcs": hpcs.DataSourceIBMHPCS(), "ibm_hpcs_managed_key": hpcs.DataSourceIbmManagedKey(), "ibm_hpcs_key_template": hpcs.DataSourceIbmKeyTemplate(), @@ -467,26 +472,32 @@ func Provider() *schema.Provider { 
"ibm_is_instance_network_interface_reserved_ip": vpc.DataSourceIBMISInstanceNICReservedIP(), "ibm_is_instance_network_interface_reserved_ips": vpc.DataSourceIBMISInstanceNICReservedIPs(), - "ibm_is_instance_volume_attachment": vpc.DataSourceIBMISInstanceVolumeAttachment(), - "ibm_is_instance_volume_attachments": vpc.DataSourceIBMISInstanceVolumeAttachments(), - "ibm_is_ipsec_policy": vpc.DataSourceIBMIsIpsecPolicy(), - "ibm_is_ipsec_policies": vpc.DataSourceIBMIsIpsecPolicies(), - "ibm_is_ike_policies": vpc.DataSourceIBMIsIkePolicies(), - "ibm_is_ike_policy": vpc.DataSourceIBMIsIkePolicy(), - "ibm_is_lb": vpc.DataSourceIBMISLB(), - "ibm_is_lb_listener": vpc.DataSourceIBMISLBListener(), - "ibm_is_lb_listeners": vpc.DataSourceIBMISLBListeners(), - "ibm_is_lb_listener_policies": vpc.DataSourceIBMISLBListenerPolicies(), - "ibm_is_lb_listener_policy": vpc.DataSourceIBMISLBListenerPolicy(), - "ibm_is_lb_listener_policy_rule": vpc.DataSourceIBMISLBListenerPolicyRule(), - "ibm_is_lb_listener_policy_rules": vpc.DataSourceIBMISLBListenerPolicyRules(), - "ibm_is_lb_pool": vpc.DataSourceIBMISLBPool(), - "ibm_is_lb_pools": vpc.DataSourceIBMISLBPools(), - "ibm_is_lb_pool_member": vpc.DataSourceIBMIBLBPoolMember(), - "ibm_is_lb_pool_members": vpc.DataSourceIBMISLBPoolMembers(), - "ibm_is_lb_profile": vpc.DataSourceIBMISLbProfile(), - "ibm_is_lb_profiles": vpc.DataSourceIBMISLbProfiles(), - "ibm_is_lbs": vpc.DataSourceIBMISLBS(), + "ibm_is_instance_volume_attachment": vpc.DataSourceIBMISInstanceVolumeAttachment(), + "ibm_is_instance_volume_attachments": vpc.DataSourceIBMISInstanceVolumeAttachments(), + "ibm_is_ipsec_policy": vpc.DataSourceIBMIsIpsecPolicy(), + "ibm_is_ipsec_policies": vpc.DataSourceIBMIsIpsecPolicies(), + "ibm_is_ike_policies": vpc.DataSourceIBMIsIkePolicies(), + "ibm_is_ike_policy": vpc.DataSourceIBMIsIkePolicy(), + "ibm_is_lb": vpc.DataSourceIBMISLB(), + "ibm_is_lb_listener": vpc.DataSourceIBMISLBListener(), + "ibm_is_lb_listeners": 
vpc.DataSourceIBMISLBListeners(), + "ibm_is_lb_listener_policies": vpc.DataSourceIBMISLBListenerPolicies(), + "ibm_is_lb_listener_policy": vpc.DataSourceIBMISLBListenerPolicy(), + "ibm_is_lb_listener_policy_rule": vpc.DataSourceIBMISLBListenerPolicyRule(), + "ibm_is_lb_listener_policy_rules": vpc.DataSourceIBMISLBListenerPolicyRules(), + "ibm_is_lb_pool": vpc.DataSourceIBMISLBPool(), + "ibm_is_lb_pools": vpc.DataSourceIBMISLBPools(), + "ibm_is_lb_pool_member": vpc.DataSourceIBMIBLBPoolMember(), + "ibm_is_lb_pool_members": vpc.DataSourceIBMISLBPoolMembers(), + "ibm_is_lb_profile": vpc.DataSourceIBMISLbProfile(), + "ibm_is_lb_profiles": vpc.DataSourceIBMISLbProfiles(), + "ibm_is_lbs": vpc.DataSourceIBMISLBS(), + "ibm_is_private_path_service_gateway": vpc.DataSourceIBMIsPrivatePathServiceGateway(), + "ibm_is_private_path_service_gateway_account_policy": vpc.DataSourceIBMIsPrivatePathServiceGatewayAccountPolicy(), + "ibm_is_private_path_service_gateway_account_policies": vpc.DataSourceIBMIsPrivatePathServiceGatewayAccountPolicies(), + "ibm_is_private_path_service_gateways": vpc.DataSourceIBMIsPrivatePathServiceGateways(), + "ibm_is_private_path_service_gateway_endpoint_gateway_binding": vpc.DataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBinding(), + "ibm_is_private_path_service_gateway_endpoint_gateway_bindings": vpc.DataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindings(), "ibm_is_public_gateway": vpc.DataSourceIBMISPublicGateway(), "ibm_is_public_gateways": vpc.DataSourceIBMISPublicGateways(), "ibm_is_region": vpc.DataSourceIBMISRegion(), @@ -651,7 +662,13 @@ func Provider() *schema.Provider { "ibm_pi_instances": power.DataSourceIBMPIInstances(), "ibm_pi_key": power.DataSourceIBMPIKey(), "ibm_pi_keys": power.DataSourceIBMPIKeys(), + "ibm_pi_network_address_group": power.DataSourceIBMPINetworkAddressGroup(), + "ibm_pi_network_address_groups": power.DataSourceIBMPINetworkAddressGroups(), + "ibm_pi_network_interface": 
power.DataSourceIBMPINetworkInterface(), + "ibm_pi_network_interfaces": power.DataSourceIBMPINetworkInterfaces(), "ibm_pi_network_port": power.DataSourceIBMPINetworkPort(), + "ibm_pi_network_security_group": power.DataSourceIBMPINetworkSecurityGroup(), + "ibm_pi_network_security_groups": power.DataSourceIBMPINetworkSecurityGroups(), "ibm_pi_network": power.DataSourceIBMPINetwork(), "ibm_pi_networks": power.DataSourceIBMPINetworks(), "ibm_pi_placement_group": power.DataSourceIBMPIPlacementGroup(), @@ -666,6 +683,7 @@ func Provider() *schema.Provider { "ibm_pi_spp_placement_groups": power.DataSourceIBMPISPPPlacementGroups(), "ibm_pi_storage_pool_capacity": power.DataSourceIBMPIStoragePoolCapacity(), "ibm_pi_storage_pools_capacity": power.DataSourceIBMPIStoragePoolsCapacity(), + "ibm_pi_storage_tiers": power.DataSourceIBMPIStorageTiers(), "ibm_pi_storage_type_capacity": power.DataSourceIBMPIStorageTypeCapacity(), "ibm_pi_storage_types_capacity": power.DataSourceIBMPIStorageTypesCapacity(), "ibm_pi_system_pools": power.DataSourceIBMPISystemPools(), @@ -681,6 +699,8 @@ func Provider() *schema.Provider { "ibm_pi_volume_onboarding": power.DataSourceIBMPIVolumeOnboarding(), "ibm_pi_volume_onboardings": power.DataSourceIBMPIVolumeOnboardings(), "ibm_pi_volume_remote_copy_relationship": power.DataSourceIBMPIVolumeRemoteCopyRelationship(), + "ibm_pi_volume_snapshot": power.DataSourceIBMPIVolumeSnapshot(), + "ibm_pi_volume_snapshots": power.DataSourceIBMPIVolumeSnapshots(), "ibm_pi_volume": power.DataSourceIBMPIVolume(), "ibm_pi_workspace": power.DatasourceIBMPIWorkspace(), "ibm_pi_workspaces": power.DatasourceIBMPIWorkspaces(), @@ -977,6 +997,7 @@ func Provider() *schema.Provider { "ibm_app_domain_private": cloudfoundry.ResourceIBMAppDomainPrivate(), "ibm_app_domain_shared": cloudfoundry.ResourceIBMAppDomainShared(), "ibm_app_route": cloudfoundry.ResourceIBMAppRoute(), + "ibm_config_aggregator_settings": 
configurationaggregator.AddConfigurationAggregatorInstanceFields(configurationaggregator.ResourceIbmConfigAggregatorSettings()), // AppID "ibm_appid_action_url": appid.ResourceIBMAppIDActionURL(), @@ -1102,6 +1123,7 @@ func Provider() *schema.Provider { "ibm_dns_record": classicinfrastructure.ResourceIBMDNSRecord(), "ibm_event_streams_topic": eventstreams.ResourceIBMEventStreamsTopic(), "ibm_event_streams_schema": eventstreams.ResourceIBMEventStreamsSchema(), + "ibm_event_streams_quota": eventstreams.ResourceIBMEventStreamsQuota(), "ibm_firewall": classicinfrastructure.ResourceIBMFirewall(), "ibm_firewall_policy": classicinfrastructure.ResourceIBMFirewallPolicy(), "ibm_hpcs": hpcs.ResourceIBMHPCS(), @@ -1155,125 +1177,130 @@ func Provider() *schema.Provider { "ibm_is_bare_metal_server_network_interface": vpc.ResourceIBMIsBareMetalServerNetworkInterface(), "ibm_is_bare_metal_server": vpc.ResourceIBMIsBareMetalServer(), - "ibm_is_dedicated_host": vpc.ResourceIbmIsDedicatedHost(), - "ibm_is_dedicated_host_group": vpc.ResourceIbmIsDedicatedHostGroup(), - "ibm_is_dedicated_host_disk_management": vpc.ResourceIBMISDedicatedHostDiskManagement(), - "ibm_is_placement_group": vpc.ResourceIbmIsPlacementGroup(), - "ibm_is_floating_ip": vpc.ResourceIBMISFloatingIP(), - "ibm_is_flow_log": vpc.ResourceIBMISFlowLog(), - "ibm_is_instance": vpc.ResourceIBMISInstance(), - "ibm_is_instance_action": vpc.ResourceIBMISInstanceAction(), - "ibm_is_instance_network_attachment": vpc.ResourceIBMIsInstanceNetworkAttachment(), - "ibm_is_instance_network_interface": vpc.ResourceIBMIsInstanceNetworkInterface(), - "ibm_is_instance_network_interface_floating_ip": vpc.ResourceIBMIsInstanceNetworkInterfaceFloatingIp(), - "ibm_is_instance_disk_management": vpc.ResourceIBMISInstanceDiskManagement(), - "ibm_is_instance_group": vpc.ResourceIBMISInstanceGroup(), - "ibm_is_instance_group_membership": vpc.ResourceIBMISInstanceGroupMembership(), - "ibm_is_instance_group_manager": 
vpc.ResourceIBMISInstanceGroupManager(), - "ibm_is_instance_group_manager_policy": vpc.ResourceIBMISInstanceGroupManagerPolicy(), - "ibm_is_instance_group_manager_action": vpc.ResourceIBMISInstanceGroupManagerAction(), - "ibm_is_instance_volume_attachment": vpc.ResourceIBMISInstanceVolumeAttachment(), - "ibm_is_virtual_endpoint_gateway": vpc.ResourceIBMISEndpointGateway(), - "ibm_is_virtual_endpoint_gateway_ip": vpc.ResourceIBMISEndpointGatewayIP(), - "ibm_is_instance_template": vpc.ResourceIBMISInstanceTemplate(), - "ibm_is_ike_policy": vpc.ResourceIBMISIKEPolicy(), - "ibm_is_ipsec_policy": vpc.ResourceIBMISIPSecPolicy(), - "ibm_is_lb": vpc.ResourceIBMISLB(), - "ibm_is_lb_listener": vpc.ResourceIBMISLBListener(), - "ibm_is_lb_listener_policy": vpc.ResourceIBMISLBListenerPolicy(), - "ibm_is_lb_listener_policy_rule": vpc.ResourceIBMISLBListenerPolicyRule(), - "ibm_is_lb_pool": vpc.ResourceIBMISLBPool(), - "ibm_is_lb_pool_member": vpc.ResourceIBMISLBPoolMember(), - "ibm_is_network_acl": vpc.ResourceIBMISNetworkACL(), - "ibm_is_network_acl_rule": vpc.ResourceIBMISNetworkACLRule(), - "ibm_is_public_gateway": vpc.ResourceIBMISPublicGateway(), - "ibm_is_security_group": vpc.ResourceIBMISSecurityGroup(), - "ibm_is_security_group_rule": vpc.ResourceIBMISSecurityGroupRule(), - "ibm_is_security_group_target": vpc.ResourceIBMISSecurityGroupTarget(), - "ibm_is_share": vpc.ResourceIbmIsShare(), - "ibm_is_share_replica_operations": vpc.ResourceIbmIsShareReplicaOperations(), - "ibm_is_share_mount_target": vpc.ResourceIBMIsShareMountTarget(), - "ibm_is_share_delete_accessor_binding": vpc.ResourceIbmIsShareDeleteAccessorBinding(), - "ibm_is_subnet": vpc.ResourceIBMISSubnet(), - "ibm_is_reservation": vpc.ResourceIBMISReservation(), - "ibm_is_reservation_activate": vpc.ResourceIBMISReservationActivate(), - "ibm_is_subnet_reserved_ip": vpc.ResourceIBMISReservedIP(), - "ibm_is_subnet_reserved_ip_patch": vpc.ResourceIBMISReservedIPPatch(), - "ibm_is_subnet_network_acl_attachment": 
vpc.ResourceIBMISSubnetNetworkACLAttachment(), - "ibm_is_subnet_public_gateway_attachment": vpc.ResourceIBMISSubnetPublicGatewayAttachment(), - "ibm_is_subnet_routing_table_attachment": vpc.ResourceIBMISSubnetRoutingTableAttachment(), - "ibm_is_ssh_key": vpc.ResourceIBMISSSHKey(), - "ibm_is_snapshot": vpc.ResourceIBMSnapshot(), - "ibm_is_virtual_network_interface": vpc.ResourceIBMIsVirtualNetworkInterface(), - "ibm_is_virtual_network_interface_floating_ip": vpc.ResourceIBMIsVirtualNetworkInterfaceFloatingIP(), - "ibm_is_virtual_network_interface_ip": vpc.ResourceIBMIsVirtualNetworkInterfaceIP(), - "ibm_is_snapshot_consistency_group": vpc.ResourceIBMIsSnapshotConsistencyGroup(), - "ibm_is_volume": vpc.ResourceIBMISVolume(), - "ibm_is_vpn_gateway": vpc.ResourceIBMISVPNGateway(), - "ibm_is_vpn_gateway_connection": vpc.ResourceIBMISVPNGatewayConnection(), - "ibm_is_vpc": vpc.ResourceIBMISVPC(), - "ibm_is_vpc_address_prefix": vpc.ResourceIBMISVpcAddressPrefix(), - "ibm_is_vpc_dns_resolution_binding": vpc.ResourceIBMIsVPCDnsResolutionBinding(), - "ibm_is_vpc_routing_table": vpc.ResourceIBMISVPCRoutingTable(), - "ibm_is_vpc_routing_table_route": vpc.ResourceIBMISVPCRoutingTableRoute(), - "ibm_is_vpn_server": vpc.ResourceIBMIsVPNServer(), - "ibm_is_vpn_server_client": vpc.ResourceIBMIsVPNServerClient(), - "ibm_is_vpn_server_route": vpc.ResourceIBMIsVPNServerRoute(), - "ibm_is_image": vpc.ResourceIBMISImage(), - "ibm_is_image_deprecate": vpc.ResourceIBMISImageDeprecate(), - "ibm_is_image_export_job": vpc.ResourceIBMIsImageExportJob(), - "ibm_is_image_obsolete": vpc.ResourceIBMISImageObsolete(), - "ibm_lb": classicinfrastructure.ResourceIBMLb(), - "ibm_lbaas": classicinfrastructure.ResourceIBMLbaas(), - "ibm_lbaas_health_monitor": classicinfrastructure.ResourceIBMLbaasHealthMonitor(), - "ibm_lbaas_server_instance_attachment": classicinfrastructure.ResourceIBMLbaasServerInstanceAttachment(), - "ibm_lb_service": classicinfrastructure.ResourceIBMLbService(), - 
"ibm_lb_service_group": classicinfrastructure.ResourceIBMLbServiceGroup(), - "ibm_lb_vpx": classicinfrastructure.ResourceIBMLbVpx(), - "ibm_lb_vpx_ha": classicinfrastructure.ResourceIBMLbVpxHa(), - "ibm_lb_vpx_service": classicinfrastructure.ResourceIBMLbVpxService(), - "ibm_lb_vpx_vip": classicinfrastructure.ResourceIBMLbVpxVip(), - "ibm_multi_vlan_firewall": classicinfrastructure.ResourceIBMMultiVlanFirewall(), - "ibm_network_gateway": classicinfrastructure.ResourceIBMNetworkGateway(), - "ibm_network_gateway_vlan_association": classicinfrastructure.ResourceIBMNetworkGatewayVlanAttachment(), - "ibm_network_interface_sg_attachment": classicinfrastructure.ResourceIBMNetworkInterfaceSGAttachment(), - "ibm_network_public_ip": classicinfrastructure.ResourceIBMNetworkPublicIp(), - "ibm_network_vlan": classicinfrastructure.ResourceIBMNetworkVlan(), - "ibm_network_vlan_spanning": classicinfrastructure.ResourceIBMNetworkVlanSpan(), - "ibm_object_storage_account": classicinfrastructure.ResourceIBMObjectStorageAccount(), - "ibm_org": cloudfoundry.ResourceIBMOrg(), - "ibm_pn_application_chrome": pushnotification.ResourceIBMPNApplicationChrome(), - "ibm_app_config_environment": appconfiguration.ResourceIBMAppConfigEnvironment(), - "ibm_app_config_collection": appconfiguration.ResourceIBMAppConfigCollection(), - "ibm_app_config_feature": appconfiguration.ResourceIBMIbmAppConfigFeature(), - "ibm_app_config_property": appconfiguration.ResourceIBMIbmAppConfigProperty(), - "ibm_app_config_segment": appconfiguration.ResourceIBMIbmAppConfigSegment(), - "ibm_app_config_snapshot": appconfiguration.ResourceIBMIbmAppConfigSnapshot(), - "ibm_kms_key": kms.ResourceIBMKmskey(), - "ibm_kms_key_with_policy_overrides": kms.ResourceIBMKmsKeyWithPolicyOverrides(), - "ibm_kms_key_alias": kms.ResourceIBMKmskeyAlias(), - "ibm_kms_key_rings": kms.ResourceIBMKmskeyRings(), - "ibm_kms_key_policies": kms.ResourceIBMKmskeyPolicies(), - "ibm_kp_key": kms.ResourceIBMkey(), - "ibm_kms_instance_policies": 
kms.ResourceIBMKmsInstancePolicy(), - "ibm_kms_kmip_adapter": kms.ResourceIBMKmsKMIPAdapter(), - "ibm_kms_kmip_client_cert": kms.ResourceIBMKmsKMIPClientCertificate(), - "ibm_resource_group": resourcemanager.ResourceIBMResourceGroup(), - "ibm_resource_instance": resourcecontroller.ResourceIBMResourceInstance(), - "ibm_resource_key": resourcecontroller.ResourceIBMResourceKey(), - "ibm_security_group": classicinfrastructure.ResourceIBMSecurityGroup(), - "ibm_security_group_rule": classicinfrastructure.ResourceIBMSecurityGroupRule(), - "ibm_service_instance": cloudfoundry.ResourceIBMServiceInstance(), - "ibm_service_key": cloudfoundry.ResourceIBMServiceKey(), - "ibm_space": cloudfoundry.ResourceIBMSpace(), - "ibm_storage_evault": classicinfrastructure.ResourceIBMStorageEvault(), - "ibm_storage_block": classicinfrastructure.ResourceIBMStorageBlock(), - "ibm_storage_file": classicinfrastructure.ResourceIBMStorageFile(), - "ibm_subnet": classicinfrastructure.ResourceIBMSubnet(), - "ibm_dns_reverse_record": classicinfrastructure.ResourceIBMDNSReverseRecord(), - "ibm_ssl_certificate": classicinfrastructure.ResourceIBMSSLCertificate(), - "ibm_cdn": classicinfrastructure.ResourceIBMCDN(), - "ibm_hardware_firewall_shared": classicinfrastructure.ResourceIBMFirewallShared(), + "ibm_is_dedicated_host": vpc.ResourceIbmIsDedicatedHost(), + "ibm_is_dedicated_host_group": vpc.ResourceIbmIsDedicatedHostGroup(), + "ibm_is_dedicated_host_disk_management": vpc.ResourceIBMISDedicatedHostDiskManagement(), + "ibm_is_placement_group": vpc.ResourceIbmIsPlacementGroup(), + "ibm_is_floating_ip": vpc.ResourceIBMISFloatingIP(), + "ibm_is_flow_log": vpc.ResourceIBMISFlowLog(), + "ibm_is_instance": vpc.ResourceIBMISInstance(), + "ibm_is_instance_action": vpc.ResourceIBMISInstanceAction(), + "ibm_is_instance_network_attachment": vpc.ResourceIBMIsInstanceNetworkAttachment(), + "ibm_is_instance_network_interface": vpc.ResourceIBMIsInstanceNetworkInterface(), + 
"ibm_is_instance_network_interface_floating_ip": vpc.ResourceIBMIsInstanceNetworkInterfaceFloatingIp(), + "ibm_is_instance_disk_management": vpc.ResourceIBMISInstanceDiskManagement(), + "ibm_is_instance_group": vpc.ResourceIBMISInstanceGroup(), + "ibm_is_instance_group_membership": vpc.ResourceIBMISInstanceGroupMembership(), + "ibm_is_instance_group_manager": vpc.ResourceIBMISInstanceGroupManager(), + "ibm_is_instance_group_manager_policy": vpc.ResourceIBMISInstanceGroupManagerPolicy(), + "ibm_is_instance_group_manager_action": vpc.ResourceIBMISInstanceGroupManagerAction(), + "ibm_is_instance_volume_attachment": vpc.ResourceIBMISInstanceVolumeAttachment(), + "ibm_is_virtual_endpoint_gateway": vpc.ResourceIBMISEndpointGateway(), + "ibm_is_virtual_endpoint_gateway_ip": vpc.ResourceIBMISEndpointGatewayIP(), + "ibm_is_instance_template": vpc.ResourceIBMISInstanceTemplate(), + "ibm_is_ike_policy": vpc.ResourceIBMISIKEPolicy(), + "ibm_is_ipsec_policy": vpc.ResourceIBMISIPSecPolicy(), + "ibm_is_lb": vpc.ResourceIBMISLB(), + "ibm_is_lb_listener": vpc.ResourceIBMISLBListener(), + "ibm_is_lb_listener_policy": vpc.ResourceIBMISLBListenerPolicy(), + "ibm_is_lb_listener_policy_rule": vpc.ResourceIBMISLBListenerPolicyRule(), + "ibm_is_lb_pool": vpc.ResourceIBMISLBPool(), + "ibm_is_lb_pool_member": vpc.ResourceIBMISLBPoolMember(), + "ibm_is_network_acl": vpc.ResourceIBMISNetworkACL(), + "ibm_is_network_acl_rule": vpc.ResourceIBMISNetworkACLRule(), + "ibm_is_public_gateway": vpc.ResourceIBMISPublicGateway(), + "ibm_is_private_path_service_gateway_account_policy": vpc.ResourceIBMIsPrivatePathServiceGatewayAccountPolicy(), + "ibm_is_private_path_service_gateway": vpc.ResourceIBMIsPrivatePathServiceGateway(), + "ibm_is_private_path_service_gateway_revoke_account": vpc.ResourceIBMIsPrivatePathServiceGatewayRevokeAccount(), + "ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations": vpc.ResourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperations(), + 
"ibm_is_private_path_service_gateway_operations": vpc.ResourceIBMIsPrivatePathServiceGatewayOperations(), + "ibm_is_security_group": vpc.ResourceIBMISSecurityGroup(), + "ibm_is_security_group_rule": vpc.ResourceIBMISSecurityGroupRule(), + "ibm_is_security_group_target": vpc.ResourceIBMISSecurityGroupTarget(), + "ibm_is_share": vpc.ResourceIbmIsShare(), + "ibm_is_share_replica_operations": vpc.ResourceIbmIsShareReplicaOperations(), + "ibm_is_share_mount_target": vpc.ResourceIBMIsShareMountTarget(), + "ibm_is_share_delete_accessor_binding": vpc.ResourceIbmIsShareDeleteAccessorBinding(), + "ibm_is_subnet": vpc.ResourceIBMISSubnet(), + "ibm_is_reservation": vpc.ResourceIBMISReservation(), + "ibm_is_reservation_activate": vpc.ResourceIBMISReservationActivate(), + "ibm_is_subnet_reserved_ip": vpc.ResourceIBMISReservedIP(), + "ibm_is_subnet_reserved_ip_patch": vpc.ResourceIBMISReservedIPPatch(), + "ibm_is_subnet_network_acl_attachment": vpc.ResourceIBMISSubnetNetworkACLAttachment(), + "ibm_is_subnet_public_gateway_attachment": vpc.ResourceIBMISSubnetPublicGatewayAttachment(), + "ibm_is_subnet_routing_table_attachment": vpc.ResourceIBMISSubnetRoutingTableAttachment(), + "ibm_is_ssh_key": vpc.ResourceIBMISSSHKey(), + "ibm_is_snapshot": vpc.ResourceIBMSnapshot(), + "ibm_is_virtual_network_interface": vpc.ResourceIBMIsVirtualNetworkInterface(), + "ibm_is_virtual_network_interface_floating_ip": vpc.ResourceIBMIsVirtualNetworkInterfaceFloatingIP(), + "ibm_is_virtual_network_interface_ip": vpc.ResourceIBMIsVirtualNetworkInterfaceIP(), + "ibm_is_snapshot_consistency_group": vpc.ResourceIBMIsSnapshotConsistencyGroup(), + "ibm_is_volume": vpc.ResourceIBMISVolume(), + "ibm_is_vpn_gateway": vpc.ResourceIBMISVPNGateway(), + "ibm_is_vpn_gateway_connection": vpc.ResourceIBMISVPNGatewayConnection(), + "ibm_is_vpc": vpc.ResourceIBMISVPC(), + "ibm_is_vpc_address_prefix": vpc.ResourceIBMISVpcAddressPrefix(), + "ibm_is_vpc_dns_resolution_binding": vpc.ResourceIBMIsVPCDnsResolutionBinding(), 
+ "ibm_is_vpc_routing_table": vpc.ResourceIBMISVPCRoutingTable(), + "ibm_is_vpc_routing_table_route": vpc.ResourceIBMISVPCRoutingTableRoute(), + "ibm_is_vpn_server": vpc.ResourceIBMIsVPNServer(), + "ibm_is_vpn_server_client": vpc.ResourceIBMIsVPNServerClient(), + "ibm_is_vpn_server_route": vpc.ResourceIBMIsVPNServerRoute(), + "ibm_is_image": vpc.ResourceIBMISImage(), + "ibm_is_image_deprecate": vpc.ResourceIBMISImageDeprecate(), + "ibm_is_image_export_job": vpc.ResourceIBMIsImageExportJob(), + "ibm_is_image_obsolete": vpc.ResourceIBMISImageObsolete(), + "ibm_lb": classicinfrastructure.ResourceIBMLb(), + "ibm_lbaas": classicinfrastructure.ResourceIBMLbaas(), + "ibm_lbaas_health_monitor": classicinfrastructure.ResourceIBMLbaasHealthMonitor(), + "ibm_lbaas_server_instance_attachment": classicinfrastructure.ResourceIBMLbaasServerInstanceAttachment(), + "ibm_lb_service": classicinfrastructure.ResourceIBMLbService(), + "ibm_lb_service_group": classicinfrastructure.ResourceIBMLbServiceGroup(), + "ibm_lb_vpx": classicinfrastructure.ResourceIBMLbVpx(), + "ibm_lb_vpx_ha": classicinfrastructure.ResourceIBMLbVpxHa(), + "ibm_lb_vpx_service": classicinfrastructure.ResourceIBMLbVpxService(), + "ibm_lb_vpx_vip": classicinfrastructure.ResourceIBMLbVpxVip(), + "ibm_multi_vlan_firewall": classicinfrastructure.ResourceIBMMultiVlanFirewall(), + "ibm_network_gateway": classicinfrastructure.ResourceIBMNetworkGateway(), + "ibm_network_gateway_vlan_association": classicinfrastructure.ResourceIBMNetworkGatewayVlanAttachment(), + "ibm_network_interface_sg_attachment": classicinfrastructure.ResourceIBMNetworkInterfaceSGAttachment(), + "ibm_network_public_ip": classicinfrastructure.ResourceIBMNetworkPublicIp(), + "ibm_network_vlan": classicinfrastructure.ResourceIBMNetworkVlan(), + "ibm_network_vlan_spanning": classicinfrastructure.ResourceIBMNetworkVlanSpan(), + "ibm_object_storage_account": classicinfrastructure.ResourceIBMObjectStorageAccount(), + "ibm_org": cloudfoundry.ResourceIBMOrg(), + 
"ibm_pn_application_chrome": pushnotification.ResourceIBMPNApplicationChrome(), + "ibm_app_config_environment": appconfiguration.ResourceIBMAppConfigEnvironment(), + "ibm_app_config_collection": appconfiguration.ResourceIBMAppConfigCollection(), + "ibm_app_config_feature": appconfiguration.ResourceIBMIbmAppConfigFeature(), + "ibm_app_config_property": appconfiguration.ResourceIBMIbmAppConfigProperty(), + "ibm_app_config_segment": appconfiguration.ResourceIBMIbmAppConfigSegment(), + "ibm_app_config_snapshot": appconfiguration.ResourceIBMIbmAppConfigSnapshot(), + "ibm_kms_key": kms.ResourceIBMKmskey(), + "ibm_kms_key_with_policy_overrides": kms.ResourceIBMKmsKeyWithPolicyOverrides(), + "ibm_kms_key_alias": kms.ResourceIBMKmskeyAlias(), + "ibm_kms_key_rings": kms.ResourceIBMKmskeyRings(), + "ibm_kms_key_policies": kms.ResourceIBMKmskeyPolicies(), + "ibm_kp_key": kms.ResourceIBMkey(), + "ibm_kms_instance_policies": kms.ResourceIBMKmsInstancePolicy(), + "ibm_kms_kmip_adapter": kms.ResourceIBMKmsKMIPAdapter(), + "ibm_kms_kmip_client_cert": kms.ResourceIBMKmsKMIPClientCertificate(), + "ibm_resource_group": resourcemanager.ResourceIBMResourceGroup(), + "ibm_resource_instance": resourcecontroller.ResourceIBMResourceInstance(), + "ibm_resource_key": resourcecontroller.ResourceIBMResourceKey(), + "ibm_security_group": classicinfrastructure.ResourceIBMSecurityGroup(), + "ibm_security_group_rule": classicinfrastructure.ResourceIBMSecurityGroupRule(), + "ibm_service_instance": cloudfoundry.ResourceIBMServiceInstance(), + "ibm_service_key": cloudfoundry.ResourceIBMServiceKey(), + "ibm_space": cloudfoundry.ResourceIBMSpace(), + "ibm_storage_evault": classicinfrastructure.ResourceIBMStorageEvault(), + "ibm_storage_block": classicinfrastructure.ResourceIBMStorageBlock(), + "ibm_storage_file": classicinfrastructure.ResourceIBMStorageFile(), + "ibm_subnet": classicinfrastructure.ResourceIBMSubnet(), + "ibm_dns_reverse_record": classicinfrastructure.ResourceIBMDNSReverseRecord(), + 
"ibm_ssl_certificate": classicinfrastructure.ResourceIBMSSLCertificate(), + "ibm_cdn": classicinfrastructure.ResourceIBMCDN(), + "ibm_hardware_firewall_shared": classicinfrastructure.ResourceIBMFirewallShared(), // Partner Center Sell "ibm_onboarding_registration": partnercentersell.ResourceIbmOnboardingRegistration(), @@ -1299,7 +1326,14 @@ func Provider() *schema.Provider { "ibm_pi_instance": power.ResourceIBMPIInstance(), "ibm_pi_ipsec_policy": power.ResourceIBMPIIPSecPolicy(), "ibm_pi_key": power.ResourceIBMPIKey(), + "ibm_pi_network_address_group_member": power.ResourceIBMPINetworkAddressGroupMember(), + "ibm_pi_network_address_group": power.ResourceIBMPINetworkAddressGroup(), + "ibm_pi_network_interface": power.ResourceIBMPINetworkInterface(), "ibm_pi_network_port_attach": power.ResourceIBMPINetworkPortAttach(), + "ibm_pi_network_security_group_action": power.ResourceIBMPINetworkSecurityGroupAction(), + "ibm_pi_network_security_group_member": power.ResourceIBMPINetworkSecurityGroupMember(), + "ibm_pi_network_security_group_rule": power.ResourceIBMPINetworkSecurityGroupRule(), + "ibm_pi_network_security_group": power.ResourceIBMPINetworkSecurityGroup(), "ibm_pi_network": power.ResourceIBMPINetwork(), "ibm_pi_placement_group": power.ResourceIBMPIPlacementGroup(), "ibm_pi_shared_processor_pool": power.ResourceIBMPISharedProcessorPool(), @@ -1818,6 +1852,7 @@ func Validator() validate.ValidatorDict { "ibm_hpcs_keystore": hpcs.ResourceIbmKeystoreValidator(), "ibm_hpcs_key_template": hpcs.ResourceIbmKeyTemplateValidator(), "ibm_hpcs_vault": hpcs.ResourceIbmVaultValidator(), + "ibm_config_aggregator_settings": configurationaggregator.ResourceIbmConfigAggregatorSettingsValidator(), // MQ on Cloud "ibm_mqcloud_queue_manager": mqcloud.ResourceIbmMqcloudQueueManagerValidator(), @@ -1835,85 +1870,87 @@ func Validator() validate.ValidatorDict { "ibm_is_bare_metal_server_network_interface": vpc.ResourceIBMIsBareMetalServerNetworkInterfaceValidator(), 
"ibm_is_bare_metal_server": vpc.ResourceIBMIsBareMetalServerValidator(), - "ibm_is_dedicated_host_group": vpc.ResourceIbmIsDedicatedHostGroupValidator(), - "ibm_is_dedicated_host": vpc.ResourceIbmIsDedicatedHostValidator(), - "ibm_is_dedicated_host_disk_management": vpc.ResourceIBMISDedicatedHostDiskManagementValidator(), - "ibm_is_flow_log": vpc.ResourceIBMISFlowLogValidator(), - "ibm_is_instance_group": vpc.ResourceIBMISInstanceGroupValidator(), - "ibm_is_instance_group_membership": vpc.ResourceIBMISInstanceGroupMembershipValidator(), - "ibm_is_instance_group_manager": vpc.ResourceIBMISInstanceGroupManagerValidator(), - "ibm_is_instance_group_manager_policy": vpc.ResourceIBMISInstanceGroupManagerPolicyValidator(), - "ibm_is_instance_group_manager_action": vpc.ResourceIBMISInstanceGroupManagerActionValidator(), - "ibm_is_floating_ip": vpc.ResourceIBMISFloatingIPValidator(), - "ibm_is_ike_policy": vpc.ResourceIBMISIKEValidator(), - "ibm_is_image": vpc.ResourceIBMISImageValidator(), - "ibm_is_image_export_job": vpc.ResourceIBMIsImageExportValidator(), - "ibm_is_instance_template": vpc.ResourceIBMISInstanceTemplateValidator(), - "ibm_is_instance": vpc.ResourceIBMISInstanceValidator(), - "ibm_is_instance_action": vpc.ResourceIBMISInstanceActionValidator(), - "ibm_is_instance_network_attachment": vpc.ResourceIBMIsInstanceNetworkAttachmentValidator(), - "ibm_is_instance_network_interface": vpc.ResourceIBMIsInstanceNetworkInterfaceValidator(), - "ibm_is_instance_disk_management": vpc.ResourceIBMISInstanceDiskManagementValidator(), - "ibm_is_instance_volume_attachment": vpc.ResourceIBMISInstanceVolumeAttachmentValidator(), - "ibm_is_ipsec_policy": vpc.ResourceIBMISIPSECValidator(), - "ibm_is_lb_listener_policy_rule": vpc.ResourceIBMISLBListenerPolicyRuleValidator(), - "ibm_is_lb_listener_policy": vpc.ResourceIBMISLBListenerPolicyValidator(), - "ibm_is_lb_listener": vpc.ResourceIBMISLBListenerValidator(), - "ibm_is_lb_pool_member": vpc.ResourceIBMISLBPoolMemberValidator(), 
- "ibm_is_lb_pool": vpc.ResourceIBMISLBPoolValidator(), - "ibm_is_lb": vpc.ResourceIBMISLBValidator(), - "ibm_is_network_acl": vpc.ResourceIBMISNetworkACLValidator(), - "ibm_is_network_acl_rule": vpc.ResourceIBMISNetworkACLRuleValidator(), - "ibm_is_public_gateway": vpc.ResourceIBMISPublicGatewayValidator(), - "ibm_is_placement_group": vpc.ResourceIbmIsPlacementGroupValidator(), - "ibm_is_security_group_target": vpc.ResourceIBMISSecurityGroupTargetValidator(), - "ibm_is_security_group_rule": vpc.ResourceIBMISSecurityGroupRuleValidator(), - "ibm_is_security_group": vpc.ResourceIBMISSecurityGroupValidator(), - "ibm_is_share": vpc.ResourceIbmIsShareValidator(), - "ibm_is_share_replica_operations": vpc.ResourceIbmIsShareReplicaOperationsValidator(), - "ibm_is_share_mount_target": vpc.ResourceIBMIsShareMountTargetValidator(), - "ibm_is_snapshot": vpc.ResourceIBMISSnapshotValidator(), - "ibm_is_snapshot_consistency_group": vpc.ResourceIBMIsSnapshotConsistencyGroupValidator(), - "ibm_is_ssh_key": vpc.ResourceIBMISSHKeyValidator(), - "ibm_is_subnet": vpc.ResourceIBMISSubnetValidator(), - "ibm_is_subnet_reserved_ip": vpc.ResourceIBMISSubnetReservedIPValidator(), - "ibm_is_volume": vpc.ResourceIBMISVolumeValidator(), - "ibm_is_virtual_network_interface": vpc.ResourceIBMIsVirtualNetworkInterfaceValidator(), - "ibm_is_address_prefix": vpc.ResourceIBMISAddressPrefixValidator(), - "ibm_is_vpc": vpc.ResourceIBMISVPCValidator(), - "ibm_is_vpc_routing_table": vpc.ResourceIBMISVPCRoutingTableValidator(), - "ibm_is_vpc_routing_table_route": vpc.ResourceIBMISVPCRoutingTableRouteValidator(), - "ibm_is_vpn_gateway_connection": vpc.ResourceIBMISVPNGatewayConnectionValidator(), - "ibm_is_vpn_gateway": vpc.ResourceIBMISVPNGatewayValidator(), - "ibm_is_vpn_server": vpc.ResourceIBMIsVPNServerValidator(), - "ibm_is_vpn_server_route": vpc.ResourceIBMIsVPNServerRouteValidator(), - "ibm_is_reservation": vpc.ResourceIBMISReservationValidator(), - "ibm_kms_key_rings": 
kms.ResourceIBMKeyRingValidator(), - "ibm_dns_glb_monitor": dnsservices.ResourceIBMPrivateDNSGLBMonitorValidator(), - "ibm_dns_custom_resolver_forwarding_rule": dnsservices.ResourceIBMPrivateDNSForwardingRuleValidator(), - "ibm_schematics_action": schematics.ResourceIBMSchematicsActionValidator(), - "ibm_schematics_job": schematics.ResourceIBMSchematicsJobValidator(), - "ibm_schematics_workspace": schematics.ResourceIBMSchematicsWorkspaceValidator(), - "ibm_schematics_inventory": schematics.ResourceIBMSchematicsInventoryValidator(), - "ibm_schematics_resource_query": schematics.ResourceIBMSchematicsResourceQueryValidator(), - "ibm_schematics_policy": schematics.ResourceIbmSchematicsPolicyValidator(), - "ibm_resource_instance": resourcecontroller.ResourceIBMResourceInstanceValidator(), - "ibm_resource_key": resourcecontroller.ResourceIBMResourceKeyValidator(), - "ibm_is_virtual_endpoint_gateway": vpc.ResourceIBMISEndpointGatewayValidator(), - "ibm_resource_tag": globaltagging.ResourceIBMResourceTagValidator(), - "ibm_resource_access_tag": globaltagging.ResourceIBMResourceAccessTagValidator(), - "ibm_iam_access_tag": globaltagging.ResourceIBMIamAccessTagValidator(), - "ibm_satellite_location": satellite.ResourceIBMSatelliteLocationValidator(), - "ibm_satellite_cluster": satellite.ResourceIBMSatelliteClusterValidator(), - "ibm_pi_volume": power.ResourceIBMPIVolumeValidator(), - "ibm_atracker_target": atracker.ResourceIBMAtrackerTargetValidator(), - "ibm_atracker_route": atracker.ResourceIBMAtrackerRouteValidator(), - "ibm_atracker_settings": atracker.ResourceIBMAtrackerSettingsValidator(), - "ibm_metrics_router_target": metricsrouter.ResourceIBMMetricsRouterTargetValidator(), - "ibm_metrics_router_route": metricsrouter.ResourceIBMMetricsRouterRouteValidator(), - "ibm_metrics_router_settings": metricsrouter.ResourceIBMMetricsRouterSettingsValidator(), - "ibm_satellite_endpoint": satellite.ResourceIBMSatelliteEndpointValidator(), - "ibm_satellite_host": 
satellite.ResourceIBMSatelliteHostValidator(), + "ibm_is_dedicated_host_group": vpc.ResourceIbmIsDedicatedHostGroupValidator(), + "ibm_is_dedicated_host": vpc.ResourceIbmIsDedicatedHostValidator(), + "ibm_is_dedicated_host_disk_management": vpc.ResourceIBMISDedicatedHostDiskManagementValidator(), + "ibm_is_flow_log": vpc.ResourceIBMISFlowLogValidator(), + "ibm_is_instance_group": vpc.ResourceIBMISInstanceGroupValidator(), + "ibm_is_instance_group_membership": vpc.ResourceIBMISInstanceGroupMembershipValidator(), + "ibm_is_instance_group_manager": vpc.ResourceIBMISInstanceGroupManagerValidator(), + "ibm_is_instance_group_manager_policy": vpc.ResourceIBMISInstanceGroupManagerPolicyValidator(), + "ibm_is_instance_group_manager_action": vpc.ResourceIBMISInstanceGroupManagerActionValidator(), + "ibm_is_floating_ip": vpc.ResourceIBMISFloatingIPValidator(), + "ibm_is_ike_policy": vpc.ResourceIBMISIKEValidator(), + "ibm_is_image": vpc.ResourceIBMISImageValidator(), + "ibm_is_image_export_job": vpc.ResourceIBMIsImageExportValidator(), + "ibm_is_instance_template": vpc.ResourceIBMISInstanceTemplateValidator(), + "ibm_is_instance": vpc.ResourceIBMISInstanceValidator(), + "ibm_is_instance_action": vpc.ResourceIBMISInstanceActionValidator(), + "ibm_is_instance_network_attachment": vpc.ResourceIBMIsInstanceNetworkAttachmentValidator(), + "ibm_is_instance_network_interface": vpc.ResourceIBMIsInstanceNetworkInterfaceValidator(), + "ibm_is_instance_disk_management": vpc.ResourceIBMISInstanceDiskManagementValidator(), + "ibm_is_instance_volume_attachment": vpc.ResourceIBMISInstanceVolumeAttachmentValidator(), + "ibm_is_ipsec_policy": vpc.ResourceIBMISIPSECValidator(), + "ibm_is_lb_listener_policy_rule": vpc.ResourceIBMISLBListenerPolicyRuleValidator(), + "ibm_is_lb_listener_policy": vpc.ResourceIBMISLBListenerPolicyValidator(), + "ibm_is_lb_listener": vpc.ResourceIBMISLBListenerValidator(), + "ibm_is_lb_pool_member": vpc.ResourceIBMISLBPoolMemberValidator(), + "ibm_is_lb_pool": 
vpc.ResourceIBMISLBPoolValidator(), + "ibm_is_lb": vpc.ResourceIBMISLBValidator(), + "ibm_is_network_acl": vpc.ResourceIBMISNetworkACLValidator(), + "ibm_is_network_acl_rule": vpc.ResourceIBMISNetworkACLRuleValidator(), + "ibm_is_public_gateway": vpc.ResourceIBMISPublicGatewayValidator(), + "ibm_is_private_path_service_gateway": vpc.ResourceIBMIsPrivatePathServiceGatewayValidator(), + "ibm_is_private_path_service_gateway_account_policy": vpc.ResourceIBMIsPrivatePathServiceGatewayAccountPolicyValidator(), + "ibm_is_placement_group": vpc.ResourceIbmIsPlacementGroupValidator(), + "ibm_is_security_group_target": vpc.ResourceIBMISSecurityGroupTargetValidator(), + "ibm_is_security_group_rule": vpc.ResourceIBMISSecurityGroupRuleValidator(), + "ibm_is_security_group": vpc.ResourceIBMISSecurityGroupValidator(), + "ibm_is_share": vpc.ResourceIbmIsShareValidator(), + "ibm_is_share_replica_operations": vpc.ResourceIbmIsShareReplicaOperationsValidator(), + "ibm_is_share_mount_target": vpc.ResourceIBMIsShareMountTargetValidator(), + "ibm_is_snapshot": vpc.ResourceIBMISSnapshotValidator(), + "ibm_is_snapshot_consistency_group": vpc.ResourceIBMIsSnapshotConsistencyGroupValidator(), + "ibm_is_ssh_key": vpc.ResourceIBMISSHKeyValidator(), + "ibm_is_subnet": vpc.ResourceIBMISSubnetValidator(), + "ibm_is_subnet_reserved_ip": vpc.ResourceIBMISSubnetReservedIPValidator(), + "ibm_is_volume": vpc.ResourceIBMISVolumeValidator(), + "ibm_is_virtual_network_interface": vpc.ResourceIBMIsVirtualNetworkInterfaceValidator(), + "ibm_is_address_prefix": vpc.ResourceIBMISAddressPrefixValidator(), + "ibm_is_vpc": vpc.ResourceIBMISVPCValidator(), + "ibm_is_vpc_routing_table": vpc.ResourceIBMISVPCRoutingTableValidator(), + "ibm_is_vpc_routing_table_route": vpc.ResourceIBMISVPCRoutingTableRouteValidator(), + "ibm_is_vpn_gateway_connection": vpc.ResourceIBMISVPNGatewayConnectionValidator(), + "ibm_is_vpn_gateway": vpc.ResourceIBMISVPNGatewayValidator(), + "ibm_is_vpn_server": 
vpc.ResourceIBMIsVPNServerValidator(), + "ibm_is_vpn_server_route": vpc.ResourceIBMIsVPNServerRouteValidator(), + "ibm_is_reservation": vpc.ResourceIBMISReservationValidator(), + "ibm_kms_key_rings": kms.ResourceIBMKeyRingValidator(), + "ibm_dns_glb_monitor": dnsservices.ResourceIBMPrivateDNSGLBMonitorValidator(), + "ibm_dns_custom_resolver_forwarding_rule": dnsservices.ResourceIBMPrivateDNSForwardingRuleValidator(), + "ibm_schematics_action": schematics.ResourceIBMSchematicsActionValidator(), + "ibm_schematics_job": schematics.ResourceIBMSchematicsJobValidator(), + "ibm_schematics_workspace": schematics.ResourceIBMSchematicsWorkspaceValidator(), + "ibm_schematics_inventory": schematics.ResourceIBMSchematicsInventoryValidator(), + "ibm_schematics_resource_query": schematics.ResourceIBMSchematicsResourceQueryValidator(), + "ibm_schematics_policy": schematics.ResourceIbmSchematicsPolicyValidator(), + "ibm_resource_instance": resourcecontroller.ResourceIBMResourceInstanceValidator(), + "ibm_resource_key": resourcecontroller.ResourceIBMResourceKeyValidator(), + "ibm_is_virtual_endpoint_gateway": vpc.ResourceIBMISEndpointGatewayValidator(), + "ibm_resource_tag": globaltagging.ResourceIBMResourceTagValidator(), + "ibm_resource_access_tag": globaltagging.ResourceIBMResourceAccessTagValidator(), + "ibm_iam_access_tag": globaltagging.ResourceIBMIamAccessTagValidator(), + "ibm_satellite_location": satellite.ResourceIBMSatelliteLocationValidator(), + "ibm_satellite_cluster": satellite.ResourceIBMSatelliteClusterValidator(), + "ibm_pi_volume": power.ResourceIBMPIVolumeValidator(), + "ibm_atracker_target": atracker.ResourceIBMAtrackerTargetValidator(), + "ibm_atracker_route": atracker.ResourceIBMAtrackerRouteValidator(), + "ibm_atracker_settings": atracker.ResourceIBMAtrackerSettingsValidator(), + "ibm_metrics_router_target": metricsrouter.ResourceIBMMetricsRouterTargetValidator(), + "ibm_metrics_router_route": metricsrouter.ResourceIBMMetricsRouterRouteValidator(), + 
"ibm_metrics_router_settings": metricsrouter.ResourceIBMMetricsRouterSettingsValidator(), + "ibm_satellite_endpoint": satellite.ResourceIBMSatelliteEndpointValidator(), + "ibm_satellite_host": satellite.ResourceIBMSatelliteHostValidator(), // Partner Center Sell "ibm_onboarding_registration": partnercentersell.ResourceIbmOnboardingRegistrationValidator(), diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline.go index 776e12043e..61797149e5 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -14,6 +18,7 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" ) func DataSourceIBMCdTektonPipeline() *schema.Resource { @@ -59,7 +64,7 @@ func DataSourceIBMCdTektonPipeline() *schema.Resource { "id": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "UUID.", + Description: "Universally Unique Identifier.", }, "crn": &schema.Schema{ Type: schema.TypeString, @@ -525,179 +530,176 @@ func DataSourceIBMCdTektonPipeline() *schema.Resource { func dataSourceIBMCdTektonPipelineRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return 
tfErr.GetDiag() } getTektonPipelineOptions := &cdtektonpipelinev2.GetTektonPipelineOptions{} getTektonPipelineOptions.SetID(d.Get("pipeline_id").(string)) - tektonPipeline, response, err := cdTektonPipelineClient.GetTektonPipelineWithContext(context, getTektonPipelineOptions) + tektonPipeline, _, err := cdTektonPipelineClient.GetTektonPipelineWithContext(context, getTektonPipelineOptions) if err != nil { - log.Printf("[DEBUG] GetTektonPipelineWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineWithContext failed: %s", err.Error()), "(Data) ibm_cd_tekton_pipeline", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } - d.SetId(fmt.Sprintf("%s", *getTektonPipelineOptions.ID)) + d.SetId(*getTektonPipelineOptions.ID) if err = d.Set("name", tektonPipeline.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-name").GetDiag() } if err = d.Set("status", tektonPipeline.Status); err != nil { - return diag.FromErr(fmt.Errorf("Error setting status: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting status: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-status").GetDiag() } resourceGroup := []map[string]interface{}{} - if tektonPipeline.ResourceGroup != nil { - modelMap, err := dataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(tektonPipeline.ResourceGroup) - if err != nil { - return diag.FromErr(err) - } - resourceGroup = append(resourceGroup, modelMap) + resourceGroupMap, err := DataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(tektonPipeline.ResourceGroup) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", 
"read", "resource_group-to-map").GetDiag() } + resourceGroup = append(resourceGroup, resourceGroupMap) if err = d.Set("resource_group", resourceGroup); err != nil { - return diag.FromErr(fmt.Errorf("Error setting resource_group %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting resource_group: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-resource_group").GetDiag() } toolchain := []map[string]interface{}{} - if tektonPipeline.Toolchain != nil { - modelMap, err := dataSourceIBMCdTektonPipelineToolchainReferenceToMap(tektonPipeline.Toolchain) - if err != nil { - return diag.FromErr(err) - } - toolchain = append(toolchain, modelMap) + toolchainMap, err := DataSourceIBMCdTektonPipelineToolchainReferenceToMap(tektonPipeline.Toolchain) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "toolchain-to-map").GetDiag() } + toolchain = append(toolchain, toolchainMap) if err = d.Set("toolchain", toolchain); err != nil { - return diag.FromErr(fmt.Errorf("Error setting toolchain %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting toolchain: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-toolchain").GetDiag() } definitions := []map[string]interface{}{} - if tektonPipeline.Definitions != nil { - for _, modelItem := range tektonPipeline.Definitions { - modelMap, err := dataSourceIBMCdTektonPipelineDefinitionToMap(&modelItem) - if err != nil { - return diag.FromErr(err) - } - definitions = append(definitions, modelMap) + for _, definitionsItem := range tektonPipeline.Definitions { + definitionsItemMap, err := DataSourceIBMCdTektonPipelineDefinitionToMap(&definitionsItem) // #nosec G601 + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "definitions-to-map").GetDiag() } + definitions = append(definitions, definitionsItemMap) } if err = d.Set("definitions", 
definitions); err != nil { - return diag.FromErr(fmt.Errorf("Error setting definitions %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting definitions: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-definitions").GetDiag() } properties := []map[string]interface{}{} - if tektonPipeline.Properties != nil { - for _, modelItem := range tektonPipeline.Properties { - modelMap, err := dataSourceIBMCdTektonPipelinePropertyToMap(&modelItem) - if err != nil { - return diag.FromErr(err) - } - properties = append(properties, modelMap) + for _, propertiesItem := range tektonPipeline.Properties { + propertiesItemMap, err := DataSourceIBMCdTektonPipelinePropertyToMap(&propertiesItem) // #nosec G601 + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "properties-to-map").GetDiag() } + properties = append(properties, propertiesItemMap) } if err = d.Set("properties", properties); err != nil { - return diag.FromErr(fmt.Errorf("Error setting properties %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting properties: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-properties").GetDiag() } if err = d.Set("updated_at", flex.DateTimeToString(tektonPipeline.UpdatedAt)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting updated_at: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting updated_at: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-updated_at").GetDiag() } if err = d.Set("created_at", flex.DateTimeToString(tektonPipeline.CreatedAt)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting created_at: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-created_at").GetDiag() } triggers := []map[string]interface{}{} if tektonPipeline.Triggers != nil { - for _, modelItem := 
range tektonPipeline.Triggers { - modelMap, err := dataSourceIBMCdTektonPipelineTriggerToMap(modelItem) + for _, triggersItem := range tektonPipeline.Triggers { + triggersItemMap, err := DataSourceIBMCdTektonPipelineTriggerToMap(triggersItem) // #nosec G601 if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "triggers-to-map").GetDiag() } - triggers = append(triggers, modelMap) + triggers = append(triggers, triggersItemMap) } } if err = d.Set("triggers", triggers); err != nil { - return diag.FromErr(fmt.Errorf("Error setting triggers %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting triggers: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-triggers").GetDiag() } worker := []map[string]interface{}{} - if tektonPipeline.Worker != nil { - modelMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(tektonPipeline.Worker) - if err != nil { - return diag.FromErr(err) - } - worker = append(worker, modelMap) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(tektonPipeline.Worker) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline", "read", "worker-to-map").GetDiag() } + worker = append(worker, workerMap) if err = d.Set("worker", worker); err != nil { - return diag.FromErr(fmt.Errorf("Error setting worker %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting worker: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-worker").GetDiag() } if err = d.Set("runs_url", tektonPipeline.RunsURL); err != nil { - return diag.FromErr(fmt.Errorf("Error setting runs_url: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting runs_url: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-runs_url").GetDiag() } - if err = d.Set("href", tektonPipeline.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error 
setting href: %s", err)) + if !core.IsNil(tektonPipeline.Href) { + if err = d.Set("href", tektonPipeline.Href); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-href").GetDiag() + } } if err = d.Set("build_number", flex.IntValue(tektonPipeline.BuildNumber)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting build_number: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting build_number: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-build_number").GetDiag() } - if err = d.Set("next_build_number", flex.IntValue(tektonPipeline.NextBuildNumber)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting next_build_number: %s", err)) + if !core.IsNil(tektonPipeline.NextBuildNumber) { + if err = d.Set("next_build_number", flex.IntValue(tektonPipeline.NextBuildNumber)); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting next_build_number: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-next_build_number").GetDiag() + } } if err = d.Set("enable_notifications", tektonPipeline.EnableNotifications); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_notifications: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enable_notifications: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-enable_notifications").GetDiag() } if err = d.Set("enable_partial_cloning", tektonPipeline.EnablePartialCloning); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_partial_cloning: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enable_partial_cloning: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-enable_partial_cloning").GetDiag() } if err = d.Set("enabled", tektonPipeline.Enabled); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enabled: %s", err)) + 
return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enabled: %s", err), "(Data) ibm_cd_tekton_pipeline", "read", "set-enabled").GetDiag() } return nil } -func dataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(model *cdtektonpipelinev2.ResourceGroupReference) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(model *cdtektonpipelinev2.ResourceGroupReference) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.ID != nil { - modelMap["id"] = model.ID + modelMap["id"] = *model.ID } return modelMap, nil } -func dataSourceIBMCdTektonPipelineToolchainReferenceToMap(model *cdtektonpipelinev2.ToolchainReference) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineToolchainReferenceToMap(model *cdtektonpipelinev2.ToolchainReference) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID - modelMap["crn"] = model.CRN + modelMap["id"] = *model.ID + modelMap["crn"] = *model.CRN return modelMap, nil } -func dataSourceIBMCdTektonPipelineDefinitionToMap(model *cdtektonpipelinev2.Definition) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionToMap(model *cdtektonpipelinev2.Definition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - sourceMap, err := dataSourceIBMCdTektonPipelineDefinitionSourceToMap(model.Source) + sourceMap, err := DataSourceIBMCdTektonPipelineDefinitionSourceToMap(model.Source) if err != nil { return modelMap, err } modelMap["source"] = []map[string]interface{}{sourceMap} if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func dataSourceIBMCdTektonPipelineDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionSourceToMap(model 
*cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := dataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := DataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -705,18 +707,18 @@ func dataSourceIBMCdTektonPipelineDefinitionSourceToMap(model *cdtektonpipelinev return modelMap, nil } -func dataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Tag != nil { - modelMap["tag"] = model.Tag + modelMap["tag"] = *model.Tag } - modelMap["path"] = model.Path + modelMap["path"] = *model.Path if model.Tool != nil { - toolMap, err := dataSourceIBMCdTektonPipelineToolToMap(model.Tool) + toolMap, err := DataSourceIBMCdTektonPipelineToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -725,65 +727,65 @@ func dataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtekto return modelMap, nil } -func dataSourceIBMCdTektonPipelineToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func dataSourceIBMCdTektonPipelinePropertyToMap(model *cdtektonpipelinev2.Property) (map[string]interface{}, error) { 
+func DataSourceIBMCdTektonPipelinePropertyToMap(model *cdtektonpipelinev2.Property) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type + modelMap["type"] = *model.Type if model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerIntf) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerIntf) (map[string]interface{}, error) { if _, ok := model.(*cdtektonpipelinev2.TriggerManualTrigger); ok { - return dataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model.(*cdtektonpipelinev2.TriggerManualTrigger)) + return DataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model.(*cdtektonpipelinev2.TriggerManualTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerScmTrigger); ok { - return dataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model.(*cdtektonpipelinev2.TriggerScmTrigger)) + return DataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model.(*cdtektonpipelinev2.TriggerScmTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerTimerTrigger); ok { - return dataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model.(*cdtektonpipelinev2.TriggerTimerTrigger)) + return DataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model.(*cdtektonpipelinev2.TriggerTimerTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerGenericTrigger); ok { - return 
dataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model.(*cdtektonpipelinev2.TriggerGenericTrigger)) + return DataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model.(*cdtektonpipelinev2.TriggerGenericTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.Trigger); ok { modelMap := make(map[string]interface{}) model := model.(*cdtektonpipelinev2.Trigger) if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } if model.Name != nil { - modelMap["name"] = model.Name + modelMap["name"] = *model.Name } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.EventListener != nil { - modelMap["event_listener"] = model.EventListener + modelMap["event_listener"] = *model.EventListener } if model.ID != nil { - modelMap["id"] = model.ID + modelMap["id"] = *model.ID } if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := dataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -795,7 +797,7 @@ func dataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerI modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -805,16 +807,16 @@ func dataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerI modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } if model.Enabled != nil { - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled } if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.EnableEventsFromForks != nil 
{ - modelMap["enable_events_from_forks"] = model.EnableEventsFromForks + modelMap["enable_events_from_forks"] = *model.EnableEventsFromForks } if model.Source != nil { - sourceMap, err := dataSourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) + sourceMap, err := DataSourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) if err != nil { return modelMap, err } @@ -824,23 +826,23 @@ func dataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerI modelMap["events"] = model.Events } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } if model.Cron != nil { - modelMap["cron"] = model.Cron + modelMap["cron"] = *model.Cron } if model.Timezone != nil { - modelMap["timezone"] = model.Timezone + modelMap["timezone"] = *model.Timezone } if model.Secret != nil { - secretMap, err := dataSourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) + secretMap, err := DataSourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) if err != nil { return modelMap, err } modelMap["secret"] = []map[string]interface{}{secretMap} } if model.WebhookURL != nil { - modelMap["webhook_url"] = model.WebhookURL + modelMap["webhook_url"] = *model.WebhookURL } return modelMap, nil } else { @@ -848,44 +850,44 @@ func dataSourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerI } } -func dataSourceIBMCdTektonPipelineTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type 
+ modelMap["type"] = *model.Type if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } if model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } return modelMap, nil } -func dataSourceIBMCdTektonPipelineWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.Name != nil { - modelMap["name"] = model.Name + modelMap["name"] = *model.Name } if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := dataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := DataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -893,20 +895,20 @@ func dataSourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.T return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = 
*model.Branch } if model.Pattern != nil { - modelMap["pattern"] = model.Pattern + modelMap["pattern"] = *model.Pattern } - modelMap["blind_connection"] = model.BlindConnection + modelMap["blind_connection"] = *model.BlindConnection if model.HookID != nil { - modelMap["hook_id"] = model.HookID + modelMap["hook_id"] = *model.HookID } - toolMap, err := dataSourceIBMCdTektonPipelineToolToMap(model.Tool) + toolMap, err := DataSourceIBMCdTektonPipelineToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -914,39 +916,39 @@ func dataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpi return modelMap, nil } -func dataSourceIBMCdTektonPipelineGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Source != nil { - modelMap["source"] = model.Source + modelMap["source"] = *model.Source } if model.KeyName != nil { - modelMap["key_name"] = model.KeyName + modelMap["key_name"] = *model.KeyName } if model.Algorithm != nil { - modelMap["algorithm"] = model.Algorithm + modelMap["algorithm"] = *model.Algorithm } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelinev2.TriggerManualTrigger) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelinev2.TriggerManualTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = 
*model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := dataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -958,7 +960,7 @@ func dataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipel modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -967,26 +969,26 @@ func dataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipel if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2.TriggerScmTrigger) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2.TriggerScmTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = 
*model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := dataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -998,7 +1000,7 @@ func dataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipeline modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1007,15 +1009,15 @@ func dataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipeline if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.EnableEventsFromForks != nil { - modelMap["enable_events_from_forks"] = model.EnableEventsFromForks + modelMap["enable_events_from_forks"] = *model.EnableEventsFromForks } if model.Source != nil { - sourceMap, err := dataSourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) + sourceMap, err := DataSourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) if err != nil { return modelMap, err } @@ -1025,24 +1027,24 @@ func dataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipeline modelMap["events"] = model.Events } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipelinev2.TriggerTimerTrigger) (map[string]interface{}, error) { +func 
DataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipelinev2.TriggerTimerTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := dataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -1054,7 +1056,7 @@ func dataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipeli modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1063,32 +1065,32 @@ func dataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipeli if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.Cron != nil { - modelMap["cron"] = model.Cron + modelMap["cron"] = *model.Cron } if model.Timezone != nil { - modelMap["timezone"] = model.Timezone + modelMap["timezone"] = *model.Timezone } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model 
*cdtektonpipelinev2.TriggerGenericTrigger) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipelinev2.TriggerGenericTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := dataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -1100,7 +1102,7 @@ func dataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipe modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := dataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := DataSourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1109,22 +1111,22 @@ func dataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipe if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.Secret != nil { - secretMap, err := dataSourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) + secretMap, err := DataSourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) if err != nil { return modelMap, err } 
modelMap["secret"] = []map[string]interface{}{secretMap} } if model.WebhookURL != nil { - modelMap["webhook_url"] = model.WebhookURL + modelMap["webhook_url"] = *model.WebhookURL } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } return modelMap, nil } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition.go index 5e7ec4cf81..712f6b937f 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -12,7 +16,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" ) func DataSourceIBMCdTektonPipelineDefinition() *schema.Resource { @@ -99,7 +105,9 @@ func DataSourceIBMCdTektonPipelineDefinition() *schema.Resource { func dataSourceIBMCdTektonPipelineDefinitionRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_definition", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineDefinitionOptions := &cdtektonpipelinev2.GetTektonPipelineDefinitionOptions{} @@ -107,37 +115,38 @@ func 
dataSourceIBMCdTektonPipelineDefinitionRead(context context.Context, d *sch getTektonPipelineDefinitionOptions.SetPipelineID(d.Get("pipeline_id").(string)) getTektonPipelineDefinitionOptions.SetDefinitionID(d.Get("definition_id").(string)) - definition, response, err := cdTektonPipelineClient.GetTektonPipelineDefinitionWithContext(context, getTektonPipelineDefinitionOptions) + definition, _, err := cdTektonPipelineClient.GetTektonPipelineDefinitionWithContext(context, getTektonPipelineDefinitionOptions) if err != nil { - log.Printf("[DEBUG] GetTektonPipelineDefinitionWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineDefinitionWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineDefinitionWithContext failed: %s", err.Error()), "(Data) ibm_cd_tekton_pipeline_definition", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s", *getTektonPipelineDefinitionOptions.PipelineID, *getTektonPipelineDefinitionOptions.DefinitionID)) source := []map[string]interface{}{} - if definition.Source != nil { - modelMap, err := dataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(definition.Source) - if err != nil { - return diag.FromErr(err) - } - source = append(source, modelMap) + sourceMap, err := DataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(definition.Source) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_definition", "read", "source-to-map").GetDiag() } + source = append(source, sourceMap) if err = d.Set("source", source); err != nil { - return diag.FromErr(fmt.Errorf("Error setting source %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting source: %s", err), "(Data) ibm_cd_tekton_pipeline_definition", "read", "set-source").GetDiag() } - if err = d.Set("href", definition.Href); err != nil { - return 
diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + if !core.IsNil(definition.Href) { + if err = d.Set("href", definition.Href); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_cd_tekton_pipeline_definition", "read", "set-href").GetDiag() + } } return nil } -func dataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := dataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := DataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -145,18 +154,18 @@ func dataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtekto return modelMap, nil } -func dataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Tag != nil { - modelMap["tag"] = model.Tag + modelMap["tag"] = *model.Tag } - modelMap["path"] = model.Path + modelMap["path"] = *model.Path if model.Tool != nil { - toolMap, err := dataSourceIBMCdTektonPipelineDefinitionToolToMap(model.Tool) + toolMap, err := DataSourceIBMCdTektonPipelineDefinitionToolToMap(model.Tool) if err != nil { 
return modelMap, err } @@ -165,8 +174,8 @@ func dataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(mode return modelMap, nil } -func dataSourceIBMCdTektonPipelineDefinitionToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineDefinitionToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition_test.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition_test.go index 892422d78d..6c3942add5 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition_test.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_definition_test.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline_test import ( @@ -11,6 +15,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" + "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineDefinitionDataSourceBasic(t *testing.T) { @@ -22,10 +30,10 @@ func TestAccIBMCdTektonPipelineDefinitionDataSourceBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineDefinitionDataSourceConfigBasic(""), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "id"), - 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "pipeline_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "definition_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "source.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "definition_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "source.#"), ), }, }, @@ -49,7 +57,7 @@ func testAccCheckIBMCdTektonPipelineDefinitionDataSourceConfigBasic(definitionPi name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -68,8 +76,8 @@ func testAccCheckIBMCdTektonPipelineDefinitionDataSourceConfigBasic(definitionPi } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -79,12 +87,96 @@ func testAccCheckIBMCdTektonPipelineDefinitionDataSourceConfigBasic(definitionPi } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - data 
"ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition.pipeline_id - definition_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition.definition_id + data "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance.pipeline_id + definition_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance.definition_id } `, rgName, tcName) } + +func TestDataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.DefinitionSource) + model.Type = core.StringPtr("testString") + model.Properties = 
definitionSourcePropertiesModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["tag"] = "testString" + model["path"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.DefinitionSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Tag = core.StringPtr("testString") + model.Path = core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineDefinitionToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property.go index 5d1b739bd4..0b83e85ed7 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property.go +++ 
b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -12,7 +16,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" ) func DataSourceIBMCdTektonPipelineProperty() *schema.Resource { @@ -75,7 +81,9 @@ func DataSourceIBMCdTektonPipelineProperty() *schema.Resource { func dataSourceIBMCdTektonPipelinePropertyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_property", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelinePropertyOptions := &cdtektonpipelinev2.GetTektonPipelinePropertyOptions{} @@ -83,41 +91,60 @@ func dataSourceIBMCdTektonPipelinePropertyRead(context context.Context, d *schem getTektonPipelinePropertyOptions.SetPipelineID(d.Get("pipeline_id").(string)) getTektonPipelinePropertyOptions.SetPropertyName(d.Get("property_name").(string)) - property, response, err := cdTektonPipelineClient.GetTektonPipelinePropertyWithContext(context, getTektonPipelinePropertyOptions) + property, _, err := cdTektonPipelineClient.GetTektonPipelinePropertyWithContext(context, getTektonPipelinePropertyOptions) if err != nil { - log.Printf("[DEBUG] GetTektonPipelinePropertyWithContext failed %s\n%s", err, response) - return 
diag.FromErr(fmt.Errorf("GetTektonPipelinePropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelinePropertyWithContext failed: %s", err.Error()), "(Data) ibm_cd_tekton_pipeline_property", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s", *getTektonPipelinePropertyOptions.PipelineID, *getTektonPipelinePropertyOptions.PropertyName)) if err = d.Set("name", property.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-name").GetDiag() } - if err = d.Set("value", property.Value); err != nil { - return diag.FromErr(fmt.Errorf("Error setting value: %s", err)) + if !core.IsNil(property.Value) { + if err = d.Set("value", property.Value); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting value: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-value").GetDiag() + } } - if err = d.Set("href", property.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + if !core.IsNil(property.Href) { + if err = d.Set("href", property.Href); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-href").GetDiag() + } + } + + if !core.IsNil(property.Enum) { + enum := []interface{}{} + for _, enumItem := range property.Enum { + enum = append(enum, enumItem) + } + if err = d.Set("enum", enum); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enum: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-enum").GetDiag() + } } if err = d.Set("type", property.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) + return 
flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting type: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-type").GetDiag() } - if err = d.Set("locked", property.Locked); err != nil { - return diag.FromErr(fmt.Errorf("Error setting locked: %s", err)) + if !core.IsNil(property.Locked) { + if err = d.Set("locked", property.Locked); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting locked: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-locked").GetDiag() + } } - if err = d.Set("path", property.Path); err != nil { - return diag.FromErr(fmt.Errorf("Error setting path: %s", err)) + if !core.IsNil(property.Path) { + if err = d.Set("path", property.Path); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting path: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-path").GetDiag() + } } - if property.Enum != nil { + if !core.IsNil(property.Enum) { if err = d.Set("enum", property.Enum); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enum: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enum: %s", err), "(Data) ibm_cd_tekton_pipeline_property", "read", "set-enum").GetDiag() } } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property_test.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property_test.go index c29ce37471..3a5ead5682 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property_test.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_property_test.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline_test import ( @@ -24,11 +28,11 @@ func TestAccIBMCdTektonPipelinePropertyDataSourceBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelinePropertyDataSourceConfigBasic("", propertyName, propertyType), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "pipeline_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "property_name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "type"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "property_name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "type"), ), }, }, @@ -37,8 +41,8 @@ func TestAccIBMCdTektonPipelinePropertyDataSourceBasic(t *testing.T) { func TestAccIBMCdTektonPipelinePropertyDataSourceAllArgs(t *testing.T) { propertyName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - propertyType := "text" propertyValue := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) + propertyType := "text" 
propertyLocked := "true" propertyPath := fmt.Sprintf("tf_path_%d", acctest.RandIntRange(10, 100)) @@ -47,16 +51,16 @@ func TestAccIBMCdTektonPipelinePropertyDataSourceAllArgs(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig("", propertyName, propertyType, propertyValue, propertyLocked, propertyPath), + Config: testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig("", propertyName, propertyValue, propertyType, propertyLocked, propertyPath), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "pipeline_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "property_name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "value"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "href"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "type"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "locked"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "property_name"), + 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "value"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "href"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "type"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "locked"), ), }, }, @@ -80,7 +84,7 @@ func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfigBasic(propertyPipeli name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -90,23 +94,23 @@ func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfigBasic(propertyPipeli ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline ] } - resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id name = "property1" type = "text" value = "prop1" depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - data "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - property_name = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property.name + data "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = 
ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + property_name = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance.name } `, rgName, tcName) } -func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig(propertyPipelineID string, propertyName string, propertyType string, propertyValue string, propertyLocked string, propertyPath string) string { +func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig(propertyPipelineID string, propertyName string, propertyValue string, propertyType string, propertyLocked string, propertyPath string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) return fmt.Sprintf(` @@ -123,7 +127,7 @@ func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig(propertyPipelineID name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -133,19 +137,19 @@ func testAccCheckIBMCdTektonPipelinePropertyDataSourceConfig(propertyPipelineID ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline ] } - resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id name = "%s" type = "text" value = "%s" locked = "%s" depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - data "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - property_name = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property.name + data "ibm_cd_tekton_pipeline_property" 
"cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + property_name = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance.name } `, rgName, tcName, propertyName, propertyValue, propertyLocked) } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_test.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_test.go index 17d29c8447..55152a819b 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_test.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_test.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline_test import ( @@ -11,6 +15,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" + "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineDataSourceBasic(t *testing.T) { @@ -21,23 +29,22 @@ func TestAccIBMCdTektonPipelineDataSourceBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineDataSourceConfigBasic(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "status"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", 
"resource_group.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "toolchain.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "definitions.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "properties.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "updated_at"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "created_at"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "triggers.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "worker.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "runs_url"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "build_number"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_notifications"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_partial_cloning"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enabled"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "status"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "resource_group.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "toolchain.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "definitions.#"), + 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "properties.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "updated_at"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "triggers.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "worker.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "runs_url"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "build_number"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_notifications"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_partial_cloning"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enabled"), ), }, }, @@ -56,25 +63,24 @@ func TestAccIBMCdTektonPipelineDataSourceAllArgs(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineDataSourceConfig(tektonPipelineNextBuildNumber, tektonPipelineEnableNotifications, tektonPipelineEnablePartialCloning), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "status"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "resource_group.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", 
"toolchain.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "definitions.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "properties.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "updated_at"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "created_at"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "triggers.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "worker.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "runs_url"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "href"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "build_number"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "next_build_number"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_notifications"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_partial_cloning"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enabled"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "status"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "resource_group.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "toolchain.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", 
"definitions.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "properties.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "updated_at"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "triggers.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "worker.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "runs_url"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "href"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "build_number"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "next_build_number"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_notifications"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_partial_cloning"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enabled"), ), }, }, @@ -98,7 +104,7 @@ func testAccCheckIBMCdTektonPipelineDataSourceConfigBasic() string { name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -108,8 +114,8 @@ func testAccCheckIBMCdTektonPipelineDataSourceConfigBasic() string { ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline ] } - data "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { - pipeline_id = 
ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + data "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id } `, rgName, tcName) } @@ -131,7 +137,7 @@ func testAccCheckIBMCdTektonPipelineDataSourceConfig(tektonPipelineNextBuildNumb name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = %s enable_notifications = %s @@ -152,8 +158,8 @@ func testAccCheckIBMCdTektonPipelineDataSourceConfig(tektonPipelineNextBuildNumb } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -163,20 +169,800 @@ func testAccCheckIBMCdTektonPipelineDataSourceConfig(tektonPipelineNextBuildNumb } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id name = "property1" type = "text" value = "prop1" depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - data "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + data "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { + pipeline_id = 
ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id } `, rgName, tcName, tektonPipelineNextBuildNumber, tektonPipelineEnableNotifications, tektonPipelineEnablePartialCloning) } + +func TestDataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.ResourceGroupReference) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineResourceGroupReferenceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineToolchainReferenceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + model["crn"] = "crn:v1:staging:public:toolchain:us-south:a/0ba224679d6c697f9baee5e14ade83ac:bf5fa00f-ddef-4298-b87b-aa8b6da0e1a6::" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.ToolchainReference) + model.ID = core.StringPtr("testString") + model.CRN = core.StringPtr("crn:v1:staging:public:toolchain:us-south:a/0ba224679d6c697f9baee5e14ade83ac:bf5fa00f-ddef-4298-b87b-aa8b6da0e1a6::") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineToolchainReferenceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineDefinitionToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = 
[]map[string]interface{}{toolModel} + + definitionSourceModel := make(map[string]interface{}) + definitionSourceModel["type"] = "testString" + definitionSourceModel["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + model := make(map[string]interface{}) + model["source"] = []map[string]interface{}{definitionSourceModel} + model["href"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + definitionSourceModel := new(cdtektonpipelinev2.DefinitionSource) + definitionSourceModel.Type = core.StringPtr("testString") + definitionSourceModel.Properties = definitionSourcePropertiesModel + + model := new(cdtektonpipelinev2.Definition) + model.Source = definitionSourceModel + model.Href = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineDefinitionSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + 
definitionSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.DefinitionSource) + model.Type = core.StringPtr("testString") + model.Properties = definitionSourcePropertiesModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["tag"] = "testString" + model["path"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.DefinitionSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Tag = core.StringPtr("testString") + model.Path = core.StringPtr("testString") + model.Tool = toolModel + + result, err := 
cdtektonpipeline.DataSourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelinePropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["locked"] = true + model["path"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Property) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Locked = core.BoolPtr(true) + model.Path = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelinePropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := 
make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + triggerSourceModel := make(map[string]interface{}) + triggerSourceModel["type"] = "testString" + triggerSourceModel["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + genericSecretModel := make(map[string]interface{}) + genericSecretModel["type"] = "token_matches" + genericSecretModel["value"] = "testString" + genericSecretModel["source"] = "header" + genericSecretModel["key_name"] = "testString" + genericSecretModel["algorithm"] = "md4" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["enable_events_from_forks"] = false + model["source"] = []map[string]interface{}{triggerSourceModel} + model["events"] = []string{"push", "pull_request"} + model["filter"] = "header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'" + model["cron"] = "testString" + model["timezone"] = "America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC" + model["secret"] = 
[]map[string]interface{}{genericSecretModel} + model["webhook_url"] = "testString" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + triggerSourceModel := new(cdtektonpipelinev2.TriggerSource) + triggerSourceModel.Type = core.StringPtr("testString") + triggerSourceModel.Properties = triggerSourcePropertiesModel + + genericSecretModel := new(cdtektonpipelinev2.GenericSecret) + genericSecretModel.Type = core.StringPtr("token_matches") + genericSecretModel.Value = core.StringPtr("testString") + genericSecretModel.Source = core.StringPtr("header") + genericSecretModel.KeyName = core.StringPtr("testString") + genericSecretModel.Algorithm = core.StringPtr("md4") + + model := new(cdtektonpipelinev2.Trigger) + model.Type = core.StringPtr("testString") + model.Name = 
core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.EnableEventsFromForks = core.BoolPtr(false) + model.Source = triggerSourceModel + model.Events = []string{"push", "pull_request"} + model.Filter = core.StringPtr("header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'") + model.Cron = core.StringPtr("testString") + model.Timezone = core.StringPtr("America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC") + model.Secret = genericSecretModel + model.WebhookURL = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerPropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["path"] = "testString" + model["locked"] = true + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.TriggerProperty) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Path = core.StringPtr("testString") + model.Locked = core.BoolPtr(true) + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerPropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func 
TestDataSourceIBMCdTektonPipelineWorkerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["type"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Worker) + model.Name = core.StringPtr("testString") + model.Type = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineWorkerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + model := 
new(cdtektonpipelinev2.TriggerSource) + model.Type = core.StringPtr("testString") + model.Properties = triggerSourcePropertiesModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["pattern"] = "testString" + model["blind_connection"] = true + model["hook_id"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Pattern = core.StringPtr("testString") + model.BlindConnection = core.BoolPtr(true) + model.HookID = core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineGenericSecretToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["type"] = "token_matches" + model["value"] = "testString" + model["source"] = "header" + model["key_name"] = "testString" + model["algorithm"] = "md4" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.GenericSecret) + model.Type = core.StringPtr("token_matches") + model.Value = core.StringPtr("testString") + model.Source = core.StringPtr("header") + model.KeyName = core.StringPtr("testString") + model.Algorithm = core.StringPtr("md4") + + result, err 
:= cdtektonpipeline.DataSourceIBMCdTektonPipelineGenericSecretToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + model := 
new(cdtektonpipelinev2.TriggerManualTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerManualTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + triggerSourceModel := make(map[string]interface{}) + triggerSourceModel["type"] = "testString" + triggerSourceModel["properties"] 
= []map[string]interface{}{triggerSourcePropertiesModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["enable_events_from_forks"] = false + model["source"] = []map[string]interface{}{triggerSourceModel} + model["events"] = []string{"push", "pull_request"} + model["filter"] = "header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") 
+ triggerSourcePropertiesModel.Tool = toolModel + + triggerSourceModel := new(cdtektonpipelinev2.TriggerSource) + triggerSourceModel.Type = core.StringPtr("testString") + triggerSourceModel.Properties = triggerSourcePropertiesModel + + model := new(cdtektonpipelinev2.TriggerScmTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.EnableEventsFromForks = core.BoolPtr(false) + model.Source = triggerSourceModel + model.Events = []string{"push", "pull_request"} + model.Filter = core.StringPtr("header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerScmTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + 
model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["cron"] = "testString" + model["timezone"] = "America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerTimerTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.Cron = core.StringPtr("testString") + model.Timezone = core.StringPtr("America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model) + assert.Nil(t, err) + 
checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + genericSecretModel := make(map[string]interface{}) + genericSecretModel["type"] = "token_matches" + genericSecretModel["value"] = "testString" + genericSecretModel["source"] = "header" + genericSecretModel["key_name"] = "testString" + genericSecretModel["algorithm"] = "md4" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["secret"] = []map[string]interface{}{genericSecretModel} + model["webhook_url"] = "testString" + model["filter"] = "event.type == 'message' && event.text.contains('urgent')" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = 
core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + genericSecretModel := new(cdtektonpipelinev2.GenericSecret) + genericSecretModel.Type = core.StringPtr("token_matches") + genericSecretModel.Value = core.StringPtr("testString") + genericSecretModel.Source = core.StringPtr("header") + genericSecretModel.KeyName = core.StringPtr("testString") + genericSecretModel.Algorithm = core.StringPtr("md4") + + model := new(cdtektonpipelinev2.TriggerGenericTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.Secret = genericSecretModel + model.WebhookURL = core.StringPtr("testString") + model.Filter = core.StringPtr("event.type == 'message' && event.text.contains('urgent')") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger.go index bb69e29b60..4adf746658 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 
2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -14,6 +18,7 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" ) func DataSourceIBMCdTektonPipelineTrigger() *schema.Resource { @@ -281,7 +286,9 @@ func DataSourceIBMCdTektonPipelineTrigger() *schema.Resource { func dataSourceIBMCdTektonPipelineTriggerRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineTriggerOptions := &cdtektonpipelinev2.GetTektonPipelineTriggerOptions{} @@ -289,166 +296,199 @@ func dataSourceIBMCdTektonPipelineTriggerRead(context context.Context, d *schema getTektonPipelineTriggerOptions.SetPipelineID(d.Get("pipeline_id").(string)) getTektonPipelineTriggerOptions.SetTriggerID(d.Get("trigger_id").(string)) - TriggerIntf, response, err := cdTektonPipelineClient.GetTektonPipelineTriggerWithContext(context, getTektonPipelineTriggerOptions) + triggerIntf, _, err := cdTektonPipelineClient.GetTektonPipelineTriggerWithContext(context, getTektonPipelineTriggerOptions) if err != nil { - log.Printf("[DEBUG] GetTektonPipelineTriggerWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineTriggerWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineTriggerWithContext failed: %s", err.Error()), "(Data) 
ibm_cd_tekton_pipeline_trigger", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } - trigger := TriggerIntf.(*cdtektonpipelinev2.Trigger) + trigger := triggerIntf.(*cdtektonpipelinev2.Trigger) d.SetId(fmt.Sprintf("%s/%s", *getTektonPipelineTriggerOptions.PipelineID, *getTektonPipelineTriggerOptions.TriggerID)) - if err = d.Set("type", trigger.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) - } - - if err = d.Set("name", trigger.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + if !core.IsNil(trigger.Type) { + if err = d.Set("type", trigger.Type); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting type: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-type").GetDiag() + } } - if err = d.Set("href", trigger.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + if !core.IsNil(trigger.Name) { + if err = d.Set("name", trigger.Name); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-name").GetDiag() + } } - if err = d.Set("event_listener", trigger.EventListener); err != nil { - return diag.FromErr(fmt.Errorf("Error setting event_listener: %s", err)) + if !core.IsNil(trigger.Href) { + if err = d.Set("href", trigger.Href); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-href").GetDiag() + } } - if trigger.Tags != nil { - if err = d.Set("tags", trigger.Tags); err != nil { - return diag.FromErr(fmt.Errorf("Error setting tags: %s", err)) + if !core.IsNil(trigger.EventListener) { + if err = d.Set("event_listener", trigger.EventListener); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting event_listener: %s", err), "(Data) 
ibm_cd_tekton_pipeline_trigger", "read", "set-event_listener").GetDiag() } } - properties := []map[string]interface{}{} - if trigger.Properties != nil { - for _, modelItem := range trigger.Properties { - modelMap, err := dataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(&modelItem) + if !core.IsNil(trigger.Properties) { + properties := []map[string]interface{}{} + for _, propertiesItem := range trigger.Properties { + propertiesItemMap, err := DataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "properties-to-map").GetDiag() } - properties = append(properties, modelMap) + properties = append(properties, propertiesItemMap) + } + if err = d.Set("properties", properties); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting properties: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-properties").GetDiag() } - } - if err = d.Set("properties", properties); err != nil { - return diag.FromErr(fmt.Errorf("Error setting properties %s", err)) } - if trigger.Events != nil { - if err = d.Set("events", trigger.Events); err != nil { - return diag.FromErr(fmt.Errorf("Error setting events: %s", err)) + if !core.IsNil(trigger.Tags) { + tags := []interface{}{} + for _, tagsItem := range trigger.Tags { + tags = append(tags, tagsItem) + } + if err = d.Set("tags", tags); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting tags: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-tags").GetDiag() } } - worker := []map[string]interface{}{} - if trigger.Worker != nil { - modelMap, err := dataSourceIBMCdTektonPipelineTriggerWorkerToMap(trigger.Worker) + if !core.IsNil(trigger.Worker) { + worker := []map[string]interface{}{} + workerMap, err := 
DataSourceIBMCdTektonPipelineTriggerWorkerToMap(trigger.Worker) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "worker-to-map").GetDiag() + } + worker = append(worker, workerMap) + if err = d.Set("worker", worker); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting worker: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-worker").GetDiag() } - worker = append(worker, modelMap) - } - if err = d.Set("worker", worker); err != nil { - return diag.FromErr(fmt.Errorf("Error setting worker %s", err)) } - if err = d.Set("max_concurrent_runs", flex.IntValue(trigger.MaxConcurrentRuns)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting max_concurrent_runs: %s", err)) + if !core.IsNil(trigger.MaxConcurrentRuns) { + if err = d.Set("max_concurrent_runs", flex.IntValue(trigger.MaxConcurrentRuns)); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting max_concurrent_runs: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-max_concurrent_runs").GetDiag() + } } - if err = d.Set("enabled", trigger.Enabled); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enabled: %s", err)) + if !core.IsNil(trigger.Enabled) { + if err = d.Set("enabled", trigger.Enabled); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enabled: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-enabled").GetDiag() + } } - if err = d.Set("favorite", trigger.Favorite); err != nil { - return diag.FromErr(fmt.Errorf("Error setting favorite: %s", err)) + if !core.IsNil(trigger.Favorite) { + if err = d.Set("favorite", trigger.Favorite); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting favorite: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-favorite").GetDiag() + } } - if err = 
d.Set("enable_events_from_forks", trigger.EnableEventsFromForks); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_events_from_forks: %s", err)) + if !core.IsNil(trigger.EnableEventsFromForks) { + if err = d.Set("enable_events_from_forks", trigger.EnableEventsFromForks); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enable_events_from_forks: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-enable_events_from_forks").GetDiag() + } } - source := []map[string]interface{}{} - if trigger.Source != nil { - modelMap, err := dataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(trigger.Source) + if !core.IsNil(trigger.Source) { + source := []map[string]interface{}{} + sourceMap, err := DataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(trigger.Source) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "source-to-map").GetDiag() + } + source = append(source, sourceMap) + if err = d.Set("source", source); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting source: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-source").GetDiag() } - source = append(source, modelMap) } - if err = d.Set("source", source); err != nil { - return diag.FromErr(fmt.Errorf("Error setting source %s", err)) + + if !core.IsNil(trigger.Events) { + events := []interface{}{} + for _, eventsItem := range trigger.Events { + events = append(events, eventsItem) + } + if err = d.Set("events", events); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting events: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-events").GetDiag() + } } - if err = d.Set("filter", trigger.Filter); err != nil { - return diag.FromErr(fmt.Errorf("Error setting filter: %s", err)) + if !core.IsNil(trigger.Filter) { + if err = d.Set("filter", 
trigger.Filter); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting filter: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-filter").GetDiag() + } } - if err = d.Set("cron", trigger.Cron); err != nil { - return diag.FromErr(fmt.Errorf("Error setting cron: %s", err)) + if !core.IsNil(trigger.Cron) { + if err = d.Set("cron", trigger.Cron); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting cron: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-cron").GetDiag() + } } - if err = d.Set("timezone", trigger.Timezone); err != nil { - return diag.FromErr(fmt.Errorf("Error setting timezone: %s", err)) + if !core.IsNil(trigger.Timezone) { + if err = d.Set("timezone", trigger.Timezone); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting timezone: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-timezone").GetDiag() + } } - secret := []map[string]interface{}{} - if trigger.Secret != nil { - modelMap, err := dataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(trigger.Secret) + if !core.IsNil(trigger.Secret) { + secret := []map[string]interface{}{} + secretMap, err := DataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(trigger.Secret) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "secret-to-map").GetDiag() + } + secret = append(secret, secretMap) + if err = d.Set("secret", secret); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting secret: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-secret").GetDiag() } - secret = append(secret, modelMap) - } - if err = d.Set("secret", secret); err != nil { - return diag.FromErr(fmt.Errorf("Error setting secret %s", err)) } - if err = d.Set("webhook_url", trigger.WebhookURL); err != nil { - return 
diag.FromErr(fmt.Errorf("Error setting webhook_url: %s", err)) + if !core.IsNil(trigger.WebhookURL) { + if err = d.Set("webhook_url", trigger.WebhookURL); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting webhook_url: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger", "read", "set-webhook_url").GetDiag() + } } return nil } -func dataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type + modelMap["type"] = *model.Type if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } if model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.Name != nil { - modelMap["name"] = model.Name + modelMap["name"] = *model.Name } if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { +func 
DataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := dataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := DataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -456,20 +496,20 @@ func dataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model *cdtektonpipel return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Pattern != nil { - modelMap["pattern"] = model.Pattern + modelMap["pattern"] = *model.Pattern } - modelMap["blind_connection"] = model.BlindConnection + modelMap["blind_connection"] = *model.BlindConnection if model.HookID != nil { - modelMap["hook_id"] = model.HookID + modelMap["hook_id"] = *model.HookID } - toolMap, err := dataSourceIBMCdTektonPipelineTriggerToolToMap(model.Tool) + toolMap, err := DataSourceIBMCdTektonPipelineTriggerToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -477,28 +517,28 @@ func dataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model *cdt return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { 
modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func dataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { +func DataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Source != nil { - modelMap["source"] = model.Source + modelMap["source"] = *model.Source } if model.KeyName != nil { - modelMap["key_name"] = model.KeyName + modelMap["key_name"] = *model.KeyName } if model.Algorithm != nil { - modelMap["algorithm"] = model.Algorithm + modelMap["algorithm"] = *model.Algorithm } return modelMap, nil } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property.go index fe5ed84978..07e89a50b7 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -12,7 +16,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" ) func DataSourceIBMCdTektonPipelineTriggerProperty() *schema.Resource { @@ -80,7 +86,9 @@ func DataSourceIBMCdTektonPipelineTriggerProperty() *schema.Resource { func dataSourceIBMCdTektonPipelineTriggerPropertyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineTriggerPropertyOptions := &cdtektonpipelinev2.GetTektonPipelineTriggerPropertyOptions{} @@ -89,41 +97,54 @@ func dataSourceIBMCdTektonPipelineTriggerPropertyRead(context context.Context, d getTektonPipelineTriggerPropertyOptions.SetTriggerID(d.Get("trigger_id").(string)) getTektonPipelineTriggerPropertyOptions.SetPropertyName(d.Get("property_name").(string)) - triggerProperty, response, err := cdTektonPipelineClient.GetTektonPipelineTriggerPropertyWithContext(context, getTektonPipelineTriggerPropertyOptions) + triggerProperty, _, err := cdTektonPipelineClient.GetTektonPipelineTriggerPropertyWithContext(context, getTektonPipelineTriggerPropertyOptions) if err != nil { - log.Printf("[DEBUG] GetTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineTriggerPropertyWithContext failed 
%s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineTriggerPropertyWithContext failed: %s", err.Error()), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s/%s", *getTektonPipelineTriggerPropertyOptions.PipelineID, *getTektonPipelineTriggerPropertyOptions.TriggerID, *getTektonPipelineTriggerPropertyOptions.PropertyName)) if err = d.Set("name", triggerProperty.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-name").GetDiag() } - if err = d.Set("value", triggerProperty.Value); err != nil { - return diag.FromErr(fmt.Errorf("Error setting value: %s", err)) + if !core.IsNil(triggerProperty.Value) { + if err = d.Set("value", triggerProperty.Value); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting value: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-value").GetDiag() + } } - if err = d.Set("href", triggerProperty.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + if !core.IsNil(triggerProperty.Href) { + if err = d.Set("href", triggerProperty.Href); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-href").GetDiag() + } } - if err = d.Set("type", triggerProperty.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) + if !core.IsNil(triggerProperty.Enum) { + enum := []interface{}{} + for _, enumItem := range triggerProperty.Enum { + enum = append(enum, enumItem) + } + if err = d.Set("enum", enum); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting enum: 
%s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-enum").GetDiag() + } } - if err = d.Set("path", triggerProperty.Path); err != nil { - return diag.FromErr(fmt.Errorf("Error setting path: %s", err)) + if err = d.Set("type", triggerProperty.Type); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting type: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-type").GetDiag() } - if err = d.Set("locked", triggerProperty.Locked); err != nil { - return diag.FromErr(fmt.Errorf("Error setting locked: %s", err)) + if !core.IsNil(triggerProperty.Path) { + if err = d.Set("path", triggerProperty.Path); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting path: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-path").GetDiag() + } } - if triggerProperty.Enum != nil { - if err = d.Set("enum", triggerProperty.Enum); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enum: %s", err)) + if !core.IsNil(triggerProperty.Locked) { + if err = d.Set("locked", triggerProperty.Locked); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting locked: %s", err), "(Data) ibm_cd_tekton_pipeline_trigger_property", "read", "set-locked").GetDiag() } } diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property_test.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property_test.go index 7e547027c8..e933ce643c 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property_test.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_property_test.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline_test import ( @@ -24,10 +28,10 @@ func TestAccIBMCdTektonPipelineTriggerPropertyDataSourceBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfigBasic("", "", triggerPropertyName, triggerPropertyType), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "property_name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "type"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "property_name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "type"), ), }, }, @@ -36,8 +40,8 @@ func TestAccIBMCdTektonPipelineTriggerPropertyDataSourceBasic(t *testing.T) { func TestAccIBMCdTektonPipelineTriggerPropertyDataSourceAllArgs(t *testing.T) { triggerPropertyName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - triggerPropertyType := "text" triggerPropertyValue := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) + triggerPropertyType := "text" triggerPropertyPath := 
fmt.Sprintf("tf_path_%d", acctest.RandIntRange(10, 100)) triggerPropertyLocked := "true" @@ -46,15 +50,15 @@ func TestAccIBMCdTektonPipelineTriggerPropertyDataSourceAllArgs(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig("", "", triggerPropertyName, triggerPropertyType, triggerPropertyValue, triggerPropertyPath, triggerPropertyLocked), + Config: testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig("", "", triggerPropertyName, triggerPropertyValue, triggerPropertyType, triggerPropertyPath, triggerPropertyLocked), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "property_name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "value"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "href"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "type"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "locked"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "property_name"), + 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "value"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "href"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "type"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "locked"), ), }, }, @@ -78,7 +82,7 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfigBasic(trigger name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -97,8 +101,8 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfigBasic(trigger } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -108,34 +112,34 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfigBasic(trigger } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = 
ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] name = "trigger" type = "manual" event_listener = "listener" } - resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id type = "text" name = "trig-prop-1" value = "trig-prop-value-1" } - data "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.trigger_id + data "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.trigger_id property_name = "trig-prop-1" } `, rgName, tcName) } -func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig(triggerPropertyPipelineID string, triggerPropertyTriggerID string, triggerPropertyName string, triggerPropertyType string, triggerPropertyValue string, triggerPropertyPath string, triggerPropertyLocked string) string { +func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig(triggerPropertyPipelineID string, triggerPropertyTriggerID string, triggerPropertyName string, 
triggerPropertyValue string, triggerPropertyType string, triggerPropertyPath string, triggerPropertyLocked string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) return fmt.Sprintf(` @@ -152,7 +156,7 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig(triggerPrope name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -171,8 +175,8 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig(triggerPrope } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -182,29 +186,29 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyDataSourceConfig(triggerPrope } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] name = "trigger" type = "manual" event_listener = "listener" } - resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - trigger_id = 
ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id name = "%s" type = "%s" value = "%s" locked = "%s" } - data "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.trigger_id + data "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.trigger_id property_name = "%s" } `, rgName, tcName, triggerPropertyName, triggerPropertyType, triggerPropertyValue, triggerPropertyLocked, triggerPropertyName) diff --git a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_test.go b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_test.go index 3c60c56c33..751b1941d6 100644 --- a/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_test.go +++ b/ibm/service/cdtektonpipeline/data_source_ibm_cd_tekton_pipeline_trigger_test.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline_test import ( @@ -11,6 +15,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" + "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineTriggerDataSourceBasic(t *testing.T) { @@ -26,9 +34,9 @@ func TestAccIBMCdTektonPipelineTriggerDataSourceBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineTriggerDataSourceConfigBasic(triggerPipelineID, triggerType, triggerName, triggerEventListener), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "pipeline_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "trigger_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "trigger_id"), ), }, }, @@ -42,34 +50,30 @@ func TestAccIBMCdTektonPipelineTriggerDataSourceAllArgs(t *testing.T) { triggerEventListener := fmt.Sprintf("tf_event_listener_%d", acctest.RandIntRange(10, 100)) triggerMaxConcurrentRuns := fmt.Sprintf("%d", acctest.RandIntRange(10, 100)) triggerEnabled := "false" - triggerCron := fmt.Sprintf("tf_cron_%d", acctest.RandIntRange(10, 100)) - 
triggerTimezone := fmt.Sprintf("tf_timezone_%d", acctest.RandIntRange(10, 100)) - triggerFilter := fmt.Sprintf("tf_filter_%d", acctest.RandIntRange(10, 100)) triggerFavorite := "true" triggerEnableEventsFromForks := "true" + triggerFilter := fmt.Sprintf("tf_filter_%d", acctest.RandIntRange(10, 100)) + triggerCron := fmt.Sprintf("tf_cron_%d", acctest.RandIntRange(10, 100)) + triggerTimezone := fmt.Sprintf("tf_timezone_%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID, triggerType, triggerName, triggerEventListener, triggerMaxConcurrentRuns, triggerEnabled, triggerCron, triggerTimezone, triggerFilter, triggerFavorite, triggerEnableEventsFromForks), + Config: testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID, triggerType, triggerName, triggerEventListener, triggerMaxConcurrentRuns, triggerEnabled, triggerFavorite, triggerEnableEventsFromForks, triggerFilter, triggerCron, triggerTimezone), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "pipeline_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "trigger_id"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "type"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "name"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "href"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "event_listener"), - 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "properties.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "worker.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "max_concurrent_runs"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "enabled"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "favorite"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "enable_events_from_forks"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "source.#"), - resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "secret.#"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "trigger_id"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "type"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "name"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "href"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "event_listener"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "properties.#"), + 
resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "max_concurrent_runs"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "enabled"), + resource.TestCheckResourceAttrSet("data.ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "favorite"), ), }, }, @@ -93,7 +97,7 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfigBasic(triggerPipeline name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -112,8 +116,8 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfigBasic(triggerPipeline } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -123,27 +127,27 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfigBasic(triggerPipeline } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "%s" name = "%s" event_listener = "%s" } - data "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { - pipeline_id = 
ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + data "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { + pipeline_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id } `, rgName, tcName, triggerType, triggerName, triggerEventListener) } -func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID string, triggerType string, triggerName string, triggerEventListener string, triggerMaxConcurrentRuns string, triggerEnabled string, triggerCron string, triggerTimezone string, triggerFilter string, triggerFavorite string, triggerEnableEventsFromForks string) string { +func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID string, triggerType string, triggerName string, triggerEventListener string, triggerMaxConcurrentRuns string, triggerEnabled string, triggerFavorite string, triggerEnableEventsFromForks string, triggerFilter string, triggerCron string, triggerTimezone string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) return fmt.Sprintf(` @@ -160,7 +164,7 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID st name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -179,8 +183,8 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID st } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" 
"cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -190,24 +194,185 @@ func testAccCheckIBMCdTektonPipelineTriggerDataSourceConfig(triggerPipelineID st } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "%s" name = "%s" event_listener = "%s" max_concurrent_runs = %s enabled = %s + favorite = %s } - data "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { - pipeline_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + data "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { + pipeline_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id } - `, rgName, tcName, triggerType, triggerName, triggerEventListener, triggerMaxConcurrentRuns, triggerEnabled) + `, rgName, tcName, triggerType, triggerName, triggerEventListener, triggerMaxConcurrentRuns, triggerEnabled, triggerFavorite) +} + +func TestDataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["path"] = 
"testString" + model["locked"] = true + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.TriggerProperty) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Path = core.StringPtr("testString") + model.Locked = core.BoolPtr(true) + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerWorkerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["type"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Worker) + model.Name = core.StringPtr("testString") + model.Type = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerWorkerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + assert.Equal(t, result, 
model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.TriggerSource) + model.Type = core.StringPtr("testString") + model.Properties = triggerSourcePropertiesModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["pattern"] = "testString" + model["blind_connection"] = true + model["hook_id"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Pattern = core.StringPtr("testString") + model.BlindConnection = core.BoolPtr(true) + model.HookID = core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func 
TestDataSourceIBMCdTektonPipelineTriggerToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["type"] = "token_matches" + model["value"] = "testString" + model["source"] = "header" + model["key_name"] = "testString" + model["algorithm"] = "md4" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.GenericSecret) + model.Type = core.StringPtr("token_matches") + model.Value = core.StringPtr("testString") + model.Source = core.StringPtr("header") + model.KeyName = core.StringPtr("testString") + model.Algorithm = core.StringPtr("md4") + + result, err := cdtektonpipeline.DataSourceIBMCdTektonPipelineTriggerGenericSecretToMap(model) + assert.Nil(t, err) + checkResult(result) } diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline.go index 4d5b9abd67..8e7da3babc 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -27,30 +31,23 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { Importer: &schema.ResourceImporter{}, Schema: map[string]*schema.Schema{ - "next_build_number": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Description: "Specify the build number that will be used for the next pipeline run. Build numbers can be any positive whole number between 0 and 100000000000000.", - }, - "enable_notifications": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Flag to enable notifications for this pipeline. If enabled, the Tekton pipeline run events will be published to all the destinations specified by the Slack and Event Notifications integrations in the parent toolchain.", - }, - "enable_partial_cloning": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Flag to enable partial cloning for this pipeline. When partial clone is enabled, only the files contained within the paths specified in definition repositories are read and cloned, this means that symbolic links might not work.", - }, "worker": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, Optional: true, - Description: "Specify the worker that is to be used to run the trigger, indicated by a worker object with only the worker ID. If not specified or set as `worker: { id: 'public' }`, the IBM Managed shared workers are used.", + Description: "Details of the worker used to run the pipeline.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the worker. Computed based on the worker ID.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the worker. 
Computed based on the worker ID.", + }, "id": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -59,6 +56,23 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { }, }, }, + "next_build_number": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Description: "The build number that will be used for the next pipeline run.", + }, + "enable_notifications": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag to enable notifications for this pipeline. If enabled, the Tekton pipeline run events will be published to all the destinations specified by the Slack and Event Notifications integrations in the parent toolchain. If omitted, this feature is disabled by default.", + }, + "enable_partial_cloning": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Flag to enable partial cloning for this pipeline. When partial clone is enabled, only the files contained within the paths specified in definition repositories are read and cloned, this means that symbolic links might not work. 
If omitted, this feature is disabled by default.", + }, "pipeline_id": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -84,6 +98,7 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { "id": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "ID.", }, }, @@ -97,12 +112,12 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { Schema: map[string]*schema.Schema{ "id": &schema.Schema{ Type: schema.TypeString, - Required: true, - Description: "UUID.", + Computed: true, + Description: "Universally Unique Identifier.", }, "crn": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, Description: "The CRN for the toolchain that contains the Tekton pipeline.", }, }, @@ -116,48 +131,47 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { Schema: map[string]*schema.Schema{ "source": &schema.Schema{ Type: schema.TypeList, - MinItems: 1, - MaxItems: 1, - Required: true, + Computed: true, Description: "Source repository containing the Tekton pipeline definition.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, Description: "The only supported source type is \"git\", indicating that the source is a git repository.", }, "properties": &schema.Schema{ Type: schema.TypeList, - MinItems: 1, - MaxItems: 1, - Required: true, + Computed: true, Description: "Properties of the source, which define the URL of the repository and a branch or tag.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "url": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "URL of the definition repository.", }, "branch": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "A branch from the repo, specify one of branch or tag only.", }, "tag": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "A tag 
from the repo, specify one of branch or tag only.", }, "path": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, Description: "The path to the definition's YAML files.", }, "tool": &schema.Schema{ Type: schema.TypeList, + Optional: true, Computed: true, Description: "Reference to the repository tool in the parent toolchain.", Elem: &schema.Resource{ @@ -178,6 +192,7 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { }, "href": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "API URL for interacting with the definition.", }, @@ -197,41 +212,46 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property name.", }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, DiffSuppressFunc: flex.SuppressPipelinePropertyRawSecret, Description: "Property value. Any string value is valid.", }, "href": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "API URL for interacting with the property.", }, "enum": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, Description: "Options for `single_select` property type. Only needed when using `single_select` property type.", Elem: &schema.Schema{Type: schema.TypeString}, }, "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property type.", }, "locked": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Computed: true, Description: "When true, this property cannot be overridden by a trigger property or at runtime. Attempting to override it will result in run requests being rejected. 
The default is false.", }, "path": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used.", }, }, @@ -255,72 +275,83 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { Schema: map[string]*schema.Schema{ "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, Description: "Trigger type.", }, "name": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, Description: "Trigger name.", }, "href": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "API URL for interacting with the trigger. Only included when fetching the list of pipeline triggers.", }, "event_listener": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, Description: "Event listener name. The name of the event listener to which the trigger is associated. The event listeners are defined in the definition repositories of the Tekton pipeline.", }, "id": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "The Trigger ID.", }, "properties": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, Description: "Optional trigger properties are used to override or supplement the pipeline properties when triggering a pipeline run.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property name.", }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, DiffSuppressFunc: flex.SuppressTriggerPropertyRawSecret, Description: "Property value. 
Any string value is valid.", }, "href": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "API URL for interacting with the trigger property.", }, "enum": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, Description: "Options for `single_select` property type. Only needed for `single_select` property type.", Elem: &schema.Schema{Type: schema.TypeString}, }, "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property type.", }, "path": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used.", }, "locked": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Computed: true, Description: "When true, this property cannot be overridden at runtime. Attempting to override it will result in run requests being rejected. 
The default is false.", }, }, @@ -329,13 +360,14 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { "tags": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, Description: "Optional trigger tags array.", Elem: &schema.Schema{Type: schema.TypeString}, }, "worker": &schema.Schema{ Type: schema.TypeList, - MaxItems: 1, Optional: true, + Computed: true, Description: "Details of the worker used to run the trigger.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -351,7 +383,7 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { }, "id": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, Description: "ID of the worker.", }, }, @@ -360,60 +392,61 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { "max_concurrent_runs": &schema.Schema{ Type: schema.TypeInt, Optional: true, + Computed: true, Description: "Defines the maximum number of concurrent runs for this trigger. If omitted then the concurrency limit is disabled for this trigger.", }, "enabled": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Default: true, + Computed: true, Description: "Flag to check if the trigger is enabled.", }, "favorite": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Default: false, + Computed: true, Description: "Mark the trigger as a favorite.", }, "enable_events_from_forks": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Default: false, + Computed: true, Description: "When enabled, pull request events from forks of the selected repository will trigger a pipeline run.", }, "source": &schema.Schema{ Type: schema.TypeList, - MaxItems: 1, Optional: true, + Computed: true, Description: "Source repository for a Git trigger. Only required for Git triggers. The referenced repository URL must match the URL of a repository tool integration in the parent toolchain. 
Obtain the list of integrations from the toolchain API https://cloud.ibm.com/apidocs/toolchain#list-tools.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, Description: "The only supported source type is \"git\", indicating that the source is a git repository.", }, "properties": &schema.Schema{ Type: schema.TypeList, - MinItems: 1, - MaxItems: 1, - Required: true, + Computed: true, Description: "Properties of the source, which define the URL of the repository and a branch or pattern.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "url": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "URL of the repository to which the trigger is listening.", }, "branch": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Name of a branch from the repo. Only one of branch, pattern, or filter should be specified.", }, "pattern": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "The pattern of Git branch or tag. You can specify a glob pattern such as '!test' or '*master' to match against multiple tags or branches in the repository.The glob pattern used must conform to Bash 4.3 specifications, see bash documentation for more info: https://www.gnu.org/software/bash/manual/bash.html#Pattern-Matching. Only one of branch, pattern, or filter should be specified.", }, "blind_connection": &schema.Schema{ @@ -449,6 +482,7 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { "events": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, DiffSuppressFunc: flex.SuppressTriggerEvents, Description: "Either 'events' or 'filter' is required specifically for Git triggers. Stores a list of events that a Git trigger listens to. Choose one or more from 'push', 'pull_request', and 'pull_request_closed'. 
If SCM repositories use the 'merge request' term, they correspond to the generic term i.e. 'pull request'.", Elem: &schema.Schema{Type: schema.TypeString}, @@ -456,49 +490,57 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { "filter": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Either 'events' or 'filter' can be used. Stores the CEL (Common Expression Language) expression value which is used for event filtering against the Git webhook payloads.", }, "cron": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Only needed for timer triggers. CRON expression that indicates when this trigger will activate. Maximum frequency is every 5 minutes. The string is based on UNIX crontab syntax: minute, hour, day of month, month, day of week. Example: The CRON expression 0 *_/2 * * * - translates to - every 2 hours.", }, "timezone": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Only used for timer triggers. Specify the timezone used for this timer trigger, which will ensure the CRON activates this trigger relative to the specified timezone. If no timezone is specified, the default timezone used is UTC. Valid timezones are those listed in the IANA timezone database, https://www.iana.org/time-zones.", }, "secret": &schema.Schema{ Type: schema.TypeList, - MaxItems: 1, Optional: true, + Computed: true, Description: "Only needed for Generic Webhook trigger type. 
The secret is used to start the Generic Webhook trigger.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Secret type.", }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, DiffSuppressFunc: flex.SuppressGenericWebhookRawSecret, Description: "Secret value, not needed if secret type is `internal_validation`.", }, "source": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Secret location, not needed if secret type is `internal_validation`.", }, "key_name": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Secret name, not needed if type is `internal_validation`.", }, "algorithm": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "Algorithm used for `digest_matches` secret type. Only needed for `digest_matches` secret type.", }, }, @@ -506,6 +548,7 @@ func ResourceIBMCdTektonPipeline() *schema.Resource { }, "webhook_url": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "Webhook URL that can be used to trigger pipeline runs.", }, @@ -556,7 +599,9 @@ func ResourceIBMCdTektonPipelineValidator() *validate.ResourceValidator { func resourceIBMCdTektonPipelineCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "create", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } createTektonPipelineOptions := &cdtektonpipelinev2.CreateTektonPipelineOptions{} @@ -564,16 +609,16 @@ func resourceIBMCdTektonPipelineCreate(context context.Context, d *schema.Resour if _, ok := 
d.GetOk("next_build_number"); ok { createTektonPipelineOptions.SetNextBuildNumber(int64(d.Get("next_build_number").(int))) } - if _, ok := d.GetOkExists("enable_notifications"); ok { + if _, ok := d.GetOk("enable_notifications"); ok { createTektonPipelineOptions.SetEnableNotifications(d.Get("enable_notifications").(bool)) } - if _, ok := d.GetOkExists("enable_partial_cloning"); ok { + if _, ok := d.GetOk("enable_partial_cloning"); ok { createTektonPipelineOptions.SetEnablePartialCloning(d.Get("enable_partial_cloning").(bool)) } if _, ok := d.GetOk("worker"); ok { - workerModel, err := resourceIBMCdTektonPipelineMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) + workerModel, err := ResourceIBMCdTektonPipelineMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "create", "parse-worker").GetDiag() } createTektonPipelineOptions.SetWorker(workerModel) } @@ -581,10 +626,11 @@ func resourceIBMCdTektonPipelineCreate(context context.Context, d *schema.Resour if _, ok := d.GetOk("pipeline_id"); ok { createTektonPipelineOptions.SetID(d.Get("pipeline_id").(string)) } - tektonPipeline, response, err := cdTektonPipelineClient.CreateTektonPipelineWithContext(context, createTektonPipelineOptions) + tektonPipeline, _, err := cdTektonPipelineClient.CreateTektonPipelineWithContext(context, createTektonPipelineOptions) if err != nil { - log.Printf("[DEBUG] CreateTektonPipelineWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("CreateTektonPipelineWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateTektonPipelineWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(*tektonPipeline.ID) @@ -595,7 +641,9 @@ func resourceIBMCdTektonPipelineCreate(context 
context.Context, d *schema.Resour func resourceIBMCdTektonPipelineRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineOptions := &cdtektonpipelinev2.GetTektonPipelineOptions{} @@ -608,109 +656,128 @@ func resourceIBMCdTektonPipelineRead(context context.Context, d *schema.Resource d.SetId("") return nil } - log.Printf("[DEBUG] GetTektonPipelineWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } + if !core.IsNil(tektonPipeline.Worker) { + workerMap, err := ResourceIBMCdTektonPipelineWorkerToMap(tektonPipeline.Worker) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "worker-to-map").GetDiag() + } + if err = d.Set("worker", []map[string]interface{}{workerMap}); err != nil { + err = fmt.Errorf("Error setting worker: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-worker").GetDiag() + } + } if !core.IsNil(tektonPipeline.NextBuildNumber) { if err = d.Set("next_build_number", flex.IntValue(tektonPipeline.NextBuildNumber)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting next_build_number: %s", err)) + err = fmt.Errorf("Error setting next_build_number: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", 
"set-next_build_number").GetDiag() } } if !core.IsNil(tektonPipeline.EnableNotifications) { if err = d.Set("enable_notifications", tektonPipeline.EnableNotifications); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_notifications: %s", err)) + err = fmt.Errorf("Error setting enable_notifications: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-enable_notifications").GetDiag() } } if !core.IsNil(tektonPipeline.EnablePartialCloning) { if err = d.Set("enable_partial_cloning", tektonPipeline.EnablePartialCloning); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_partial_cloning: %s", err)) - } - } - if !core.IsNil(tektonPipeline.Worker) { - workerMap, err := resourceIBMCdTektonPipelineWorkerIdentityToMap(tektonPipeline.Worker) - if err != nil { - return diag.FromErr(err) - } - if err = d.Set("worker", []map[string]interface{}{workerMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting worker: %s", err)) + err = fmt.Errorf("Error setting enable_partial_cloning: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-enable_partial_cloning").GetDiag() } } if err = d.Set("pipeline_id", tektonPipeline.ID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting pipeline_id: %s", err)) + err = fmt.Errorf("Error setting pipeline_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-pipeline_id").GetDiag() } if err = d.Set("name", tektonPipeline.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + err = fmt.Errorf("Error setting name: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-name").GetDiag() } if err = d.Set("status", tektonPipeline.Status); err != nil { - return diag.FromErr(fmt.Errorf("Error setting status: %s", err)) + err = fmt.Errorf("Error 
setting status: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-status").GetDiag() } - resourceGroupMap, err := resourceIBMCdTektonPipelineResourceGroupReferenceToMap(tektonPipeline.ResourceGroup) + resourceGroupMap, err := ResourceIBMCdTektonPipelineResourceGroupReferenceToMap(tektonPipeline.ResourceGroup) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "resource_group-to-map").GetDiag() } if err = d.Set("resource_group", []map[string]interface{}{resourceGroupMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting resource_group: %s", err)) + err = fmt.Errorf("Error setting resource_group: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-resource_group").GetDiag() } - toolchainMap, err := resourceIBMCdTektonPipelineToolchainReferenceToMap(tektonPipeline.Toolchain) + toolchainMap, err := ResourceIBMCdTektonPipelineToolchainReferenceToMap(tektonPipeline.Toolchain) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "toolchain-to-map").GetDiag() } if err = d.Set("toolchain", []map[string]interface{}{toolchainMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting toolchain: %s", err)) + err = fmt.Errorf("Error setting toolchain: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-toolchain").GetDiag() } definitions := []map[string]interface{}{} for _, definitionsItem := range tektonPipeline.Definitions { - definitionsItemMap, err := resourceIBMCdTektonPipelineDefinitionToMap(&definitionsItem) + definitionsItemMap, err := ResourceIBMCdTektonPipelineDefinitionToMap(&definitionsItem) // #nosec G601 if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, 
err.Error(), "ibm_cd_tekton_pipeline", "read", "definitions-to-map").GetDiag() } definitions = append(definitions, definitionsItemMap) } if err = d.Set("definitions", definitions); err != nil { - return diag.FromErr(fmt.Errorf("Error setting definitions: %s", err)) + err = fmt.Errorf("Error setting definitions: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-definitions").GetDiag() } properties := []map[string]interface{}{} for _, propertiesItem := range tektonPipeline.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelinePropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelinePropertyToMap(&propertiesItem) // #nosec G601 if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "properties-to-map").GetDiag() } properties = append(properties, propertiesItemMap) } if err = d.Set("properties", properties); err != nil { - return diag.FromErr(fmt.Errorf("Error setting properties: %s", err)) + err = fmt.Errorf("Error setting properties: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-properties").GetDiag() } if err = d.Set("updated_at", flex.DateTimeToString(tektonPipeline.UpdatedAt)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting updated_at: %s", err)) + err = fmt.Errorf("Error setting updated_at: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-updated_at").GetDiag() } if err = d.Set("created_at", flex.DateTimeToString(tektonPipeline.CreatedAt)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + err = fmt.Errorf("Error setting created_at: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-created_at").GetDiag() } triggers := []map[string]interface{}{} for 
_, triggersItem := range tektonPipeline.Triggers { - triggersItemMap, err := resourceIBMCdTektonPipelineTriggerToMap(triggersItem) + triggersItemMap, err := ResourceIBMCdTektonPipelineTriggerToMap(triggersItem) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "triggers-to-map").GetDiag() } triggers = append(triggers, triggersItemMap) } if err = d.Set("triggers", triggers); err != nil { - return diag.FromErr(fmt.Errorf("Error setting triggers: %s", err)) + err = fmt.Errorf("Error setting triggers: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-triggers").GetDiag() } if err = d.Set("runs_url", tektonPipeline.RunsURL); err != nil { - return diag.FromErr(fmt.Errorf("Error setting runs_url: %s", err)) + err = fmt.Errorf("Error setting runs_url: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-runs_url").GetDiag() } if !core.IsNil(tektonPipeline.Href) { if err = d.Set("href", tektonPipeline.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + err = fmt.Errorf("Error setting href: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-href").GetDiag() } } if err = d.Set("build_number", flex.IntValue(tektonPipeline.BuildNumber)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting build_number: %s", err)) + err = fmt.Errorf("Error setting build_number: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", "set-build_number").GetDiag() } if err = d.Set("enabled", tektonPipeline.Enabled); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enabled: %s", err)) + err = fmt.Errorf("Error setting enabled: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "read", 
"set-enabled").GetDiag() } return nil @@ -719,7 +786,9 @@ func resourceIBMCdTektonPipelineRead(context context.Context, d *schema.Resource func resourceIBMCdTektonPipelineUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "update", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } updateTektonPipelineOptions := &cdtektonpipelinev2.UpdateTektonPipelineOptions{} @@ -745,20 +814,25 @@ func resourceIBMCdTektonPipelineUpdate(context context.Context, d *schema.Resour hasChange = true } if d.HasChange("worker") { - worker, err := resourceIBMCdTektonPipelineMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) + worker, err := ResourceIBMCdTektonPipelineMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "update", "parse-worker").GetDiag() } patchVals.Worker = worker hasChange = true } if hasChange { - updateTektonPipelineOptions.TektonPipelinePatch, _ = patchVals.AsPatch() - _, response, err := cdTektonPipelineClient.UpdateTektonPipelineWithContext(context, updateTektonPipelineOptions) + // Fields with `nil` values are omitted from the generic map, + // so we need to re-add them to support removing arguments + // in merge-patch operations sent to the service. 
+ updateTektonPipelineOptions.TektonPipelinePatch = ResourceIBMCdTektonPipelineTektonPipelinePatchAsPatch(patchVals, d) + + _, _, err = cdTektonPipelineClient.UpdateTektonPipelineWithContext(context, updateTektonPipelineOptions) if err != nil { - log.Printf("[DEBUG] UpdateTektonPipelineWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("UpdateTektonPipelineWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("UpdateTektonPipelineWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } } @@ -768,17 +842,20 @@ func resourceIBMCdTektonPipelineUpdate(context context.Context, d *schema.Resour func resourceIBMCdTektonPipelineDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline", "delete", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } deleteTektonPipelineOptions := &cdtektonpipelinev2.DeleteTektonPipelineOptions{} deleteTektonPipelineOptions.SetID(d.Id()) - response, err := cdTektonPipelineClient.DeleteTektonPipelineWithContext(context, deleteTektonPipelineOptions) + _, err = cdTektonPipelineClient.DeleteTektonPipelineWithContext(context, deleteTektonPipelineOptions) if err != nil { - log.Printf("[DEBUG] DeleteTektonPipelineWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteTektonPipelineWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteTektonPipelineWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId("") @@ -786,51 +863,57 
@@ func resourceIBMCdTektonPipelineDelete(context context.Context, d *schema.Resour return nil } -func resourceIBMCdTektonPipelineMapToWorkerIdentity(modelMap map[string]interface{}) (*cdtektonpipelinev2.WorkerIdentity, error) { +func ResourceIBMCdTektonPipelineMapToWorkerIdentity(modelMap map[string]interface{}) (*cdtektonpipelinev2.WorkerIdentity, error) { model := &cdtektonpipelinev2.WorkerIdentity{} model.ID = core.StringPtr(modelMap["id"].(string)) return model, nil } -func resourceIBMCdTektonPipelineWorkerIdentityToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.Type != nil { + modelMap["type"] = *model.Type + } + modelMap["id"] = *model.ID return modelMap, nil } -func resourceIBMCdTektonPipelineResourceGroupReferenceToMap(model *cdtektonpipelinev2.ResourceGroupReference) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineResourceGroupReferenceToMap(model *cdtektonpipelinev2.ResourceGroupReference) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.ID != nil { - modelMap["id"] = model.ID + modelMap["id"] = *model.ID } return modelMap, nil } -func resourceIBMCdTektonPipelineToolchainReferenceToMap(model *cdtektonpipelinev2.ToolchainReference) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineToolchainReferenceToMap(model *cdtektonpipelinev2.ToolchainReference) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID - modelMap["crn"] = model.CRN + modelMap["id"] = *model.ID + modelMap["crn"] = *model.CRN return modelMap, nil } -func resourceIBMCdTektonPipelineDefinitionToMap(model *cdtektonpipelinev2.Definition) (map[string]interface{}, error) { +func 
ResourceIBMCdTektonPipelineDefinitionToMap(model *cdtektonpipelinev2.Definition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - sourceMap, err := resourceIBMCdTektonPipelineDefinitionSourceToMap(model.Source) + sourceMap, err := ResourceIBMCdTektonPipelineDefinitionSourceToMap(model.Source) if err != nil { return modelMap, err } modelMap["source"] = []map[string]interface{}{sourceMap} if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func resourceIBMCdTektonPipelineDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := resourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := ResourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -838,18 +921,18 @@ func resourceIBMCdTektonPipelineDefinitionSourceToMap(model *cdtektonpipelinev2. 
return modelMap, nil } -func resourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Tag != nil { - modelMap["tag"] = model.Tag + modelMap["tag"] = *model.Tag } - modelMap["path"] = model.Path + modelMap["path"] = *model.Path if model.Tool != nil { - toolMap, err := resourceIBMCdTektonPipelineToolToMap(model.Tool) + toolMap, err := ResourceIBMCdTektonPipelineToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -858,65 +941,65 @@ func resourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model *cdtektonp return modelMap, nil } -func resourceIBMCdTektonPipelineToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } -func resourceIBMCdTektonPipelinePropertyToMap(model *cdtektonpipelinev2.Property) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelinePropertyToMap(model *cdtektonpipelinev2.Property) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type + modelMap["type"] = *model.Type if 
model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerIntf) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerIntf) (map[string]interface{}, error) { if _, ok := model.(*cdtektonpipelinev2.TriggerManualTrigger); ok { - return resourceIBMCdTektonPipelineTriggerManualTriggerToMap(model.(*cdtektonpipelinev2.TriggerManualTrigger)) + return ResourceIBMCdTektonPipelineTriggerManualTriggerToMap(model.(*cdtektonpipelinev2.TriggerManualTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerScmTrigger); ok { - return resourceIBMCdTektonPipelineTriggerScmTriggerToMap(model.(*cdtektonpipelinev2.TriggerScmTrigger)) + return ResourceIBMCdTektonPipelineTriggerScmTriggerToMap(model.(*cdtektonpipelinev2.TriggerScmTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerTimerTrigger); ok { - return resourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model.(*cdtektonpipelinev2.TriggerTimerTrigger)) + return ResourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model.(*cdtektonpipelinev2.TriggerTimerTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.TriggerGenericTrigger); ok { - return resourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model.(*cdtektonpipelinev2.TriggerGenericTrigger)) + return ResourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model.(*cdtektonpipelinev2.TriggerGenericTrigger)) } else if _, ok := model.(*cdtektonpipelinev2.Trigger); ok { modelMap := make(map[string]interface{}) model := model.(*cdtektonpipelinev2.Trigger) if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } if model.Name != nil { - modelMap["name"] = model.Name + modelMap["name"] = *model.Name } if model.Href != nil { - modelMap["href"] = model.Href + 
modelMap["href"] = *model.Href } if model.EventListener != nil { - modelMap["event_listener"] = model.EventListener + modelMap["event_listener"] = *model.EventListener } if model.ID != nil { - modelMap["id"] = model.ID + modelMap["id"] = *model.ID } if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -928,7 +1011,7 @@ func resourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerInt modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := resourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := ResourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -938,16 +1021,16 @@ func resourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerInt modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } if model.Enabled != nil { - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled } if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.EnableEventsFromForks != nil { - modelMap["enable_events_from_forks"] = model.EnableEventsFromForks + modelMap["enable_events_from_forks"] = *model.EnableEventsFromForks } if model.Source != nil { - sourceMap, err := resourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) + sourceMap, err := ResourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) if err != nil { return modelMap, err } @@ -957,23 +1040,23 @@ func resourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerInt modelMap["events"] = model.Events } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } if 
model.Cron != nil { - modelMap["cron"] = model.Cron + modelMap["cron"] = *model.Cron } if model.Timezone != nil { - modelMap["timezone"] = model.Timezone + modelMap["timezone"] = *model.Timezone } if model.Secret != nil { - secretMap, err := resourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) + secretMap, err := ResourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) if err != nil { return modelMap, err } modelMap["secret"] = []map[string]interface{}{secretMap} } if model.WebhookURL != nil { - modelMap["webhook_url"] = model.WebhookURL + modelMap["webhook_url"] = *model.WebhookURL } return modelMap, nil } else { @@ -981,44 +1064,32 @@ func resourceIBMCdTektonPipelineTriggerToMap(model cdtektonpipelinev2.TriggerInt } } -func resourceIBMCdTektonPipelineTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type + modelMap["type"] = *model.Type if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } if model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } return modelMap, nil } -func resourceIBMCdTektonPipelineWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Name != nil { - modelMap["name"] = model.Name - } - if model.Type != nil { - 
modelMap["type"] = model.Type - } - modelMap["id"] = model.ID - return modelMap, nil -} - -func resourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { - modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := resourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model.Properties) + modelMap["type"] = *model.Type + propertiesMap, err := ResourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -1026,20 +1097,20 @@ func resourceIBMCdTektonPipelineTriggerSourceToMap(model *cdtektonpipelinev2.Tri return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Pattern != nil { - modelMap["pattern"] = model.Pattern + modelMap["pattern"] = *model.Pattern } - modelMap["blind_connection"] = model.BlindConnection + modelMap["blind_connection"] = *model.BlindConnection if model.HookID != nil { - modelMap["hook_id"] = model.HookID + modelMap["hook_id"] = *model.HookID } - toolMap, err := resourceIBMCdTektonPipelineToolToMap(model.Tool) + toolMap, err := ResourceIBMCdTektonPipelineToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -1047,39 +1118,39 @@ func resourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model *cdtektonpipe return modelMap, nil } -func resourceIBMCdTektonPipelineGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineGenericSecretToMap(model 
*cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] = *model.Type } if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Source != nil { - modelMap["source"] = model.Source + modelMap["source"] = *model.Source } if model.KeyName != nil { - modelMap["key_name"] = model.KeyName + modelMap["key_name"] = *model.KeyName } if model.Algorithm != nil { - modelMap["algorithm"] = model.Algorithm + modelMap["algorithm"] = *model.Algorithm } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelinev2.TriggerManualTrigger) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelinev2.TriggerManualTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -1091,7 +1162,7 @@ func resourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelin modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := resourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := 
ResourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1100,26 +1171,26 @@ func resourceIBMCdTektonPipelineTriggerManualTriggerToMap(model *cdtektonpipelin if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2.TriggerScmTrigger) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2.TriggerScmTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -1131,7 +1202,7 @@ func resourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2 modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := resourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := ResourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1140,15 +1211,15 @@ func resourceIBMCdTektonPipelineTriggerScmTriggerToMap(model 
*cdtektonpipelinev2 if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.EnableEventsFromForks != nil { - modelMap["enable_events_from_forks"] = model.EnableEventsFromForks + modelMap["enable_events_from_forks"] = *model.EnableEventsFromForks } if model.Source != nil { - sourceMap, err := resourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) + sourceMap, err := ResourceIBMCdTektonPipelineTriggerSourceToMap(model.Source) if err != nil { return modelMap, err } @@ -1158,24 +1229,24 @@ func resourceIBMCdTektonPipelineTriggerScmTriggerToMap(model *cdtektonpipelinev2 modelMap["events"] = model.Events } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipelinev2.TriggerTimerTrigger) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipelinev2.TriggerTimerTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // 
#nosec G601 if err != nil { return modelMap, err } @@ -1187,7 +1258,7 @@ func resourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipeline modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := resourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := ResourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1196,32 +1267,32 @@ func resourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model *cdtektonpipeline if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.Cron != nil { - modelMap["cron"] = model.Cron + modelMap["cron"] = *model.Cron } if model.Timezone != nil { - modelMap["timezone"] = model.Timezone + modelMap["timezone"] = *model.Timezone } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipelinev2.TriggerGenericTrigger) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipelinev2.TriggerGenericTrigger) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - modelMap["name"] = model.Name + modelMap["type"] = *model.Type + modelMap["name"] = *model.Name if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } - modelMap["event_listener"] = model.EventListener - modelMap["id"] = model.ID + modelMap["event_listener"] = *model.EventListener + modelMap["id"] = *model.ID if model.Properties != nil { properties := []map[string]interface{}{} for _, propertiesItem := range model.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := 
ResourceIBMCdTektonPipelineTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { return modelMap, err } @@ -1233,7 +1304,7 @@ func resourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipeli modelMap["tags"] = model.Tags } if model.Worker != nil { - workerMap, err := resourceIBMCdTektonPipelineWorkerToMap(model.Worker) + workerMap, err := ResourceIBMCdTektonPipelineWorkerToMap(model.Worker) if err != nil { return modelMap, err } @@ -1242,22 +1313,46 @@ func resourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model *cdtektonpipeli if model.MaxConcurrentRuns != nil { modelMap["max_concurrent_runs"] = flex.IntValue(model.MaxConcurrentRuns) } - modelMap["enabled"] = model.Enabled + modelMap["enabled"] = *model.Enabled if model.Favorite != nil { - modelMap["favorite"] = model.Favorite + modelMap["favorite"] = *model.Favorite } if model.Secret != nil { - secretMap, err := resourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) + secretMap, err := ResourceIBMCdTektonPipelineGenericSecretToMap(model.Secret) if err != nil { return modelMap, err } modelMap["secret"] = []map[string]interface{}{secretMap} } if model.WebhookURL != nil { - modelMap["webhook_url"] = model.WebhookURL + modelMap["webhook_url"] = *model.WebhookURL } if model.Filter != nil { - modelMap["filter"] = model.Filter + modelMap["filter"] = *model.Filter } return modelMap, nil } + +func ResourceIBMCdTektonPipelineTektonPipelinePatchAsPatch(patchVals *cdtektonpipelinev2.TektonPipelinePatch, d *schema.ResourceData) map[string]interface{} { + patch, _ := patchVals.AsPatch() + var path string + + path = "next_build_number" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["next_build_number"] = nil + } + path = "enable_notifications" + if _, exists := d.GetOkExists(path); d.HasChange(path) && !exists { + patch["enable_notifications"] = nil + } + path = "enable_partial_cloning" + if _, exists := d.GetOkExists(path); d.HasChange(path) && !exists { + 
patch["enable_partial_cloning"] = nil + } + path = "worker" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["worker"] = nil + } + + return patch +} diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition.go index 516c935aa1..c1237d1d80 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -78,6 +82,7 @@ func ResourceIBMCdTektonPipelineDefinition() *schema.Resource { }, "tool": &schema.Schema{ Type: schema.TypeList, + Optional: true, Computed: true, Description: "Reference to the repository tool in the parent toolchain.", Elem: &schema.Resource{ @@ -131,22 +136,25 @@ func ResourceIBMCdTektonPipelineDefinitionValidator() *validate.ResourceValidato func resourceIBMCdTektonPipelineDefinitionCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "create", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } createTektonPipelineDefinitionOptions := &cdtektonpipelinev2.CreateTektonPipelineDefinitionOptions{} createTektonPipelineDefinitionOptions.SetPipelineID(d.Get("pipeline_id").(string)) - sourceModel, err := resourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(d.Get("source.0").(map[string]interface{})) + sourceModel, err := 
ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(d.Get("source.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "create", "parse-source").GetDiag() } createTektonPipelineDefinitionOptions.SetSource(sourceModel) - definition, response, err := cdTektonPipelineClient.CreateTektonPipelineDefinitionWithContext(context, createTektonPipelineDefinitionOptions) + definition, _, err := cdTektonPipelineClient.CreateTektonPipelineDefinitionWithContext(context, createTektonPipelineDefinitionOptions) if err != nil { - log.Printf("[DEBUG] CreateTektonPipelineDefinitionWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("CreateTektonPipelineDefinitionWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateTektonPipelineDefinitionWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_definition", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s", *createTektonPipelineDefinitionOptions.PipelineID, *definition.ID)) @@ -157,14 +165,16 @@ func resourceIBMCdTektonPipelineDefinitionCreate(context context.Context, d *sch func resourceIBMCdTektonPipelineDefinitionRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineDefinitionOptions := &cdtektonpipelinev2.GetTektonPipelineDefinitionOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), 
"ibm_cd_tekton_pipeline_definition", "read", "sep-id-parts").GetDiag() } getTektonPipelineDefinitionOptions.SetPipelineID(parts[0]) @@ -176,27 +186,28 @@ func resourceIBMCdTektonPipelineDefinitionRead(context context.Context, d *schem d.SetId("") return nil } - log.Printf("[DEBUG] GetTektonPipelineDefinitionWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineDefinitionWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineDefinitionWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_definition", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } - if err = d.Set("pipeline_id", getTektonPipelineDefinitionOptions.PipelineID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting pipeline_id: %s", err)) - } - sourceMap, err := resourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(definition.Source) + sourceMap, err := ResourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(definition.Source) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "read", "source-to-map").GetDiag() } if err = d.Set("source", []map[string]interface{}{sourceMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting source: %s", err)) + err = fmt.Errorf("Error setting source: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "read", "set-source").GetDiag() } if !core.IsNil(definition.Href) { if err = d.Set("href", definition.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + err = fmt.Errorf("Error setting href: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "read", "set-href").GetDiag() } } if err = d.Set("definition_id", definition.ID); err != nil { - return 
diag.FromErr(fmt.Errorf("Error setting definition_id: %s", err)) + err = fmt.Errorf("Error setting definition_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "read", "set-definition_id").GetDiag() } return nil @@ -205,14 +216,16 @@ func resourceIBMCdTektonPipelineDefinitionRead(context context.Context, d *schem func resourceIBMCdTektonPipelineDefinitionUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "update", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } replaceTektonPipelineDefinitionOptions := &cdtektonpipelinev2.ReplaceTektonPipelineDefinitionOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "update", "sep-id-parts").GetDiag() } replaceTektonPipelineDefinitionOptions.SetPipelineID(parts[0]) @@ -221,23 +234,25 @@ func resourceIBMCdTektonPipelineDefinitionUpdate(context context.Context, d *sch hasChange := false if d.HasChange("pipeline_id") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "pipeline_id")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "pipeline_id") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_definition", "update", "pipeline_id-forces-new").GetDiag() } if d.HasChange("source") { - source, err := 
resourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(d.Get("source.0").(map[string]interface{})) + source, err := ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(d.Get("source.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "update", "parse-source").GetDiag() } replaceTektonPipelineDefinitionOptions.SetSource(source) hasChange = true } if hasChange { - _, response, err := cdTektonPipelineClient.ReplaceTektonPipelineDefinitionWithContext(context, replaceTektonPipelineDefinitionOptions) + _, _, err = cdTektonPipelineClient.ReplaceTektonPipelineDefinitionWithContext(context, replaceTektonPipelineDefinitionOptions) if err != nil { - log.Printf("[DEBUG] ReplaceTektonPipelineDefinitionWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("ReplaceTektonPipelineDefinitionWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("ReplaceTektonPipelineDefinitionWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_definition", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } } @@ -247,23 +262,26 @@ func resourceIBMCdTektonPipelineDefinitionUpdate(context context.Context, d *sch func resourceIBMCdTektonPipelineDefinitionDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "delete", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } deleteTektonPipelineDefinitionOptions := &cdtektonpipelinev2.DeleteTektonPipelineDefinitionOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return 
flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_definition", "delete", "sep-id-parts").GetDiag() } deleteTektonPipelineDefinitionOptions.SetPipelineID(parts[0]) deleteTektonPipelineDefinitionOptions.SetDefinitionID(parts[1]) - response, err := cdTektonPipelineClient.DeleteTektonPipelineDefinitionWithContext(context, deleteTektonPipelineDefinitionOptions) + _, err = cdTektonPipelineClient.DeleteTektonPipelineDefinitionWithContext(context, deleteTektonPipelineDefinitionOptions) if err != nil { - log.Printf("[DEBUG] DeleteTektonPipelineDefinitionWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteTektonPipelineDefinitionWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteTektonPipelineDefinitionWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_definition", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId("") @@ -271,10 +289,10 @@ func resourceIBMCdTektonPipelineDefinitionDelete(context context.Context, d *sch return nil } -func resourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(modelMap map[string]interface{}) (*cdtektonpipelinev2.DefinitionSource, error) { +func ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(modelMap map[string]interface{}) (*cdtektonpipelinev2.DefinitionSource, error) { model := &cdtektonpipelinev2.DefinitionSource{} model.Type = core.StringPtr(modelMap["type"].(string)) - PropertiesModel, err := resourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelMap["properties"].([]interface{})[0].(map[string]interface{})) + PropertiesModel, err := ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelMap["properties"].([]interface{})[0].(map[string]interface{})) if err != nil { return model, err } @@ -282,7 +300,7 @@ func resourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(modelMap map[str return model, nil } -func 
resourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelMap map[string]interface{}) (*cdtektonpipelinev2.DefinitionSourceProperties, error) { +func ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelMap map[string]interface{}) (*cdtektonpipelinev2.DefinitionSourceProperties, error) { model := &cdtektonpipelinev2.DefinitionSourceProperties{} model.URL = core.StringPtr(modelMap["url"].(string)) if modelMap["branch"] != nil && modelMap["branch"].(string) != "" { @@ -293,7 +311,7 @@ func resourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelM } model.Path = core.StringPtr(modelMap["path"].(string)) if modelMap["tool"] != nil && len(modelMap["tool"].([]interface{})) > 0 { - ToolModel, err := resourceIBMCdTektonPipelineDefinitionMapToTool(modelMap["tool"].([]interface{})[0].(map[string]interface{})) + ToolModel, err := ResourceIBMCdTektonPipelineDefinitionMapToTool(modelMap["tool"].([]interface{})[0].(map[string]interface{})) if err != nil { return model, err } @@ -302,16 +320,16 @@ func resourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(modelM return model, nil } -func resourceIBMCdTektonPipelineDefinitionMapToTool(modelMap map[string]interface{}) (*cdtektonpipelinev2.Tool, error) { +func ResourceIBMCdTektonPipelineDefinitionMapToTool(modelMap map[string]interface{}) (*cdtektonpipelinev2.Tool, error) { model := &cdtektonpipelinev2.Tool{} model.ID = core.StringPtr(modelMap["id"].(string)) return model, nil } -func resourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtektonpipelinev2.DefinitionSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := resourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model.Properties) + modelMap["type"] = 
*model.Type + propertiesMap, err := ResourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model.Properties) if err != nil { return modelMap, err } @@ -319,18 +337,18 @@ func resourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model *cdtektonp return modelMap, nil } -func resourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model *cdtektonpipelinev2.DefinitionSourceProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL + modelMap["url"] = *model.URL if model.Branch != nil { - modelMap["branch"] = model.Branch + modelMap["branch"] = *model.Branch } if model.Tag != nil { - modelMap["tag"] = model.Tag + modelMap["tag"] = *model.Tag } - modelMap["path"] = model.Path + modelMap["path"] = *model.Path if model.Tool != nil { - toolMap, err := resourceIBMCdTektonPipelineDefinitionToolToMap(model.Tool) + toolMap, err := ResourceIBMCdTektonPipelineDefinitionToolToMap(model.Tool) if err != nil { return modelMap, err } @@ -339,8 +357,8 @@ func resourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model return modelMap, nil } -func resourceIBMCdTektonPipelineDefinitionToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineDefinitionToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + modelMap["id"] = *model.ID return modelMap, nil } diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition_test.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition_test.go index ea4ffb111f..dfbf008421 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition_test.go +++ 
b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_definition_test.go @@ -14,7 +14,10 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineDefinitionBasic(t *testing.T) { @@ -28,17 +31,18 @@ func TestAccIBMCdTektonPipelineDefinitionBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineDefinitionConfigBasic(""), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineDefinitionExists("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", conf), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "definition_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", "source.#"), + testAccCheckIBMCdTektonPipelineDefinitionExists("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", conf), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "definition_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", 
"source.#"), ), }, resource.TestStep{ - ResourceName: "ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pipeline_id"}, }, }, }) @@ -61,7 +65,7 @@ func testAccCheckIBMCdTektonPipelineDefinitionConfigBasic(pipelineID string) str name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -80,8 +84,8 @@ func testAccCheckIBMCdTektonPipelineDefinitionConfigBasic(pipelineID string) str } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -91,7 +95,7 @@ func testAccCheckIBMCdTektonPipelineDefinitionConfigBasic(pipelineID string) str } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } `, rgName, tcName) @@ -162,3 +166,171 @@ func testAccCheckIBMCdTektonPipelineDefinitionDestroy(s *terraform.State) error return nil } + +func TestResourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + 
definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.DefinitionSource) + model.Type = core.StringPtr("testString") + model.Properties = definitionSourcePropertiesModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionDefinitionSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["tag"] = "testString" + model["path"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.DefinitionSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Tag = core.StringPtr("testString") + model.Path = 
core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionDefinitionSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.DefinitionSource) { + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.DefinitionSource) + model.Type = core.StringPtr("testString") + model.Properties = definitionSourcePropertiesModel + + assert.Equal(t, result, model) + } + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = []interface{}{toolModel} + + model := 
make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []interface{}{definitionSourcePropertiesModel} + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSource(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.DefinitionSourceProperties) { + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.DefinitionSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Tag = core.StringPtr("testString") + model.Path = core.StringPtr("testString") + model.Tool = toolModel + + assert.Equal(t, result, model) + } + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["tag"] = "testString" + model["path"] = "testString" + model["tool"] = []interface{}{toolModel} + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionMapToDefinitionSourceProperties(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionMapToTool(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.Tool) { + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["id"] = "testString" + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionMapToTool(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property.go index dfddc9844f..7b1fb15eeb 100644 --- 
a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -41,13 +45,6 @@ func ResourceIBMCdTektonPipelineProperty() *schema.Resource { ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_property", "name"), Description: "Property name.", }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_property", "type"), - Description: "Property type.", - }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -61,6 +58,13 @@ func ResourceIBMCdTektonPipelineProperty() *schema.Resource { Description: "Options for `single_select` property type. Only needed when using `single_select` property type.", Elem: &schema.Schema{Type: schema.TypeString}, }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_property", "type"), + Description: "Property type.", + }, "locked": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -71,7 +75,7 @@ func ResourceIBMCdTektonPipelineProperty() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_property", "path"), - Description: "A dot notation path for `integration` type properties only, to select a value from the tool integration. If left blank the full tool integration data will be used.", + Description: "A dot notation path for `integration` type properties only, that selects a value from the tool integration. 
If left blank the full tool integration data will be used.", }, "href": &schema.Schema{ Type: schema.TypeString, @@ -103,13 +107,6 @@ func ResourceIBMCdTektonPipelinePropertyValidator() *validate.ResourceValidator MinValueLength: 1, MaxValueLength: 253, }, - validate.ValidateSchema{ - Identifier: "type", - ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, - Type: validate.TypeString, - Required: true, - AllowedValues: "appconfig, integration, secure, single_select, text", - }, validate.ValidateSchema{ Identifier: "value", ValidateFunctionIdentifier: validate.ValidateRegexpLen, @@ -119,6 +116,13 @@ func ResourceIBMCdTektonPipelinePropertyValidator() *validate.ResourceValidator MinValueLength: 0, MaxValueLength: 4096, }, + validate.ValidateSchema{ + Identifier: "type", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Required: true, + AllowedValues: "appconfig, integration, secure, single_select, text", + }, validate.ValidateSchema{ Identifier: "path", ValidateFunctionIdentifier: validate.ValidateRegexpLen, @@ -137,7 +141,9 @@ func ResourceIBMCdTektonPipelinePropertyValidator() *validate.ResourceValidator func resourceIBMCdTektonPipelinePropertyCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "create", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } createTektonPipelinePropertiesOptions := &cdtektonpipelinev2.CreateTektonPipelinePropertiesOptions{} @@ -163,10 +169,11 @@ func resourceIBMCdTektonPipelinePropertyCreate(context context.Context, d *schem createTektonPipelinePropertiesOptions.SetPath(d.Get("path").(string)) } - property, response, err := 
cdTektonPipelineClient.CreateTektonPipelinePropertiesWithContext(context, createTektonPipelinePropertiesOptions) + property, _, err := cdTektonPipelineClient.CreateTektonPipelinePropertiesWithContext(context, createTektonPipelinePropertiesOptions) if err != nil { - log.Printf("[DEBUG] CreateTektonPipelinePropertiesWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("CreateTektonPipelinePropertiesWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateTektonPipelinePropertiesWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_property", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s", *createTektonPipelinePropertiesOptions.PipelineID, *property.Name)) @@ -177,14 +184,16 @@ func resourceIBMCdTektonPipelinePropertyCreate(context context.Context, d *schem func resourceIBMCdTektonPipelinePropertyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelinePropertyOptions := &cdtektonpipelinev2.GetTektonPipelinePropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "sep-id-parts").GetDiag() } getTektonPipelinePropertyOptions.SetPipelineID(parts[0]) @@ -196,42 +205,47 @@ func resourceIBMCdTektonPipelinePropertyRead(context context.Context, d *schema. 
d.SetId("") return nil } - log.Printf("[DEBUG] GetTektonPipelinePropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelinePropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelinePropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_property", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } - if err = d.Set("pipeline_id", getTektonPipelinePropertyOptions.PipelineID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting pipeline_id: %s", err)) - } if err = d.Set("name", property.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) - } - if err = d.Set("type", property.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) + err = fmt.Errorf("Error setting name: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-name").GetDiag() } if !core.IsNil(property.Value) { if err = d.Set("value", property.Value); err != nil { - return diag.FromErr(fmt.Errorf("Error setting value: %s", err)) + err = fmt.Errorf("Error setting value: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-value").GetDiag() } } if !core.IsNil(property.Enum) { if err = d.Set("enum", property.Enum); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enum: %s", err)) + err = fmt.Errorf("Error setting enum: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-enum").GetDiag() } } + if err = d.Set("type", property.Type); err != nil { + err = fmt.Errorf("Error setting type: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-type").GetDiag() + } if !core.IsNil(property.Locked) { if err = d.Set("locked", 
property.Locked); err != nil { - return diag.FromErr(fmt.Errorf("Error setting locked: %s", err)) + err = fmt.Errorf("Error setting locked: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-locked").GetDiag() } } if !core.IsNil(property.Path) { if err = d.Set("path", property.Path); err != nil { - return diag.FromErr(fmt.Errorf("Error setting path: %s", err)) + err = fmt.Errorf("Error setting path: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-path").GetDiag() } } if !core.IsNil(property.Href) { if err = d.Set("href", property.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + err = fmt.Errorf("Error setting href: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "read", "set-href").GetDiag() } } @@ -241,14 +255,16 @@ func resourceIBMCdTektonPipelinePropertyRead(context context.Context, d *schema. 
func resourceIBMCdTektonPipelinePropertyUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "update", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } replaceTektonPipelinePropertyOptions := &cdtektonpipelinev2.ReplaceTektonPipelinePropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "update", "sep-id-parts").GetDiag() } replaceTektonPipelinePropertyOptions.SetPipelineID(parts[0]) @@ -259,16 +275,19 @@ func resourceIBMCdTektonPipelinePropertyUpdate(context context.Context, d *schem hasChange := false if d.HasChange("pipeline_id") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "pipeline_id")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "pipeline_id") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_property", "update", "pipeline_id-forces-new").GetDiag() } if d.HasChange("name") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "name")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "name") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_property", "update", "name-forces-new").GetDiag() } if 
d.HasChange("type") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "type")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "type") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_property", "update", "type-forces-new").GetDiag() } if d.HasChange("locked") { replaceTektonPipelinePropertyOptions.SetLocked(d.Get("locked").(bool)) @@ -299,10 +318,11 @@ func resourceIBMCdTektonPipelinePropertyUpdate(context context.Context, d *schem } if hasChange { - _, response, err := cdTektonPipelineClient.ReplaceTektonPipelinePropertyWithContext(context, replaceTektonPipelinePropertyOptions) + _, _, err = cdTektonPipelineClient.ReplaceTektonPipelinePropertyWithContext(context, replaceTektonPipelinePropertyOptions) if err != nil { - log.Printf("[DEBUG] ReplaceTektonPipelinePropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("ReplaceTektonPipelinePropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("ReplaceTektonPipelinePropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_property", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } } @@ -312,23 +332,26 @@ func resourceIBMCdTektonPipelinePropertyUpdate(context context.Context, d *schem func resourceIBMCdTektonPipelinePropertyDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "delete", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return 
tfErr.GetDiag() } deleteTektonPipelinePropertyOptions := &cdtektonpipelinev2.DeleteTektonPipelinePropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_property", "delete", "sep-id-parts").GetDiag() } deleteTektonPipelinePropertyOptions.SetPipelineID(parts[0]) deleteTektonPipelinePropertyOptions.SetPropertyName(parts[1]) - response, err := cdTektonPipelineClient.DeleteTektonPipelinePropertyWithContext(context, deleteTektonPipelinePropertyOptions) + _, err = cdTektonPipelineClient.DeleteTektonPipelinePropertyWithContext(context, deleteTektonPipelinePropertyOptions) if err != nil { - log.Printf("[DEBUG] DeleteTektonPipelinePropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteTektonPipelinePropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteTektonPipelinePropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_property", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId("") diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property_test.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property_test.go index 74171f47ec..b4a23f38c3 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property_test.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_property_test.go @@ -29,9 +29,9 @@ func TestAccIBMCdTektonPipelinePropertyBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelinePropertyConfigBasic("", name, typeVar), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelinePropertyExists("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "name", 
name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "type", typeVar), + testAccCheckIBMCdTektonPipelinePropertyExists("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", conf), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "type", typeVar), ), }, }, @@ -41,8 +41,8 @@ func TestAccIBMCdTektonPipelinePropertyBasic(t *testing.T) { func TestAccIBMCdTektonPipelinePropertyAllArgs(t *testing.T) { var conf cdtektonpipelinev2.Property name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - typeVar := "text" value := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) + typeVar := "text" locked := "true" path := fmt.Sprintf("tf_path_%d", acctest.RandIntRange(10, 100)) valueUpdate := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) @@ -55,28 +55,29 @@ func TestAccIBMCdTektonPipelinePropertyAllArgs(t *testing.T) { CheckDestroy: testAccCheckIBMCdTektonPipelinePropertyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelinePropertyConfig("", name, typeVar, value, locked, path), + Config: testAccCheckIBMCdTektonPipelinePropertyConfig("", name, value, typeVar, locked, path), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelinePropertyExists("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "type", typeVar), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "value", value), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", 
"locked", locked), + testAccCheckIBMCdTektonPipelinePropertyExists("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", conf), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "value", value), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "type", typeVar), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "locked", locked), ), }, resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelinePropertyConfig("", name, typeVar, valueUpdate, lockedUpdate, pathUpdate), + Config: testAccCheckIBMCdTektonPipelinePropertyConfig("", name, valueUpdate, typeVar, lockedUpdate, pathUpdate), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "type", typeVar), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "value", valueUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", "locked", lockedUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "value", valueUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "type", typeVar), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", "locked", lockedUpdate), ), }, resource.TestStep{ - ResourceName: 
"ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pipeline_id"}, }, }, }) @@ -99,7 +100,7 @@ func testAccCheckIBMCdTektonPipelinePropertyConfigBasic(pipelineID string, name name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -109,19 +110,19 @@ func testAccCheckIBMCdTektonPipelinePropertyConfigBasic(pipelineID string, name ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline ] } - resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id name = "property1" type = "text" value = "prop1" depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } `, rgName, tcName) } -func testAccCheckIBMCdTektonPipelinePropertyConfig(pipelineID string, name string, typeVar string, value string, locked string, path string) string { +func testAccCheckIBMCdTektonPipelinePropertyConfig(pipelineID string, name string, value string, typeVar string, locked string, path string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) return fmt.Sprintf(` @@ -138,7 +139,7 @@ func testAccCheckIBMCdTektonPipelinePropertyConfig(pipelineID string, name strin name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" 
"cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -148,14 +149,14 @@ func testAccCheckIBMCdTektonPipelinePropertyConfig(pipelineID string, name strin ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline ] } - resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id name = "%s" type = "text" value = "%s" locked = "%s" depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } `, rgName, tcName, name, value, locked) diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_test.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_test.go index 3469a2cc5a..028a4b6bd3 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_test.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_test.go @@ -13,7 +13,10 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineBasic(t *testing.T) { @@ -27,7 +30,7 @@ func TestAccIBMCdTektonPipelineBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineConfigBasic(), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineExists("ibm_cd_tekton_pipeline.cd_tekton_pipeline", conf), + testAccCheckIBMCdTektonPipelineExists("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", conf), ), }, }, @@ -51,22 
+54,22 @@ func TestAccIBMCdTektonPipelineAllArgs(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineConfig(nextBuildNumber, enableNotifications, enablePartialCloning), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineExists("ibm_cd_tekton_pipeline.cd_tekton_pipeline", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "next_build_number", nextBuildNumber), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_notifications", enableNotifications), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_partial_cloning", enablePartialCloning), + testAccCheckIBMCdTektonPipelineExists("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", conf), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "next_build_number", nextBuildNumber), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_notifications", enableNotifications), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_partial_cloning", enablePartialCloning), ), }, resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineConfig(nextBuildNumberUpdate, enableNotificationsUpdate, enablePartialCloningUpdate), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "next_build_number", nextBuildNumberUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_notifications", enableNotificationsUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline", "enable_partial_cloning", enablePartialCloningUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "next_build_number", nextBuildNumberUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", 
"enable_notifications", enableNotificationsUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", "enable_partial_cloning", enablePartialCloningUpdate), ), }, resource.TestStep{ - ResourceName: "ibm_cd_tekton_pipeline.cd_tekton_pipeline", + ResourceName: "ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance", ImportState: true, ImportStateVerify: true, }, @@ -91,7 +94,7 @@ func testAccCheckIBMCdTektonPipelineConfigBasic() string { name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -121,7 +124,7 @@ func testAccCheckIBMCdTektonPipelineConfig(nextBuildNumber string, enableNotific name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = %s enable_notifications = %s @@ -189,3 +192,799 @@ func testAccCheckIBMCdTektonPipelineDestroy(s *terraform.State) error { return nil } + +func TestResourceIBMCdTektonPipelineWorkerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["type"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Worker) + model.Name = core.StringPtr("testString") + model.Type = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineWorkerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineResourceGroupReferenceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := 
make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.ResourceGroupReference) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineResourceGroupReferenceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineToolchainReferenceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + model["crn"] = "crn:v1:staging:public:toolchain:us-south:a/0ba224679d6c697f9baee5e14ade83ac:bf5fa00f-ddef-4298-b87b-aa8b6da0e1a6::" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.ToolchainReference) + model.ID = core.StringPtr("testString") + model.CRN = core.StringPtr("crn:v1:staging:public:toolchain:us-south:a/0ba224679d6c697f9baee5e14ade83ac:bf5fa00f-ddef-4298-b87b-aa8b6da0e1a6::") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineToolchainReferenceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + definitionSourceModel := make(map[string]interface{}) + definitionSourceModel["type"] = "testString" + definitionSourceModel["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + model := make(map[string]interface{}) + model["source"] = []map[string]interface{}{definitionSourceModel} + 
model["href"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + definitionSourceModel := new(cdtektonpipelinev2.DefinitionSource) + definitionSourceModel.Type = core.StringPtr("testString") + definitionSourceModel.Properties = definitionSourcePropertiesModel + + model := new(cdtektonpipelinev2.Definition) + model.Source = definitionSourceModel + model.Href = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + definitionSourcePropertiesModel := make(map[string]interface{}) + definitionSourcePropertiesModel["url"] = "testString" + definitionSourcePropertiesModel["branch"] = "testString" + definitionSourcePropertiesModel["tag"] = "testString" + definitionSourcePropertiesModel["path"] = "testString" + definitionSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{definitionSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + 
definitionSourcePropertiesModel := new(cdtektonpipelinev2.DefinitionSourceProperties) + definitionSourcePropertiesModel.URL = core.StringPtr("testString") + definitionSourcePropertiesModel.Branch = core.StringPtr("testString") + definitionSourcePropertiesModel.Tag = core.StringPtr("testString") + definitionSourcePropertiesModel.Path = core.StringPtr("testString") + definitionSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.DefinitionSource) + model.Type = core.StringPtr("testString") + model.Properties = definitionSourcePropertiesModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["tag"] = "testString" + model["path"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.DefinitionSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Tag = core.StringPtr("testString") + model.Path = core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineDefinitionSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = 
core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelinePropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["locked"] = true + model["path"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Property) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Locked = core.BoolPtr(true) + model.Path = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelinePropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = 
"testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + triggerSourceModel := make(map[string]interface{}) + triggerSourceModel["type"] = "testString" + triggerSourceModel["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + genericSecretModel := make(map[string]interface{}) + genericSecretModel["type"] = "token_matches" + genericSecretModel["value"] = "testString" + genericSecretModel["source"] = "header" + genericSecretModel["key_name"] = "testString" + genericSecretModel["algorithm"] = "md4" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["enable_events_from_forks"] = false + model["source"] = []map[string]interface{}{triggerSourceModel} + model["events"] = []string{"push", "pull_request"} + model["filter"] = "header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'" + model["cron"] = "testString" + model["timezone"] = "America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC" + model["secret"] = []map[string]interface{}{genericSecretModel} + model["webhook_url"] = "testString" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = 
core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + triggerSourceModel := new(cdtektonpipelinev2.TriggerSource) + triggerSourceModel.Type = core.StringPtr("testString") + triggerSourceModel.Properties = triggerSourcePropertiesModel + + genericSecretModel := new(cdtektonpipelinev2.GenericSecret) + genericSecretModel.Type = core.StringPtr("token_matches") + genericSecretModel.Value = core.StringPtr("testString") + genericSecretModel.Source = core.StringPtr("header") + genericSecretModel.KeyName = core.StringPtr("testString") + genericSecretModel.Algorithm = core.StringPtr("md4") + + model := new(cdtektonpipelinev2.Trigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + 
model.EnableEventsFromForks = core.BoolPtr(false) + model.Source = triggerSourceModel + model.Events = []string{"push", "pull_request"} + model.Filter = core.StringPtr("header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'") + model.Cron = core.StringPtr("testString") + model.Timezone = core.StringPtr("America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC") + model.Secret = genericSecretModel + model.WebhookURL = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerPropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["path"] = "testString" + model["locked"] = true + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.TriggerProperty) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Path = core.StringPtr("testString") + model.Locked = core.BoolPtr(true) + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerPropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = 
true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.TriggerSource) + model.Type = core.StringPtr("testString") + model.Properties = triggerSourcePropertiesModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["pattern"] = "testString" + model["blind_connection"] = true + model["hook_id"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Pattern = core.StringPtr("testString") + 
model.BlindConnection = core.BoolPtr(true) + model.HookID = core.StringPtr("testString") + model.Tool = toolModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineGenericSecretToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["type"] = "token_matches" + model["value"] = "testString" + model["source"] = "header" + model["key_name"] = "testString" + model["algorithm"] = "md4" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.GenericSecret) + model.Type = core.StringPtr("token_matches") + model.Value = core.StringPtr("testString") + model.Source = core.StringPtr("header") + model.KeyName = core.StringPtr("testString") + model.Algorithm = core.StringPtr("md4") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineGenericSecretToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerManualTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] 
= []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerManualTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerManualTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerScmTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = 
"secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = []map[string]interface{}{toolModel} + + triggerSourceModel := make(map[string]interface{}) + triggerSourceModel["type"] = "testString" + triggerSourceModel["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["enable_events_from_forks"] = false + model["source"] = []map[string]interface{}{triggerSourceModel} + model["events"] = []string{"push", "pull_request"} + model["filter"] = "header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = 
[]string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + triggerSourceModel := new(cdtektonpipelinev2.TriggerSource) + triggerSourceModel.Type = core.StringPtr("testString") + triggerSourceModel.Properties = triggerSourcePropertiesModel + + model := new(cdtektonpipelinev2.TriggerScmTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.EnableEventsFromForks = core.BoolPtr(false) + model.Source = triggerSourceModel + model.Events = []string{"push", "pull_request"} + model.Filter = core.StringPtr("header['x-github-event'] == 'push' && body.ref == 'refs/heads/main'") + + result, err := 
cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerScmTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerTimerTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = "start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["cron"] = "testString" + model["timezone"] = "America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + 
workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerTimerTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = []string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.Cron = core.StringPtr("testString") + model.Timezone = core.StringPtr("America/Los_Angeles, CET, Europe/London, GMT, US/Eastern, or UTC") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerTimerTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerGenericTriggerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + triggerPropertyModel := make(map[string]interface{}) + triggerPropertyModel["name"] = "testString" + triggerPropertyModel["value"] = "testString" + triggerPropertyModel["href"] = "testString" + triggerPropertyModel["enum"] = []string{"testString"} + triggerPropertyModel["type"] = "secure" + triggerPropertyModel["path"] = "testString" + triggerPropertyModel["locked"] = true + + workerModel := make(map[string]interface{}) + workerModel["name"] = "testString" + workerModel["type"] = "testString" + workerModel["id"] = "testString" + + genericSecretModel := make(map[string]interface{}) + genericSecretModel["type"] = "token_matches" + genericSecretModel["value"] = "testString" + genericSecretModel["source"] = "header" + genericSecretModel["key_name"] = "testString" + genericSecretModel["algorithm"] = "md4" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["name"] = 
"start-deploy" + model["href"] = "testString" + model["event_listener"] = "testString" + model["id"] = "testString" + model["properties"] = []map[string]interface{}{triggerPropertyModel} + model["tags"] = []string{"testString"} + model["worker"] = []map[string]interface{}{workerModel} + model["max_concurrent_runs"] = int(4) + model["enabled"] = true + model["favorite"] = false + model["secret"] = []map[string]interface{}{genericSecretModel} + model["webhook_url"] = "testString" + model["filter"] = "event.type == 'message' && event.text.contains('urgent')" + + assert.Equal(t, result, model) + } + + triggerPropertyModel := new(cdtektonpipelinev2.TriggerProperty) + triggerPropertyModel.Name = core.StringPtr("testString") + triggerPropertyModel.Value = core.StringPtr("testString") + triggerPropertyModel.Href = core.StringPtr("testString") + triggerPropertyModel.Enum = []string{"testString"} + triggerPropertyModel.Type = core.StringPtr("secure") + triggerPropertyModel.Path = core.StringPtr("testString") + triggerPropertyModel.Locked = core.BoolPtr(true) + + workerModel := new(cdtektonpipelinev2.Worker) + workerModel.Name = core.StringPtr("testString") + workerModel.Type = core.StringPtr("testString") + workerModel.ID = core.StringPtr("testString") + + genericSecretModel := new(cdtektonpipelinev2.GenericSecret) + genericSecretModel.Type = core.StringPtr("token_matches") + genericSecretModel.Value = core.StringPtr("testString") + genericSecretModel.Source = core.StringPtr("header") + genericSecretModel.KeyName = core.StringPtr("testString") + genericSecretModel.Algorithm = core.StringPtr("md4") + + model := new(cdtektonpipelinev2.TriggerGenericTrigger) + model.Type = core.StringPtr("testString") + model.Name = core.StringPtr("start-deploy") + model.Href = core.StringPtr("testString") + model.EventListener = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + model.Properties = []cdtektonpipelinev2.TriggerProperty{*triggerPropertyModel} + model.Tags = 
[]string{"testString"} + model.Worker = workerModel + model.MaxConcurrentRuns = core.Int64Ptr(int64(4)) + model.Enabled = core.BoolPtr(true) + model.Favorite = core.BoolPtr(false) + model.Secret = genericSecretModel + model.WebhookURL = core.StringPtr("testString") + model.Filter = core.StringPtr("event.type == 'message' && event.text.contains('urgent')") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerGenericTriggerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineMapToWorkerIdentity(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.WorkerIdentity) { + model := new(cdtektonpipelinev2.WorkerIdentity) + model.ID = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["id"] = "testString" + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineMapToWorkerIdentity(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger.go index feb3f492c1..c3da6ccc31 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -56,16 +60,26 @@ func ResourceIBMCdTektonPipelineTrigger() *schema.Resource { "tags": &schema.Schema{ Type: schema.TypeList, Optional: true, - Description: "Trigger tags array.", + Description: "Optional trigger tags array.", Elem: &schema.Schema{Type: schema.TypeString}, }, "worker": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, Optional: true, - Description: "Specify the worker used to run the trigger. 
Use `worker: { id: 'public' }` to use the IBM Managed workers. The default is to inherit the worker set in the pipeline settings, which can also be explicitly set using `worker: { id: 'inherit' }`.", + Description: "Details of the worker used to run the trigger.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Name of the worker. Computed based on the worker ID.", + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Type of the worker. Computed based on the worker ID.", + }, "id": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -83,55 +97,19 @@ func ResourceIBMCdTektonPipelineTrigger() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - Description: "Flag to check if the trigger is enabled. If omitted the trigger is enabled by default.", + Description: "Flag to check if the trigger is enabled.", }, - "secret": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, + "favorite": &schema.Schema{ + Type: schema.TypeBool, Optional: true, - Description: "Only needed for Generic Webhook trigger type. 
The secret is used to start the Generic Webhook trigger.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "Secret type.", - }, - "value": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: flex.SuppressGenericWebhookRawSecret, - Description: "Secret value, not needed if secret type is `internal_validation`.", - }, - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Secret location, not needed if secret type is `internal_validation`.", - }, - "key_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Secret name, not needed if type is `internal_validation`.", - }, - "algorithm": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "Algorithm used for `digest_matches` secret type. Only needed for `digest_matches` secret type.", - }, - }, - }, - }, - "cron": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger", "cron"), - Description: "Only needed for timer triggers. CRON expression that indicates when this trigger will activate. Maximum frequency is every 5 minutes. The string is based on UNIX crontab syntax: minute, hour, day of month, month, day of week. Example: The CRON expression 0 *_/2 * * * - translates to - every 2 hours.", + Default: false, + Description: "Mark the trigger as a favorite.", }, - "timezone": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger", "timezone"), - Description: "Only used for timer triggers. Specify the timezone used for this timer trigger, which will ensure the CRON activates this trigger relative to the specified timezone. If no timezone is specified, the default timezone used is UTC. 
Valid timezones are those listed in the IANA timezone database, https://www.iana.org/time-zones.", + "enable_events_from_forks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "When enabled, pull request events from forks of the selected repository will trigger a pipeline run.", }, "source": &schema.Schema{ Type: schema.TypeList, @@ -169,6 +147,30 @@ func ResourceIBMCdTektonPipelineTrigger() *schema.Resource { Optional: true, Description: "The pattern of Git branch or tag. You can specify a glob pattern such as '!test' or '*master' to match against multiple tags or branches in the repository.The glob pattern used must conform to Bash 4.3 specifications, see bash documentation for more info: https://www.gnu.org/software/bash/manual/bash.html#Pattern-Matching. Only one of branch, pattern, or filter should be specified.", }, + "blind_connection": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "True if the repository server is not addressable on the public internet. IBM Cloud will not be able to validate the connection details you provide.", + }, + "hook_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Repository webhook ID. It is generated upon trigger creation.", + }, + "tool": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Reference to the repository tool in the parent toolchain.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "ID of the repository tool instance in the parent toolchain.", + }, + }, + }, + }, }, }, }, @@ -188,17 +190,53 @@ func ResourceIBMCdTektonPipelineTrigger() *schema.Resource { ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger", "filter"), Description: "Either 'events' or 'filter' can be used. 
Stores the CEL (Common Expression Language) expression value which is used for event filtering against the Git webhook payloads.", }, - "favorite": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Mark the trigger as a favorite.", + "cron": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger", "cron"), + Description: "Only needed for timer triggers. CRON expression that indicates when this trigger will activate. Maximum frequency is every 5 minutes. The string is based on UNIX crontab syntax: minute, hour, day of month, month, day of week. Example: The CRON expression 0 *_/2 * * * - translates to - every 2 hours.", }, - "enable_events_from_forks": &schema.Schema{ - Type: schema.TypeBool, + "timezone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger", "timezone"), + Description: "Only used for timer triggers. Specify the timezone used for this timer trigger, which will ensure the CRON activates this trigger relative to the specified timezone. If no timezone is specified, the default timezone used is UTC. Valid timezones are those listed in the IANA timezone database, https://www.iana.org/time-zones.", + }, + "secret": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, Optional: true, - Default: false, - Description: "Only used for SCM triggers. When enabled, pull request events from forks of the selected repository will trigger a pipeline run.", + Description: "Only needed for Generic Webhook trigger type. 
The secret is used to start the Generic Webhook trigger.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Secret type.", + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.SuppressGenericWebhookRawSecret, + Description: "Secret value, not needed if secret type is `internal_validation`.", + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Secret location, not needed if secret type is `internal_validation`.", + }, + "key_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Secret name, not needed if type is `internal_validation`.", + }, + "algorithm": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Algorithm used for `digest_matches` secret type. Only needed for `digest_matches` secret type.", + }, + }, + }, }, "href": &schema.Schema{ Type: schema.TypeString, @@ -213,41 +251,46 @@ func ResourceIBMCdTektonPipelineTrigger() *schema.Resource { Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property name.", }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, DiffSuppressFunc: flex.SuppressTriggerPropertyRawSecret, Description: "Property value. Any string value is valid.", }, "href": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, Description: "API URL for interacting with the trigger property.", }, "enum": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, Description: "Options for `single_select` property type. 
Only needed for `single_select` property type.", Elem: &schema.Schema{Type: schema.TypeString}, }, "type": &schema.Schema{ Type: schema.TypeString, - Required: true, + Computed: true, ForceNew: true, Description: "Property type.", }, "path": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, Description: "A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used.", }, "locked": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Computed: true, Description: "When true, this property cannot be overridden at runtime. Attempting to override it will result in run requests being rejected. The default is false.", }, }, @@ -304,6 +347,15 @@ func ResourceIBMCdTektonPipelineTriggerValidator() *validate.ResourceValidator { MinValueLength: 1, MaxValueLength: 253, }, + validate.ValidateSchema{ + Identifier: "filter", + ValidateFunctionIdentifier: validate.ValidateRegexpLen, + Type: validate.TypeString, + Optional: true, + Regexp: `^.*$`, + MinValueLength: 1, + MaxValueLength: 4096, + }, validate.ValidateSchema{ Identifier: "cron", ValidateFunctionIdentifier: validate.ValidateRegexpLen, @@ -322,15 +374,6 @@ func ResourceIBMCdTektonPipelineTriggerValidator() *validate.ResourceValidator { MinValueLength: 1, MaxValueLength: 253, }, - validate.ValidateSchema{ - Identifier: "filter", - ValidateFunctionIdentifier: validate.ValidateRegexpLen, - Type: validate.TypeString, - Optional: true, - Regexp: `^.*$`, - MinValueLength: 1, - MaxValueLength: 4096, - }, ) resourceValidator := validate.ResourceValidator{ResourceName: "ibm_cd_tekton_pipeline_trigger", Schema: validateSchema} @@ -340,7 +383,9 @@ func ResourceIBMCdTektonPipelineTriggerValidator() *validate.ResourceValidator { func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := 
meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "create", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } createTektonPipelineTriggerOptions := &cdtektonpipelinev2.CreateTektonPipelineTriggerOptions{} @@ -350,16 +395,17 @@ func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema createTektonPipelineTriggerOptions.SetName(d.Get("name").(string)) createTektonPipelineTriggerOptions.SetEventListener(d.Get("event_listener").(string)) if _, ok := d.GetOk("tags"); ok { - tags := []string{} - for _, tagsItem := range d.Get("tags").([]interface{}) { - tags = append(tags, tagsItem.(string)) + var tags []string + for _, v := range d.Get("tags").([]interface{}) { + tagsItem := v.(string) + tags = append(tags, tagsItem) } createTektonPipelineTriggerOptions.SetTags(tags) } if _, ok := d.GetOk("worker"); ok { - workerModel, err := resourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) + workerModel, err := ResourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "create", "parse-worker").GetDiag() } createTektonPipelineTriggerOptions.SetWorker(workerModel) } @@ -370,9 +416,9 @@ func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema createTektonPipelineTriggerOptions.SetEnabled(d.Get("enabled").(bool)) } if _, ok := d.GetOk("secret"); ok { - secretModel, err := resourceIBMCdTektonPipelineTriggerMapToGenericSecret(d.Get("secret.0").(map[string]interface{})) + secretModel, err := ResourceIBMCdTektonPipelineTriggerMapToGenericSecret(d.Get("secret.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return 
flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "create", "parse-secret").GetDiag() } createTektonPipelineTriggerOptions.SetSecret(secretModel) } @@ -383,9 +429,9 @@ func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema createTektonPipelineTriggerOptions.SetTimezone(d.Get("timezone").(string)) } if _, ok := d.GetOk("source"); ok { - sourceModel, err := resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(d.Get("source.0").(map[string]interface{})) + sourceModel, err := ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(d.Get("source.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "create", "parse-source").GetDiag() } createTektonPipelineTriggerOptions.SetSource(sourceModel) } @@ -408,10 +454,11 @@ func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema createTektonPipelineTriggerOptions.SetEnableEventsFromForks(d.Get("enable_events_from_forks").(bool)) } - triggerIntf, response, err := cdTektonPipelineClient.CreateTektonPipelineTriggerWithContext(context, createTektonPipelineTriggerOptions) + triggerIntf, _, err := cdTektonPipelineClient.CreateTektonPipelineTriggerWithContext(context, createTektonPipelineTriggerOptions) if err != nil { - log.Printf("[DEBUG] CreateTektonPipelineTriggerWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("CreateTektonPipelineTriggerWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateTektonPipelineTriggerWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } trigger := triggerIntf.(*cdtektonpipelinev2.Trigger) @@ -423,14 +470,16 @@ func resourceIBMCdTektonPipelineTriggerCreate(context context.Context, d *schema func 
resourceIBMCdTektonPipelineTriggerRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineTriggerOptions := &cdtektonpipelinev2.GetTektonPipelineTriggerOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "sep-id-parts").GetDiag() } getTektonPipelineTriggerOptions.SetPipelineID(parts[0]) @@ -442,121 +491,138 @@ func resourceIBMCdTektonPipelineTriggerRead(context context.Context, d *schema.R d.SetId("") return nil } - log.Printf("[DEBUG] GetTektonPipelineTriggerWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineTriggerWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineTriggerWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } trigger := triggerIntf.(*cdtektonpipelinev2.Trigger) - if err = d.Set("pipeline_id", getTektonPipelineTriggerOptions.PipelineID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting pipeline_id: %s", err)) - } if err = d.Set("type", trigger.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) + err = fmt.Errorf("Error setting type: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-type").GetDiag() } if err = d.Set("name", trigger.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) 
+ err = fmt.Errorf("Error setting name: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-name").GetDiag() } if err = d.Set("event_listener", trigger.EventListener); err != nil { - return diag.FromErr(fmt.Errorf("Error setting event_listener: %s", err)) + err = fmt.Errorf("Error setting event_listener: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-event_listener").GetDiag() } if !core.IsNil(trigger.Tags) { if err = d.Set("tags", trigger.Tags); err != nil { - return diag.FromErr(fmt.Errorf("Error setting tags: %s", err)) + err = fmt.Errorf("Error setting tags: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-tags").GetDiag() } } if !core.IsNil(trigger.Worker) { - workerMap, err := resourceIBMCdTektonPipelineTriggerWorkerIdentityToMap(trigger.Worker) + workerMap, err := ResourceIBMCdTektonPipelineTriggerWorkerToMap(trigger.Worker) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "worker-to-map").GetDiag() } if err = d.Set("worker", []map[string]interface{}{workerMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting worker: %s", err)) + err = fmt.Errorf("Error setting worker: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-worker").GetDiag() } } if !core.IsNil(trigger.MaxConcurrentRuns) { if err = d.Set("max_concurrent_runs", flex.IntValue(trigger.MaxConcurrentRuns)); err != nil { - return diag.FromErr(fmt.Errorf("Error setting max_concurrent_runs: %s", err)) + err = fmt.Errorf("Error setting max_concurrent_runs: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-max_concurrent_runs").GetDiag() } } if 
!core.IsNil(trigger.Enabled) { if err = d.Set("enabled", trigger.Enabled); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enabled: %s", err)) - } - } - if !core.IsNil(trigger.Secret) { - secretMap, err := resourceIBMCdTektonPipelineTriggerGenericSecretToMap(trigger.Secret) - if err != nil { - return diag.FromErr(err) - } - if err = d.Set("secret", []map[string]interface{}{secretMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting secret: %s", err)) + err = fmt.Errorf("Error setting enabled: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-enabled").GetDiag() } } - if !core.IsNil(trigger.Cron) { - if err = d.Set("cron", trigger.Cron); err != nil { - return diag.FromErr(fmt.Errorf("Error setting cron: %s", err)) + if !core.IsNil(trigger.Favorite) { + if err = d.Set("favorite", trigger.Favorite); err != nil { + err = fmt.Errorf("Error setting favorite: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-favorite").GetDiag() } } - if !core.IsNil(trigger.Timezone) { - if err = d.Set("timezone", trigger.Timezone); err != nil { - return diag.FromErr(fmt.Errorf("Error setting timezone: %s", err)) + if !core.IsNil(trigger.EnableEventsFromForks) { + if err = d.Set("enable_events_from_forks", trigger.EnableEventsFromForks); err != nil { + err = fmt.Errorf("Error setting enable_events_from_forks: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-enable_events_from_forks").GetDiag() } } if !core.IsNil(trigger.Source) { - sourceMap, err := resourceIBMCdTektonPipelineTriggerTriggerSourcePrototypeToMap(trigger.Source) + sourceMap, err := ResourceIBMCdTektonPipelineTriggerTriggerSourceToMap(trigger.Source) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", 
"source-to-map").GetDiag() } if err = d.Set("source", []map[string]interface{}{sourceMap}); err != nil { - return diag.FromErr(fmt.Errorf("Error setting source: %s", err)) + err = fmt.Errorf("Error setting source: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-source").GetDiag() } } if !core.IsNil(trigger.Events) { if err = d.Set("events", trigger.Events); err != nil { - return diag.FromErr(fmt.Errorf("Error setting events: %s", err)) + err = fmt.Errorf("Error setting events: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-events").GetDiag() } } if !core.IsNil(trigger.Filter) { if err = d.Set("filter", trigger.Filter); err != nil { - return diag.FromErr(fmt.Errorf("Error setting filter: %s", err)) + err = fmt.Errorf("Error setting filter: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-filter").GetDiag() } } - if !core.IsNil(trigger.Favorite) { - if err = d.Set("favorite", trigger.Favorite); err != nil { - return diag.FromErr(fmt.Errorf("Error setting favorite: %s", err)) + if !core.IsNil(trigger.Cron) { + if err = d.Set("cron", trigger.Cron); err != nil { + err = fmt.Errorf("Error setting cron: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-cron").GetDiag() } } - if !core.IsNil(trigger.EnableEventsFromForks) { - if err = d.Set("enable_events_from_forks", trigger.EnableEventsFromForks); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enable_events_from_forks: %s", err)) + if !core.IsNil(trigger.Timezone) { + if err = d.Set("timezone", trigger.Timezone); err != nil { + err = fmt.Errorf("Error setting timezone: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-timezone").GetDiag() + } + } + if 
!core.IsNil(trigger.Secret) { + secretMap, err := ResourceIBMCdTektonPipelineTriggerGenericSecretToMap(trigger.Secret) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "secret-to-map").GetDiag() + } + if err = d.Set("secret", []map[string]interface{}{secretMap}); err != nil { + err = fmt.Errorf("Error setting secret: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-secret").GetDiag() } } if !core.IsNil(trigger.Href) { if err = d.Set("href", trigger.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + err = fmt.Errorf("Error setting href: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-href").GetDiag() } } if !core.IsNil(trigger.Properties) { properties := []map[string]interface{}{} for _, propertiesItem := range trigger.Properties { - propertiesItemMap, err := resourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(&propertiesItem) + propertiesItemMap, err := ResourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(&propertiesItem) // #nosec G601 if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "properties-to-map").GetDiag() } properties = append(properties, propertiesItemMap) } if err = d.Set("properties", properties); err != nil { - return diag.FromErr(fmt.Errorf("Error setting properties: %s", err)) + err = fmt.Errorf("Error setting properties: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-properties").GetDiag() } } if !core.IsNil(trigger.WebhookURL) { if err = d.Set("webhook_url", trigger.WebhookURL); err != nil { - return diag.FromErr(fmt.Errorf("Error setting webhook_url: %s", err)) + err = fmt.Errorf("Error setting webhook_url: %s", err) + return 
flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-webhook_url").GetDiag() } } if !core.IsNil(trigger.ID) { if err = d.Set("trigger_id", trigger.ID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting trigger_id: %s", err)) + err = fmt.Errorf("Error setting trigger_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "read", "set-trigger_id").GetDiag() } } @@ -566,14 +632,16 @@ func resourceIBMCdTektonPipelineTriggerRead(context context.Context, d *schema.R func resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "update", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } updateTektonPipelineTriggerOptions := &cdtektonpipelinev2.UpdateTektonPipelineTriggerOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "update", "sep-id-parts").GetDiag() } updateTektonPipelineTriggerOptions.SetPipelineID(parts[0]) @@ -583,30 +651,38 @@ func resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema patchVals := &cdtektonpipelinev2.TriggerPatch{} if d.HasChange("pipeline_id") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "pipeline_id")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "pipeline_id") + return flex.DiscriminatedTerraformErrorf(nil, 
errMsg, "ibm_cd_tekton_pipeline_trigger", "update", "pipeline_id-forces-new").GetDiag() } - if d.HasChange("type") || d.HasChange("name") || d.HasChange("event_listener") { + if d.HasChange("type") { newType := d.Get("type").(string) patchVals.Type = &newType + hasChange = true + } + if d.HasChange("name") { newName := d.Get("name").(string) patchVals.Name = &newName + hasChange = true + } + if d.HasChange("event_listener") { newEventListener := d.Get("event_listener").(string) patchVals.EventListener = &newEventListener hasChange = true } if d.HasChange("tags") { - tags := []string{} - for _, tagsItem := range d.Get("tags").([]interface{}) { - tags = append(tags, tagsItem.(string)) + var tags []string + for _, v := range d.Get("tags").([]interface{}) { + tagsItem := v.(string) + tags = append(tags, tagsItem) } patchVals.Tags = tags hasChange = true } if d.HasChange("worker") { - worker, err := resourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) + worker, err := ResourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(d.Get("worker.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "update", "parse-worker").GetDiag() } patchVals.Worker = worker hasChange = true @@ -622,9 +698,9 @@ func resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema hasChange = true } if d.HasChange("secret") { - secret, err := resourceIBMCdTektonPipelineTriggerMapToGenericSecret(d.Get("secret.0").(map[string]interface{})) + secret, err := ResourceIBMCdTektonPipelineTriggerMapToGenericSecret(d.Get("secret.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "update", "parse-secret").GetDiag() } patchVals.Secret = secret hasChange = true @@ -640,9 +716,9 @@ func 
resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema hasChange = true } if d.HasChange("source") { - source, err := resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(d.Get("source.0").(map[string]interface{})) + source, err := ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(d.Get("source.0").(map[string]interface{})) if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "update", "parse-source").GetDiag() } patchVals.Source = source hasChange = true @@ -675,11 +751,16 @@ func resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema } if hasChange { - updateTektonPipelineTriggerOptions.TriggerPatch, _ = patchVals.AsPatch() - _, response, err := cdTektonPipelineClient.UpdateTektonPipelineTriggerWithContext(context, updateTektonPipelineTriggerOptions) + // Fields with `nil` values are omitted from the generic map, + // so we need to re-add them to support removing arguments + // in merge-patch operations sent to the service. 
+ updateTektonPipelineTriggerOptions.TriggerPatch = ResourceIBMCdTektonPipelineTriggerTriggerPatchAsPatch(patchVals, d) + + _, _, err = cdTektonPipelineClient.UpdateTektonPipelineTriggerWithContext(context, updateTektonPipelineTriggerOptions) if err != nil { - log.Printf("[DEBUG] UpdateTektonPipelineTriggerWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("UpdateTektonPipelineTriggerWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("UpdateTektonPipelineTriggerWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } } @@ -689,23 +770,26 @@ func resourceIBMCdTektonPipelineTriggerUpdate(context context.Context, d *schema func resourceIBMCdTektonPipelineTriggerDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "delete", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } deleteTektonPipelineTriggerOptions := &cdtektonpipelinev2.DeleteTektonPipelineTriggerOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger", "delete", "sep-id-parts").GetDiag() } deleteTektonPipelineTriggerOptions.SetPipelineID(parts[0]) deleteTektonPipelineTriggerOptions.SetTriggerID(parts[1]) - response, err := cdTektonPipelineClient.DeleteTektonPipelineTriggerWithContext(context, deleteTektonPipelineTriggerOptions) + _, err = cdTektonPipelineClient.DeleteTektonPipelineTriggerWithContext(context, deleteTektonPipelineTriggerOptions) if err != nil { - log.Printf("[DEBUG] 
DeleteTektonPipelineTriggerWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteTektonPipelineTriggerWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteTektonPipelineTriggerWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId("") @@ -713,13 +797,13 @@ func resourceIBMCdTektonPipelineTriggerDelete(context context.Context, d *schema return nil } -func resourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(modelMap map[string]interface{}) (*cdtektonpipelinev2.WorkerIdentity, error) { +func ResourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(modelMap map[string]interface{}) (*cdtektonpipelinev2.WorkerIdentity, error) { model := &cdtektonpipelinev2.WorkerIdentity{} model.ID = core.StringPtr(modelMap["id"].(string)) return model, nil } -func resourceIBMCdTektonPipelineTriggerMapToGenericSecret(modelMap map[string]interface{}) (*cdtektonpipelinev2.GenericSecret, error) { +func ResourceIBMCdTektonPipelineTriggerMapToGenericSecret(modelMap map[string]interface{}) (*cdtektonpipelinev2.GenericSecret, error) { model := &cdtektonpipelinev2.GenericSecret{} if modelMap["type"] != nil && modelMap["type"].(string) != "" { model.Type = core.StringPtr(modelMap["type"].(string)) @@ -739,10 +823,10 @@ func resourceIBMCdTektonPipelineTriggerMapToGenericSecret(modelMap map[string]in return model, nil } -func resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(modelMap map[string]interface{}) (*cdtektonpipelinev2.TriggerSourcePrototype, error) { +func ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(modelMap map[string]interface{}) (*cdtektonpipelinev2.TriggerSourcePrototype, error) { model := &cdtektonpipelinev2.TriggerSourcePrototype{} model.Type = core.StringPtr(modelMap["type"].(string)) - PropertiesModel, err := 
resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(modelMap["properties"].([]interface{})[0].(map[string]interface{})) + PropertiesModel, err := ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(modelMap["properties"].([]interface{})[0].(map[string]interface{})) if err != nil { return model, err } @@ -750,7 +834,7 @@ func resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(modelMap map[ return model, nil } -func resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(modelMap map[string]interface{}) (*cdtektonpipelinev2.TriggerSourcePropertiesPrototype, error) { +func ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(modelMap map[string]interface{}) (*cdtektonpipelinev2.TriggerSourcePropertiesPrototype, error) { model := &cdtektonpipelinev2.TriggerSourcePropertiesPrototype{} model.URL = core.StringPtr(modelMap["url"].(string)) if modelMap["branch"] != nil && modelMap["branch"].(string) != "" { @@ -762,73 +846,189 @@ func resourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(mod return model, nil } -func resourceIBMCdTektonPipelineTriggerWorkerIdentityToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerWorkerToMap(model *cdtektonpipelinev2.Worker) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["id"] = model.ID + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.Type != nil { + modelMap["type"] = *model.Type + } + modelMap["id"] = *model.ID return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Type != nil { - modelMap["type"] = model.Type + modelMap["type"] 
= *model.Type + propertiesMap, err := ResourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model.Properties) + if err != nil { + return modelMap, err } - if model.Value != nil { - modelMap["value"] = model.Value + modelMap["properties"] = []map[string]interface{}{propertiesMap} + return modelMap, nil +} + +func ResourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["url"] = *model.URL + if model.Branch != nil { + modelMap["branch"] = *model.Branch } - if model.Source != nil { - modelMap["source"] = model.Source + if model.Pattern != nil { + modelMap["pattern"] = *model.Pattern } - if model.KeyName != nil { - modelMap["key_name"] = model.KeyName + modelMap["blind_connection"] = *model.BlindConnection + if model.HookID != nil { + modelMap["hook_id"] = *model.HookID } - if model.Algorithm != nil { - modelMap["algorithm"] = model.Algorithm + toolMap, err := ResourceIBMCdTektonPipelineTriggerToolToMap(model.Tool) + if err != nil { + return modelMap, err } + modelMap["tool"] = []map[string]interface{}{toolMap} return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerTriggerSourcePrototypeToMap(model *cdtektonpipelinev2.TriggerSource) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerToolToMap(model *cdtektonpipelinev2.Tool) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["type"] = model.Type - propertiesMap, err := resourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesPrototypeToMap(model.Properties) - if err != nil { - return modelMap, err - } - modelMap["properties"] = []map[string]interface{}{propertiesMap} + modelMap["id"] = *model.ID return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesPrototypeToMap(model *cdtektonpipelinev2.TriggerSourceProperties) (map[string]interface{}, error) { +func 
ResourceIBMCdTektonPipelineTriggerGenericSecretToMap(model *cdtektonpipelinev2.GenericSecret) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["url"] = model.URL - if model.Branch != nil { - modelMap["branch"] = model.Branch + if model.Type != nil { + modelMap["type"] = *model.Type } - if model.Pattern != nil { - modelMap["pattern"] = model.Pattern + if model.Value != nil { + modelMap["value"] = *model.Value + } + if model.Source != nil { + modelMap["source"] = *model.Source + } + if model.KeyName != nil { + modelMap["key_name"] = *model.KeyName + } + if model.Algorithm != nil { + modelMap["algorithm"] = *model.Algorithm } return modelMap, nil } -func resourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { +func ResourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model *cdtektonpipelinev2.TriggerProperty) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + modelMap["name"] = *model.Name if model.Value != nil { - modelMap["value"] = model.Value + modelMap["value"] = *model.Value } if model.Href != nil { - modelMap["href"] = model.Href + modelMap["href"] = *model.Href } if model.Enum != nil { modelMap["enum"] = model.Enum } - modelMap["type"] = model.Type + modelMap["type"] = *model.Type if model.Path != nil { - modelMap["path"] = model.Path + modelMap["path"] = *model.Path } if model.Locked != nil { - modelMap["locked"] = model.Locked + modelMap["locked"] = *model.Locked } return modelMap, nil } + +func ResourceIBMCdTektonPipelineTriggerTriggerPatchAsPatch(patchVals *cdtektonpipelinev2.TriggerPatch, d *schema.ResourceData) map[string]interface{} { + patch, _ := patchVals.AsPatch() + var path string + + path = "type" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["type"] = nil + } + path = "name" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + 
patch["name"] = nil + } + path = "event_listener" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["event_listener"] = nil + } + path = "tags" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["tags"] = nil + } + path = "worker" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["worker"] = nil + } + path = "max_concurrent_runs" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["max_concurrent_runs"] = nil + } + path = "enabled" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["enabled"] = nil + } + path = "secret" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["secret"] = nil + } else if exists && patch["secret"] != nil { + ResourceIBMCdTektonPipelineTriggerGenericSecretAsPatch(patch["secret"].(map[string]interface{}), d) + } + path = "cron" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["cron"] = nil + } + path = "timezone" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["timezone"] = nil + } + path = "source" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["source"] = nil + } + path = "events" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["events"] = nil + } + path = "filter" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["filter"] = nil + } + path = "favorite" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["favorite"] = nil + } + path = "enable_events_from_forks" + if _, exists := d.GetOkExists(path); d.HasChange(path) && !exists { + patch["enable_events_from_forks"] = nil + } + + return patch +} + +func ResourceIBMCdTektonPipelineTriggerGenericSecretAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "secret.0.type" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["type"] = nil + } + path = "secret.0.value" + if _, 
exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["value"] = nil + } + path = "secret.0.source" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["source"] = nil + } + path = "secret.0.key_name" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["key_name"] = nil + } + path = "secret.0.algorithm" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["algorithm"] = nil + } +} diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property.go index 871b7e30a1..46e8a6641e 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property.go @@ -1,6 +1,10 @@ // Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 +/* + * IBM OpenAPI Terraform Generator Version: 3.95.2-120e65bc-20240924-152329 + */ + package cdtektonpipeline import ( @@ -48,13 +52,6 @@ func ResourceIBMCdTektonPipelineTriggerProperty() *schema.Resource { ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger_property", "name"), Description: "Property name.", }, - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger_property", "type"), - Description: "Property type.", - }, "value": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -68,11 +65,18 @@ func ResourceIBMCdTektonPipelineTriggerProperty() *schema.Resource { Description: "Options for `single_select` property type. 
Only needed for `single_select` property type.", Elem: &schema.Schema{Type: schema.TypeString}, }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger_property", "type"), + Description: "Property type.", + }, "path": &schema.Schema{ Type: schema.TypeString, Optional: true, ValidateFunc: validate.InvokeValidator("ibm_cd_tekton_pipeline_trigger_property", "path"), - Description: "A dot notation path for `integration` type properties only, to select a value from the tool integration. If left blank the full tool integration data will be used.", + Description: "A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used.", }, "locked": &schema.Schema{ Type: schema.TypeBool, @@ -119,13 +123,6 @@ func ResourceIBMCdTektonPipelineTriggerPropertyValidator() *validate.ResourceVal MinValueLength: 1, MaxValueLength: 253, }, - validate.ValidateSchema{ - Identifier: "type", - ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, - Type: validate.TypeString, - Required: true, - AllowedValues: "appconfig, integration, secure, single_select, text", - }, validate.ValidateSchema{ Identifier: "value", ValidateFunctionIdentifier: validate.ValidateRegexpLen, @@ -135,6 +132,13 @@ func ResourceIBMCdTektonPipelineTriggerPropertyValidator() *validate.ResourceVal MinValueLength: 0, MaxValueLength: 4096, }, + validate.ValidateSchema{ + Identifier: "type", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Required: true, + AllowedValues: "appconfig, integration, secure, single_select, text", + }, validate.ValidateSchema{ Identifier: "path", ValidateFunctionIdentifier: validate.ValidateRegexpLen, @@ -153,7 +157,9 @@ func ResourceIBMCdTektonPipelineTriggerPropertyValidator() *validate.ResourceVal func 
resourceIBMCdTektonPipelineTriggerPropertyCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "create", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } createTektonPipelineTriggerPropertiesOptions := &cdtektonpipelinev2.CreateTektonPipelineTriggerPropertiesOptions{} @@ -180,10 +186,11 @@ func resourceIBMCdTektonPipelineTriggerPropertyCreate(context context.Context, d createTektonPipelineTriggerPropertiesOptions.SetLocked(d.Get("locked").(bool)) } - triggerProperty, response, err := cdTektonPipelineClient.CreateTektonPipelineTriggerPropertiesWithContext(context, createTektonPipelineTriggerPropertiesOptions) + triggerProperty, _, err := cdTektonPipelineClient.CreateTektonPipelineTriggerPropertiesWithContext(context, createTektonPipelineTriggerPropertiesOptions) if err != nil { - log.Printf("[DEBUG] CreateTektonPipelineTriggerPropertiesWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("CreateTektonPipelineTriggerPropertiesWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateTektonPipelineTriggerPropertiesWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger_property", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId(fmt.Sprintf("%s/%s/%s", *createTektonPipelineTriggerPropertiesOptions.PipelineID, *createTektonPipelineTriggerPropertiesOptions.TriggerID, *triggerProperty.Name)) @@ -194,14 +201,16 @@ func resourceIBMCdTektonPipelineTriggerPropertyCreate(context context.Context, d func resourceIBMCdTektonPipelineTriggerPropertyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { 
cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getTektonPipelineTriggerPropertyOptions := &cdtektonpipelinev2.GetTektonPipelineTriggerPropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "sep-id-parts").GetDiag() } getTektonPipelineTriggerPropertyOptions.SetPipelineID(parts[0]) @@ -214,45 +223,47 @@ func resourceIBMCdTektonPipelineTriggerPropertyRead(context context.Context, d * d.SetId("") return nil } - log.Printf("[DEBUG] GetTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("GetTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetTektonPipelineTriggerPropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger_property", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } - if err = d.Set("pipeline_id", getTektonPipelineTriggerPropertyOptions.PipelineID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting pipeline_id: %s", err)) - } - if err = d.Set("trigger_id", getTektonPipelineTriggerPropertyOptions.TriggerID); err != nil { - return diag.FromErr(fmt.Errorf("Error setting trigger_id: %s", err)) - } if err = d.Set("name", triggerProperty.Name); err != nil { - return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) - } - if err = d.Set("type", triggerProperty.Type); err != nil { - return diag.FromErr(fmt.Errorf("Error setting type: %s", err)) + err = fmt.Errorf("Error setting name: %s", err) + return 
flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-name").GetDiag() } if !core.IsNil(triggerProperty.Value) { if err = d.Set("value", triggerProperty.Value); err != nil { - return diag.FromErr(fmt.Errorf("Error setting value: %s", err)) + err = fmt.Errorf("Error setting value: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-value").GetDiag() } } if !core.IsNil(triggerProperty.Enum) { if err = d.Set("enum", triggerProperty.Enum); err != nil { - return diag.FromErr(fmt.Errorf("Error setting enum: %s", err)) + err = fmt.Errorf("Error setting enum: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-enum").GetDiag() } } + if err = d.Set("type", triggerProperty.Type); err != nil { + err = fmt.Errorf("Error setting type: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-type").GetDiag() + } if !core.IsNil(triggerProperty.Path) { if err = d.Set("path", triggerProperty.Path); err != nil { - return diag.FromErr(fmt.Errorf("Error setting path: %s", err)) + err = fmt.Errorf("Error setting path: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-path").GetDiag() } } if !core.IsNil(triggerProperty.Locked) { if err = d.Set("locked", triggerProperty.Locked); err != nil { - return diag.FromErr(fmt.Errorf("Error setting locked: %s", err)) + err = fmt.Errorf("Error setting locked: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-locked").GetDiag() } } if !core.IsNil(triggerProperty.Href) { if err = d.Set("href", triggerProperty.Href); err != nil { - return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + err = fmt.Errorf("Error setting 
href: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "read", "set-href").GetDiag() } } @@ -262,14 +273,16 @@ func resourceIBMCdTektonPipelineTriggerPropertyRead(context context.Context, d * func resourceIBMCdTektonPipelineTriggerPropertyUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "update", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } replaceTektonPipelineTriggerPropertyOptions := &cdtektonpipelinev2.ReplaceTektonPipelineTriggerPropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "update", "sep-id-parts").GetDiag() } replaceTektonPipelineTriggerPropertyOptions.SetPipelineID(parts[0]) @@ -281,20 +294,24 @@ func resourceIBMCdTektonPipelineTriggerPropertyUpdate(context context.Context, d hasChange := false if d.HasChange("pipeline_id") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "pipeline_id")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "pipeline_id") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_trigger_property", "update", "pipeline_id-forces-new").GetDiag() } if d.HasChange("trigger_id") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this 
property.", "trigger_id")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "trigger_id") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_trigger_property", "update", "trigger_id-forces-new").GetDiag() } if d.HasChange("name") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "name")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "name") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_trigger_property", "update", "name-forces-new").GetDiag() } if d.HasChange("type") { - return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ - " The resource must be re-created to update this property.", "type")) + errMsg := fmt.Sprintf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "type") + return flex.DiscriminatedTerraformErrorf(nil, errMsg, "ibm_cd_tekton_pipeline_trigger_property", "update", "type-forces-new").GetDiag() } if d.HasChange("locked") { replaceTektonPipelineTriggerPropertyOptions.SetLocked(d.Get("locked").(bool)) @@ -325,10 +342,11 @@ func resourceIBMCdTektonPipelineTriggerPropertyUpdate(context context.Context, d } if hasChange { - _, response, err := cdTektonPipelineClient.ReplaceTektonPipelineTriggerPropertyWithContext(context, replaceTektonPipelineTriggerPropertyOptions) + _, _, err = cdTektonPipelineClient.ReplaceTektonPipelineTriggerPropertyWithContext(context, replaceTektonPipelineTriggerPropertyOptions) if err != nil { - log.Printf("[DEBUG] ReplaceTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response) - 
return diag.FromErr(fmt.Errorf("ReplaceTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response)) + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("ReplaceTektonPipelineTriggerPropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger_property", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } } @@ -338,24 +356,27 @@ func resourceIBMCdTektonPipelineTriggerPropertyUpdate(context context.Context, d func resourceIBMCdTektonPipelineTriggerPropertyDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { cdTektonPipelineClient, err := meta.(conns.ClientSession).CdTektonPipelineV2() if err != nil { - return diag.FromErr(err) + tfErr := flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "delete", "initialize-client") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } deleteTektonPipelineTriggerPropertyOptions := &cdtektonpipelinev2.DeleteTektonPipelineTriggerPropertyOptions{} parts, err := flex.SepIdParts(d.Id(), "/") if err != nil { - return diag.FromErr(err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_cd_tekton_pipeline_trigger_property", "delete", "sep-id-parts").GetDiag() } deleteTektonPipelineTriggerPropertyOptions.SetPipelineID(parts[0]) deleteTektonPipelineTriggerPropertyOptions.SetTriggerID(parts[1]) deleteTektonPipelineTriggerPropertyOptions.SetPropertyName(parts[2]) - response, err := cdTektonPipelineClient.DeleteTektonPipelineTriggerPropertyWithContext(context, deleteTektonPipelineTriggerPropertyOptions) + _, err = cdTektonPipelineClient.DeleteTektonPipelineTriggerPropertyWithContext(context, deleteTektonPipelineTriggerPropertyOptions) if err != nil { - log.Printf("[DEBUG] DeleteTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteTektonPipelineTriggerPropertyWithContext failed %s\n%s", err, response)) 
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteTektonPipelineTriggerPropertyWithContext failed: %s", err.Error()), "ibm_cd_tekton_pipeline_trigger_property", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } d.SetId("") diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property_test.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property_test.go index fe5bcbf629..d837c2dd5a 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property_test.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_property_test.go @@ -29,9 +29,9 @@ func TestAccIBMCdTektonPipelineTriggerPropertyBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineTriggerPropertyConfigBasic("", "", name, typeVar), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineTriggerPropertyExists("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "type", typeVar), + testAccCheckIBMCdTektonPipelineTriggerPropertyExists("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", conf), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "type", typeVar), ), }, }, @@ -41,8 +41,8 @@ func TestAccIBMCdTektonPipelineTriggerPropertyBasic(t *testing.T) { func TestAccIBMCdTektonPipelineTriggerPropertyAllArgs(t *testing.T) { var conf cdtektonpipelinev2.TriggerProperty name := fmt.Sprintf("tf_name_%d", 
acctest.RandIntRange(10, 100)) - typeVar := "text" value := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) + typeVar := "text" path := fmt.Sprintf("tf_path_%d", acctest.RandIntRange(10, 100)) locked := "true" valueUpdate := fmt.Sprintf("tf_value_%d", acctest.RandIntRange(10, 100)) @@ -55,28 +55,29 @@ func TestAccIBMCdTektonPipelineTriggerPropertyAllArgs(t *testing.T) { CheckDestroy: testAccCheckIBMCdTektonPipelineTriggerPropertyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerPropertyConfig("", "", name, typeVar, value, path, locked), + Config: testAccCheckIBMCdTektonPipelineTriggerPropertyConfig("", "", name, value, typeVar, path, locked), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineTriggerPropertyExists("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "type", typeVar), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "value", value), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "locked", locked), + testAccCheckIBMCdTektonPipelineTriggerPropertyExists("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", conf), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "value", value), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "type", typeVar), + 
resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "locked", locked), ), }, resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerPropertyConfig("", "", name, typeVar, valueUpdate, pathUpdate, lockedUpdate), + Config: testAccCheckIBMCdTektonPipelineTriggerPropertyConfig("", "", name, valueUpdate, typeVar, pathUpdate, lockedUpdate), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "type", typeVar), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "value", valueUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", "locked", lockedUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "value", valueUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "type", typeVar), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", "locked", lockedUpdate), ), }, resource.TestStep{ - ResourceName: "ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pipeline_id", "trigger_id"}, }, }, }) @@ -99,7 +100,7 @@ func 
testAccCheckIBMCdTektonPipelineTriggerPropertyConfigBasic(pipelineID string name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -118,8 +119,8 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfigBasic(pipelineID string } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -129,21 +130,21 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfigBasic(pipelineID string } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] name = "trigger" type = "manual" event_listener = "listener" } - resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + trigger_id = 
ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id type = "text" name = "trig-prop-1" value = "trig-prop-value-1" @@ -151,7 +152,7 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfigBasic(pipelineID string `, rgName, tcName) } -func testAccCheckIBMCdTektonPipelineTriggerPropertyConfig(pipelineID string, triggerID string, name string, typeVar string, value string, path string, locked string) string { +func testAccCheckIBMCdTektonPipelineTriggerPropertyConfig(pipelineID string, triggerID string, name string, value string, typeVar string, path string, locked string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) return fmt.Sprintf(` @@ -168,7 +169,7 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfig(pipelineID string, tri name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -187,8 +188,8 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfig(pipelineID string, tri } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -198,21 +199,21 @@ func testAccCheckIBMCdTektonPipelineTriggerPropertyConfig(pipelineID string, tri } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = 
ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] name = "trigger" type = "manual" event_listener = "listener" } - resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id name = "%s" type = "%s" value = "%s" diff --git a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_test.go b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_test.go index 7c327c2a5a..39329b80eb 100644 --- a/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_test.go +++ b/ibm/service/cdtektonpipeline/resource_ibm_cd_tekton_pipeline_trigger_test.go @@ -14,7 +14,10 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/cdtektonpipeline" "github.com/IBM/continuous-delivery-go-sdk/cdtektonpipelinev2" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" ) func TestAccIBMCdTektonPipelineTriggerBasic(t *testing.T) { @@ -34,20 +37,20 @@ func TestAccIBMCdTektonPipelineTriggerBasic(t *testing.T) { resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineTriggerConfigBasic("", typeVar, name, eventListener), Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", conf), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "trigger_id"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "type", typeVar), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "event_listener", eventListener), + testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", conf), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "trigger_id"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "type", typeVar), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "event_listener", eventListener), ), }, resource.TestStep{ Config: testAccCheckIBMCdTektonPipelineTriggerConfigBasic("", typeVarUpdate, nameUpdate, eventListenerUpdate), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "name", nameUpdate), - 
resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "event_listener", eventListenerUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "name", nameUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "event_listener", eventListenerUpdate), ), }, }, @@ -84,60 +87,59 @@ func TestAccIBMCdTektonPipelineTriggerAllArgs(t *testing.T) { CheckDestroy: testAccCheckIBMCdTektonPipelineTriggerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID, typeVar, name, eventListener, maxConcurrentRuns, enabled, cron, timezone, filter, favorite, enableEventsFromForks), + Config: testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID, typeVar, name, eventListener, maxConcurrentRuns, enabled, favorite, enableEventsFromForks, filter, cron, timezone), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", conf), - testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", conf), - testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", conf), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "name", name), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "max_concurrent_runs", maxConcurrentRuns), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "enabled", enabled), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "cron", cron), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "timezone", timezone), - 
resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "filter", filter), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "favorite", favorite), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "trigger_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "trigger_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "trigger_id"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "type", "manual"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "tags.#"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "type", "timer"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "type", "generic"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "secret.#"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "webhook_url"), + testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", conf), + testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", conf), + testAccCheckIBMCdTektonPipelineTriggerExists("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", conf), + 
resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "name", name), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "max_concurrent_runs", maxConcurrentRuns), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "enabled", enabled), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "cron", cron), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "timezone", timezone), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "favorite", favorite), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "trigger_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "trigger_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "trigger_id"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "type", "manual"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "tags.#"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "type", "timer"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "type", "generic"), + 
resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "secret.#"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "webhook_url"), ), }, resource.TestStep{ - Config: testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID, typeVarUpdate, nameUpdate, eventListenerUpdate, maxConcurrentRunsUpdate, enabledUpdate, cronUpdate, timezoneUpdate, filterUpdate, favoriteUpdate, enableEventsFromForksUpdate), + Config: testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID, typeVarUpdate, nameUpdate, eventListenerUpdate, maxConcurrentRunsUpdate, enabledUpdate, favoriteUpdate, enableEventsFromForksUpdate, filterUpdate, cronUpdate, timezoneUpdate), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "name", nameUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "max_concurrent_runs", maxConcurrentRunsUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "enabled", enabledUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "cron", cronUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "timezone", timezoneUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "filter", filterUpdate), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "favorite", favoriteUpdate), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "trigger_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "pipeline_id"), 
- resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "trigger_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "pipeline_id"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "trigger_id"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "type", "manual"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", "tags.#"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2", "type", "timer"), - resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "type", "generic"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "secret.#"), - resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3", "webhook_url"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "name", nameUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "max_concurrent_runs", maxConcurrentRunsUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "enabled", enabledUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "cron", cronUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "timezone", timezoneUpdate), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "favorite", favoriteUpdate), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "pipeline_id"), + 
resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "trigger_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "trigger_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "pipeline_id"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "trigger_id"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "type", "manual"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", "tags.#"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger2_instance", "type", "timer"), + resource.TestCheckResourceAttr("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "type", "generic"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "secret.#"), + resource.TestCheckResourceAttrSet("ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger3_instance", "webhook_url"), ), }, resource.TestStep{ - ResourceName: "ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pipeline_id", "enable_events_from_forks"}, }, }, }) @@ -160,7 +162,7 @@ func testAccCheckIBMCdTektonPipelineTriggerConfigBasic(pipelineID string, typeVa name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = 
ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -179,8 +181,8 @@ func testAccCheckIBMCdTektonPipelineTriggerConfigBasic(pipelineID string, typeVa } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -190,13 +192,13 @@ func testAccCheckIBMCdTektonPipelineTriggerConfigBasic(pipelineID string, typeVa } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "%s" name = "%s" @@ -205,7 +207,7 @@ func testAccCheckIBMCdTektonPipelineTriggerConfigBasic(pipelineID string, typeVa `, rgName, tcName, typeVar, name, eventListener) } -func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar string, name string, eventListener string, maxConcurrentRuns string, enabled string, cron string, timezone string, filter string, favorite string, enableEventsFromForks string) string { +func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar string, name string, eventListener string, maxConcurrentRuns string, enabled string, favorite string, enableEventsFromForks string, filter string, cron string, timezone string) string { rgName := acc.CdResourceGroupName tcName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 
100)) return fmt.Sprintf(` @@ -222,7 +224,7 @@ func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar str name = "pipeline-name" } } - resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { + resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id next_build_number = 5 worker { @@ -241,8 +243,8 @@ func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar str } parameters {} } - resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline.pipeline_id + resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_instance" { + pipeline_id = ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance.pipeline_id source { type = "git" properties { @@ -252,13 +254,13 @@ func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar str } } depends_on = [ - ibm_cd_tekton_pipeline.cd_tekton_pipeline + ibm_cd_tekton_pipeline.cd_tekton_pipeline_instance ] } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "manual" event_listener = "listener" @@ -268,10 +270,10 @@ func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar str enabled = %s favorite = %s } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger2" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger2_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + 
ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "timer" name = "timer1" @@ -279,10 +281,10 @@ func testAccCheckIBMCdTektonPipelineTriggerConfig(pipelineID string, typeVar str cron = "%s" timezone = "%s" } - resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger3" { + resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger3_instance" { pipeline_id = ibm_cd_toolchain_tool_pipeline.ibm_cd_toolchain_tool_pipeline.tool_id depends_on = [ - ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition + ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance ] type = "generic" name = "generic1" @@ -364,3 +366,251 @@ func testAccCheckIBMCdTektonPipelineTriggerDestroy(s *terraform.State) error { return nil } + +func TestResourceIBMCdTektonPipelineTriggerWorkerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["type"] = "testString" + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Worker) + model.Name = core.StringPtr("testString") + model.Type = core.StringPtr("testString") + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerWorkerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerTriggerSourceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + triggerSourcePropertiesModel := make(map[string]interface{}) + triggerSourcePropertiesModel["url"] = "testString" + triggerSourcePropertiesModel["branch"] = "testString" + triggerSourcePropertiesModel["pattern"] = "testString" + triggerSourcePropertiesModel["blind_connection"] = true + triggerSourcePropertiesModel["hook_id"] = "testString" + triggerSourcePropertiesModel["tool"] = 
[]map[string]interface{}{toolModel} + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []map[string]interface{}{triggerSourcePropertiesModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + triggerSourcePropertiesModel := new(cdtektonpipelinev2.TriggerSourceProperties) + triggerSourcePropertiesModel.URL = core.StringPtr("testString") + triggerSourcePropertiesModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesModel.Pattern = core.StringPtr("testString") + triggerSourcePropertiesModel.BlindConnection = core.BoolPtr(true) + triggerSourcePropertiesModel.HookID = core.StringPtr("testString") + triggerSourcePropertiesModel.Tool = toolModel + + model := new(cdtektonpipelinev2.TriggerSource) + model.Type = core.StringPtr("testString") + model.Properties = triggerSourcePropertiesModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerTriggerSourceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + toolModel := make(map[string]interface{}) + toolModel["id"] = "testString" + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["pattern"] = "testString" + model["blind_connection"] = true + model["hook_id"] = "testString" + model["tool"] = []map[string]interface{}{toolModel} + + assert.Equal(t, result, model) + } + + toolModel := new(cdtektonpipelinev2.Tool) + toolModel.ID = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerSourceProperties) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Pattern = core.StringPtr("testString") + model.BlindConnection = core.BoolPtr(true) + model.HookID = core.StringPtr("testString") + model.Tool = 
toolModel + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerTriggerSourcePropertiesToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerToolToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.Tool) + model.ID = core.StringPtr("testString") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerToolToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerGenericSecretToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["type"] = "token_matches" + model["value"] = "testString" + model["source"] = "header" + model["key_name"] = "testString" + model["algorithm"] = "md4" + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.GenericSecret) + model.Type = core.StringPtr("token_matches") + model.Value = core.StringPtr("testString") + model.Source = core.StringPtr("header") + model.KeyName = core.StringPtr("testString") + model.Algorithm = core.StringPtr("md4") + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerGenericSecretToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["value"] = "testString" + model["href"] = "testString" + model["enum"] = []string{"testString"} + model["type"] = "secure" + model["path"] = "testString" + model["locked"] = true + + assert.Equal(t, result, model) + } + + model := new(cdtektonpipelinev2.TriggerProperty) + model.Name = core.StringPtr("testString") + model.Value = core.StringPtr("testString") + model.Href = 
core.StringPtr("testString") + model.Enum = []string{"testString"} + model.Type = core.StringPtr("secure") + model.Path = core.StringPtr("testString") + model.Locked = core.BoolPtr(true) + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerTriggerPropertyToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.WorkerIdentity) { + model := new(cdtektonpipelinev2.WorkerIdentity) + model.ID = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["id"] = "testString" + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerMapToWorkerIdentity(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerMapToGenericSecret(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.GenericSecret) { + model := new(cdtektonpipelinev2.GenericSecret) + model.Type = core.StringPtr("token_matches") + model.Value = core.StringPtr("testString") + model.Source = core.StringPtr("header") + model.KeyName = core.StringPtr("testString") + model.Algorithm = core.StringPtr("md4") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["type"] = "token_matches" + model["value"] = "testString" + model["source"] = "header" + model["key_name"] = "testString" + model["algorithm"] = "md4" + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerMapToGenericSecret(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.TriggerSourcePrototype) { + triggerSourcePropertiesPrototypeModel := new(cdtektonpipelinev2.TriggerSourcePropertiesPrototype) + triggerSourcePropertiesPrototypeModel.URL = core.StringPtr("testString") + 
triggerSourcePropertiesPrototypeModel.Branch = core.StringPtr("testString") + triggerSourcePropertiesPrototypeModel.Pattern = core.StringPtr("testString") + + model := new(cdtektonpipelinev2.TriggerSourcePrototype) + model.Type = core.StringPtr("testString") + model.Properties = triggerSourcePropertiesPrototypeModel + + assert.Equal(t, result, model) + } + + triggerSourcePropertiesPrototypeModel := make(map[string]interface{}) + triggerSourcePropertiesPrototypeModel["url"] = "testString" + triggerSourcePropertiesPrototypeModel["branch"] = "testString" + triggerSourcePropertiesPrototypeModel["pattern"] = "testString" + + model := make(map[string]interface{}) + model["type"] = "testString" + model["properties"] = []interface{}{triggerSourcePropertiesPrototypeModel} + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePrototype(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(t *testing.T) { + checkResult := func(result *cdtektonpipelinev2.TriggerSourcePropertiesPrototype) { + model := new(cdtektonpipelinev2.TriggerSourcePropertiesPrototype) + model.URL = core.StringPtr("testString") + model.Branch = core.StringPtr("testString") + model.Pattern = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["url"] = "testString" + model["branch"] = "testString" + model["pattern"] = "testString" + + result, err := cdtektonpipeline.ResourceIBMCdTektonPipelineTriggerMapToTriggerSourcePropertiesPrototype(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/configurationaggregator/README.md b/ibm/service/configurationaggregator/README.md new file mode 100644 index 0000000000..a6838d1cb7 --- /dev/null +++ b/ibm/service/configurationaggregator/README.md @@ -0,0 +1,11 @@ +# Terraform IBM Provider + +This area is primarily for IBM provider contributors and maintainers. 
For information on _using_ Terraform and the IBM provider, see the links below. + + +## Handy Links +* [Find out about contributing](../../../CONTRIBUTING.md) to the IBM provider! +* IBM Provider Docs: [Home](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs) +* IBM Provider Docs: [One of the resources](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/resources/config_aggregator_settings) +* IBM API Docs: [IBM API Docs for ]() +* IBM SDK: [IBM SDK for ](https://github.com/IBM/appconfiguration-go-admin-sdk/tree/master/configurationaggregatorv1) diff --git a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations.go new file mode 100644 index 0000000000..9cac155764 --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations.go @@ -0,0 +1,230 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator + +import ( + "context" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" +) + +func DataSourceIbmConfigAggregatorConfigurations() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIbmConfigAggregatorConfigurationsRead, + + Schema: map[string]*schema.Schema{ + "config_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The type of resource configuration that are to be retrieved.", + }, + "service_name": &schema.Schema{ + Type: schema.TypeString, + Optional: 
true, + Description: "The name of the IBM Cloud service for which resources are to be retrieved.", + }, + "resource_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The resource group id of the resources.", + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The location or region in which the resources are created.", + }, + "resource_crn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The crn of the resource.", + }, + "prev": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The reference to the previous page of entries.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The reference to the previous page of entries.", + }, + "start": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "the start string for the query to view the page.", + }, + }, + }, + }, + "configs": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Array of resource configurations.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "about": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Description: "The basic metadata fetched from the query API.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "config": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The configuration of the resource.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmConfigAggregatorConfigurationsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + configurationAggregatorClient, err := meta.(conns.ClientSession).ConfigurationAggregatorV1() + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_configurations", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return 
tfErr.GetDiag() + } + + region := getConfigurationInstanceRegion(configurationAggregatorClient, d) + instanceId := d.Get("instance_id").(string) + log.Printf("Fetching config for instance_id: %s", instanceId) + configurationAggregatorClient = getClientWithConfigurationInstanceEndpoint(configurationAggregatorClient, instanceId, region) + + listConfigsOptions := &configurationaggregatorv1.ListConfigsOptions{} + + if _, ok := d.GetOk("config_type"); ok { + listConfigsOptions.SetConfigType(d.Get("config_type").(string)) + } + if _, ok := d.GetOk("service_name"); ok { + listConfigsOptions.SetServiceName(d.Get("service_name").(string)) + } + if _, ok := d.GetOk("resource_group_id"); ok { + listConfigsOptions.SetResourceGroupID(d.Get("resource_group_id").(string)) + } + if _, ok := d.GetOk("location"); ok { + listConfigsOptions.SetLocation(d.Get("location").(string)) + } + if _, ok := d.GetOk("resource_crn"); ok { + listConfigsOptions.SetResourceCrn(d.Get("resource_crn").(string)) + } + + var pager *configurationaggregatorv1.ConfigsPager + pager, err = configurationAggregatorClient.NewConfigsPager(listConfigsOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_configurations", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + allItems, err := pager.GetAll() + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("ConfigsPager.GetAll() failed %s", err), "(Data) ibm_config_aggregator_configurations", "read") + log.Printf("[DEBUG] %s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + d.SetId(dataSourceIbmConfigAggregatorConfigurationsID(d)) + + mapSlice := []map[string]interface{}{} + for _, modelItem := range allItems { + modelMap, err := DataSourceIbmConfigAggregatorConfigurationsConfigToMap(&modelItem) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_configurations", "read") + return tfErr.GetDiag() + } + 
mapSlice = append(mapSlice, modelMap) + } + + if err = d.Set("configs", mapSlice); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting configs %s", err), "(Data) ibm_config_aggregator_configurations", "read") + return tfErr.GetDiag() + } + + return nil +} + +// dataSourceIbmConfigAggregatorConfigurationsID returns a reasonable ID for the list. +func dataSourceIbmConfigAggregatorConfigurationsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func DataSourceIbmConfigAggregatorConfigurationsPaginatedPreviousToMap(model *configurationaggregatorv1.PaginatedPrevious) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.Start != nil { + modelMap["start"] = *model.Start + } + return modelMap, nil +} + +func DataSourceIbmConfigAggregatorConfigurationsConfigToMap(model *configurationaggregatorv1.Config) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + aboutMap, err := DataSourceIbmConfigAggregatorConfigurationsAboutToMap(model.About) + if err != nil { + return modelMap, err + } + modelMap["about"] = aboutMap + configMap, err := DataSourceIbmConfigAggregatorConfigurationsConfigurationToMap(model.Config) + if err != nil { + return modelMap, err + } + modelMap["config"] = configMap + return modelMap, nil +} + +func DataSourceIbmConfigAggregatorConfigurationsAboutToMap(model *configurationaggregatorv1.About) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["account_id"] = *model.AccountID + modelMap["config_type"] = *model.ConfigType + modelMap["resource_crn"] = *model.ResourceCrn + modelMap["resource_group_id"] = *model.ResourceGroupID + modelMap["service_name"] = *model.ServiceName + modelMap["resource_name"] = *model.ResourceName + modelMap["last_config_refresh_time"] = model.LastConfigRefreshTime.String() + modelMap["location"] = *model.Location + // 
modelMap["tags"] = make(map[string]interface{}) + return modelMap, nil +} + +func DataSourceIbmConfigAggregatorConfigurationsTagsToMap(model *configurationaggregatorv1.Tags) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Tag != nil { + modelMap["tag"] = *model.Tag + } + return modelMap, nil +} + +func DataSourceIbmConfigAggregatorConfigurationsConfigurationToMap(model *configurationaggregatorv1.Configuration) (string, error) { + checkMap := model.GetProperties() + tryMap := make(map[string]interface{}) + for i, v := range checkMap { + tryMap[i] = v + } + jsonData, err := json.Marshal(tryMap) + if err != nil { + return "", err + } + return string(jsonData), nil +} diff --git a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations_test.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations_test.go new file mode 100644 index 0000000000..9ecd3f0b64 --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_configurations_test.go @@ -0,0 +1,57 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIbmConfigAggregatorConfigurationsDataSourceBasic(t *testing.T) { + instanceID := "instance_id" + var configType = "your-config-type" + var location = "your-location" + var resourceCrn = "your-resource-crn" + var resourceGroupID = "your-resource-group-id" + var serviceName = "your-service-name" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIbmConfigAggregatorConfigurationsDataSourceConfigBasic(instanceID, configType, location, resourceCrn, resourceGroupID, serviceName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "id"), + resource.TestCheckResourceAttr("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "config_type", configType), + resource.TestCheckResourceAttr("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "location", location), + resource.TestCheckResourceAttr("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "resource_crn", resourceCrn), + resource.TestCheckResourceAttr("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "resource_group_id", resourceGroupID), + resource.TestCheckResourceAttr("data.ibm_config_aggregator_configurations.config_aggregator_configurations_instance", "service_name", serviceName), + ), + }, + }, + }) +} + +func testAccCheckIbmConfigAggregatorConfigurationsDataSourceConfigBasic(instanceID, 
configType, location, resourceCrn, resourceGroupID, serviceName string) string { + return fmt.Sprintf(` + data "ibm_config_aggregator_configurations" "config_aggregator_configurations_instance" { + instance_id ="%s" + config_type = "%s" + location = "%s" + resource_crn = "%s" + resource_group_id = "%s" + service_name = "%s" + } + `, instanceID, configType, location, resourceCrn, resourceGroupID, serviceName) +} diff --git a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status.go new file mode 100644 index 0000000000..4b75ba2271 --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status.go @@ -0,0 +1,80 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" +) + +func DataSourceIbmConfigAggregatorResourceCollectionStatus() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIbmConfigAggregatorResourceCollectionStatusRead, + + Schema: map[string]*schema.Schema{ + "last_config_refresh_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The timestamp at which the configuration was last refreshed.", + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Status of the resource collection.", + }, + }, + } +} + +func 
dataSourceIbmConfigAggregatorResourceCollectionStatusRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + configurationAggregatorClient, err := meta.(conns.ClientSession).ConfigurationAggregatorV1() + region := getConfigurationInstanceRegion(configurationAggregatorClient, d) + instanceId := d.Get("instance_id").(string) + configurationAggregatorClient = getClientWithConfigurationInstanceEndpoint(configurationAggregatorClient, instanceId, region) + if err != nil { + // Error is coming from SDK client, so it doesn't need to be discriminated. + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_resource_collection_status", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + getResourceCollectionStatusOptions := &configurationaggregatorv1.GetResourceCollectionStatusOptions{} + + statusResponse, _, err := configurationAggregatorClient.GetResourceCollectionStatusWithContext(context, getResourceCollectionStatusOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetResourceCollectionStatusWithContext failed: %s", err.Error()), "(Data) ibm_config_aggregator_resource_collection_status", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + d.SetId(dataSourceIbmConfigAggregatorResourceCollectionStatusID(d)) + + if err = d.Set("last_config_refresh_time", flex.DateTimeToString(statusResponse.LastConfigRefreshTime)); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting last_config_refresh_time: %s", err), "(Data) ibm_config_aggregator_resource_collection_status", "read", "set-last_config_refresh_time").GetDiag() + } + + if err = d.Set("status", statusResponse.Status); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting status: %s", err), "(Data) ibm_config_aggregator_resource_collection_status", "read", "set-status").GetDiag() + } + + 
return nil +} + +// dataSourceIbmConfigAggregatorResourceCollectionStatusID returns a reasonable ID for the list. +func dataSourceIbmConfigAggregatorResourceCollectionStatusID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} diff --git a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status_test.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status_test.go new file mode 100644 index 0000000000..6ed765fd86 --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_resource_collection_status_test.go @@ -0,0 +1,41 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIbmConfigAggregatorResourceCollectionStatusDataSourceBasic(t *testing.T) { + instanceID := "instance_id" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIbmConfigAggregatorResourceCollectionStatusDataSourceConfigBasic(instanceID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_config_aggregator_resource_collection_status.config_aggregator_resource_collection_status_instance", "id"), + ), + }, + }, + }) +} + +func testAccCheckIbmConfigAggregatorResourceCollectionStatusDataSourceConfigBasic(instanceID string) string { + return fmt.Sprintf(` + data "ibm_config_aggregator_resource_collection_status" "config_aggregator_resource_collection_status_instance" { + instance_id="%s" + } + `, instanceID) +} diff --git 
a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings.go new file mode 100644 index 0000000000..4c713637ca --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings.go @@ -0,0 +1,178 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" +) + +func DataSourceIbmConfigAggregatorSettings() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIbmConfigAggregatorSettingsRead, + + Schema: map[string]*schema.Schema{ + "resource_collection_enabled": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "The field to check if the resource collection is enabled.", + }, + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The trusted profile ID that provides access to App Configuration instance to retrieve resource metadata.", + }, + "last_updated": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The last time the settings was last updated.", + }, + "regions": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Regions for which the resource collection is enabled.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "additional_scope": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The additional scope that enables resource 
collection for Enterprise acccounts.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type of scope. Currently allowed value is Enterprise.", + }, + "enterprise_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Enterprise ID.", + }, + "profile_template": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The Profile Template details applied on the enterprise account.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The Profile Template ID created in the enterprise account that provides access to App Configuration instance for resource collection.", + }, + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The trusted profile ID that provides access to App Configuration instance to retrieve template information.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceIbmConfigAggregatorSettingsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + configurationAggregatorClient, err := meta.(conns.ClientSession).ConfigurationAggregatorV1() + region := getConfigurationInstanceRegion(configurationAggregatorClient, d) + instanceId := d.Get("instance_id").(string) + configurationAggregatorClient = getClientWithConfigurationInstanceEndpoint(configurationAggregatorClient, instanceId, region) + if err != nil { + // Error is coming from SDK client, so it doesn't need to be discriminated. 
+ tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_settings", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + getSettingsOptions := &configurationaggregatorv1.GetSettingsOptions{} + + settingsResponse, _, err := configurationAggregatorClient.GetSettingsWithContext(context, getSettingsOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetSettingsWithContext failed: %s", err.Error()), "(Data) ibm_config_aggregator_settings", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + d.SetId(dataSourceIbmConfigAggregatorSettingsID(d)) + + if err = d.Set("resource_collection_enabled", settingsResponse.ResourceCollectionEnabled); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting resource_collection_enabled: %s", err), "(Data) ibm_config_aggregator_settings", "read", "set-resource_collection_enabled").GetDiag() + } + + if err = d.Set("trusted_profile_id", settingsResponse.TrustedProfileID); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting trusted_profile_id: %s", err), "(Data) ibm_config_aggregator_settings", "read", "set-trusted_profile_id").GetDiag() + } + + if err = d.Set("last_updated", flex.DateTimeToString(settingsResponse.LastUpdated)); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting last_updated: %s", err), "(Data) ibm_config_aggregator_settings", "read", "set-last_updated").GetDiag() + } + + additionalScope := []map[string]interface{}{} + if settingsResponse.AdditionalScope != nil { + for _, modelItem := range settingsResponse.AdditionalScope { + modelMap, err := DataSourceIbmConfigAggregatorSettingsAdditionalScopeToMap(&modelItem) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_config_aggregator_settings", "read", "additional_scope-to-map").GetDiag() + } + 
additionalScope = append(additionalScope, modelMap) + } + } + if err = d.Set("additional_scope", additionalScope); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting additional_scope: %s", err), "(Data) ibm_config_aggregator_settings", "read", "set-additional_scope").GetDiag() + } + + return nil +} + +// dataSourceIbmConfigAggregatorSettingsID returns a reasonable ID for the list. +func dataSourceIbmConfigAggregatorSettingsID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func DataSourceIbmConfigAggregatorSettingsAdditionalScopeToMap(model *configurationaggregatorv1.AdditionalScope) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.EnterpriseID != nil { + modelMap["enterprise_id"] = *model.EnterpriseID + } + if model.ProfileTemplate != nil { + profileTemplateMap, err := DataSourceIbmConfigAggregatorSettingsProfileTemplateToMap(model.ProfileTemplate) + if err != nil { + return modelMap, err + } + modelMap["profile_template"] = []map[string]interface{}{profileTemplateMap} + } + return modelMap, nil +} + +func DataSourceIbmConfigAggregatorSettingsProfileTemplateToMap(model *configurationaggregatorv1.ProfileTemplate) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.TrustedProfileID != nil { + modelMap["trusted_profile_id"] = *model.TrustedProfileID + } + return modelMap, nil +} diff --git a/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings_test.go b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings_test.go new file mode 100644 index 0000000000..7414f79616 --- /dev/null +++ b/ibm/service/configurationaggregator/data_source_ibm_config_aggregator_settings_test.go @@ -0,0 +1,91 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/configurationaggregator" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/stretchr/testify/assert" +) + +func TestAccIbmConfigAggregatorSettingsDataSourceBasic(t *testing.T) { + instanceID := "instance_id" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIbmConfigAggregatorSettingsDataSourceConfigBasic(instanceID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_config_aggregator_settings.config_aggregator_settings_instance", "id"), + ), + }, + }, + }) +} + +func testAccCheckIbmConfigAggregatorSettingsDataSourceConfigBasic(instanceID string) string { + return fmt.Sprintf(` + data "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + instance_id="%s" + } + `, instanceID) +} + +func TestDataSourceIbmConfigAggregatorSettingsAdditionalScopeToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + profileTemplateModel := make(map[string]interface{}) + profileTemplateModel["id"] = "ProfileTemplate-adb55769-ae22-4c60-aead-bd1f84f93c57" + profileTemplateModel["trusted_profile_id"] = "Profile-6bb60124-8fc3-4d18-b63d-0b99560865d3" + + model := make(map[string]interface{}) + model["type"] = "Enterprise" + model["enterprise_id"] = "testString" + model["profile_template"] = []map[string]interface{}{profileTemplateModel} + + assert.Equal(t, result, model) + } + + profileTemplateModel 
:= new(configurationaggregatorv1.ProfileTemplate) + profileTemplateModel.ID = core.StringPtr("ProfileTemplate-adb55769-ae22-4c60-aead-bd1f84f93c57") + profileTemplateModel.TrustedProfileID = core.StringPtr("Profile-6bb60124-8fc3-4d18-b63d-0b99560865d3") + + model := new(configurationaggregatorv1.AdditionalScope) + model.Type = core.StringPtr("Enterprise") + model.EnterpriseID = core.StringPtr("testString") + model.ProfileTemplate = profileTemplateModel + + result, err := configurationaggregator.DataSourceIbmConfigAggregatorSettingsAdditionalScopeToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestDataSourceIbmConfigAggregatorSettingsProfileTemplateToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["id"] = "ProfileTemplate-adb55769-ae22-4c60-aead-bd1f84f93c57" + model["trusted_profile_id"] = "Profile-6bb60124-8fc3-4d18-b63d-0b99560865d3" + + assert.Equal(t, result, model) + } + + model := new(configurationaggregatorv1.ProfileTemplate) + model.ID = core.StringPtr("ProfileTemplate-adb55769-ae22-4c60-aead-bd1f84f93c57") + model.TrustedProfileID = core.StringPtr("Profile-6bb60124-8fc3-4d18-b63d-0b99560865d3") + + result, err := configurationaggregator.DataSourceIbmConfigAggregatorSettingsProfileTemplateToMap(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings.go b/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings.go new file mode 100644 index 0000000000..17a29d4776 --- /dev/null +++ b/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings.go @@ -0,0 +1,293 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +/* + * IBM OpenAPI Terraform Generator Version: 3.92.0-af5c89a5-20240617-153232 + */ + +package configurationaggregator + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" + "github.com/IBM/go-sdk-core/v5/core" +) + +func ResourceIbmConfigAggregatorSettings() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIbmConfigAggregatorSettingsCreate, + ReadContext: resourceIbmConfigAggregatorSettingsRead, + DeleteContext: resourceIbmConfigAggregatorSettingsDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "resource_collection_enabled": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: "The field denoting if the resource collection is enabled.", + }, + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.InvokeValidator("ibm_config_aggregator_settings", "trusted_profile_id"), + Description: "The trusted profile id that provides Reader access to the App Configuration instance to collect resource metadata.", + }, + "resource_collection_regions": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The list of regions across which the resource collection is enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "additional_scope": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The additional scope that enables resource collection for Enterprise acccounts.", + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The type of scope. Currently allowed value is Enterprise.", + }, + "enterprise_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Enterprise ID.", + }, + "profile_template": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The Profile Template details applied on the enterprise account.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Profile Template ID created in the enterprise account that provides access to App Configuration instance for resource collection.", + }, + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The trusted profile ID that provides access to App Configuration instance to retrieve template information.", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func ResourceIbmConfigAggregatorSettingsValidator() *validate.ResourceValidator { + validateSchema := make([]validate.ValidateSchema, 0) + validateSchema = append(validateSchema, + validate.ValidateSchema{ + Identifier: "trusted_profile_id", + ValidateFunctionIdentifier: validate.ValidateRegexpLen, + Type: validate.TypeString, + Required: true, + Regexp: `^[a-zA-Z0-9-]*$`, + MinValueLength: 44, + MaxValueLength: 44, + }, + ) + + resourceValidator := validate.ResourceValidator{ResourceName: "ibm_config_aggregator_settings", Schema: validateSchema} + return &resourceValidator +} + +func resourceIbmConfigAggregatorSettingsCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + configurationAggregatorClient, err := meta.(conns.ClientSession).ConfigurationAggregatorV1() + region := getConfigurationInstanceRegion(configurationAggregatorClient, d) + instanceId := d.Get("instance_id").(string) + configurationAggregatorClient 
= getClientWithConfigurationInstanceEndpoint(configurationAggregatorClient, instanceId, region) + if err != nil { + // Error is coming from SDK client, so it doesn't need to be discriminated. + tfErr := flex.TerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + replaceSettingsOptions := &configurationaggregatorv1.ReplaceSettingsOptions{} + replaceSettingsOptions.SetResourceCollectionEnabled(d.Get("resource_collection_enabled").(bool)) + if _, ok := d.GetOk("trusted_profile_id"); ok { + replaceSettingsOptions.SetTrustedProfileID(d.Get("trusted_profile_id").(string)) + } + if _, ok := d.GetOk("resource_collection_regions"); ok { + var regions []string + for _, v := range d.Get("resource_collection_regions").([]interface{}) { + regionsItem := v.(string) + regions = append(regions, regionsItem) + } + replaceSettingsOptions.SetRegions(regions) + } + if _, ok := d.GetOk("additional_scope"); ok { + var additionalScope []configurationaggregatorv1.AdditionalScope + for _, v := range d.Get("additional_scope").([]interface{}) { + value := v.(map[string]interface{}) + additionalScopeItem, err := ResourceIbmConfigAggregatorSettingsMapToAdditionalScope(value) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "create", "parse-additional_scope").GetDiag() + } + additionalScope = append(additionalScope, *additionalScopeItem) + } + replaceSettingsOptions.SetAdditionalScope(additionalScope) + } + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("ReplaceSettingsWithContext failed: %s", err.Error()), "ibm_config_aggregator_settings", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + aggregatorID := fmt.Sprintf("%s/%s", region, instanceId) + d.SetId(aggregatorID) + + return resourceIbmConfigAggregatorSettingsRead(context, d, meta) +} + +func 
resourceIbmConfigAggregatorSettingsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + configurationAggregatorClient, err := meta.(conns.ClientSession).ConfigurationAggregatorV1() + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + getSettingsOptions := &configurationaggregatorv1.GetSettingsOptions{} + var region string + var instanceId string + configurationAggregatorClient, region, instanceId, err = updateClientURLWithInstanceEndpoint(d.Id(), configurationAggregatorClient, d) + if err != nil { + return diag.FromErr(err) + } + settingsResponse, response, err := configurationAggregatorClient.GetSettingsWithContext(context, getSettingsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetSettingsWithContext failed: %s", err.Error()), "ibm_config_aggregator_settings", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + if err = d.Set("instance_id", instanceId); err != nil { + return diag.FromErr(fmt.Errorf("Error setting instance_id: %s", err)) + } + if err = d.Set("region", region); err != nil { + return diag.FromErr(fmt.Errorf("Error setting region: %s", err)) + } + if !core.IsNil(settingsResponse.ResourceCollectionEnabled) { + if err = d.Set("resource_collection_enabled", settingsResponse.ResourceCollectionEnabled); err != nil { + err = fmt.Errorf("Error setting resource_collection_enabled: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read", "set-resource_collection_enabled").GetDiag() + } + } + if !core.IsNil(settingsResponse.TrustedProfileID) { + if err = d.Set("trusted_profile_id", settingsResponse.TrustedProfileID); err != nil { + err = fmt.Errorf("Error setting 
trusted_profile_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read", "set-trusted_profile_id").GetDiag() + } + } + if !core.IsNil(settingsResponse.Regions) { + if err = d.Set("resource_collection_regions", settingsResponse.Regions); err != nil { + err = fmt.Errorf("Error setting regions: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read", "set-regions").GetDiag() + } + } + if !core.IsNil(settingsResponse.AdditionalScope) { + additionalScope := []map[string]interface{}{} + for _, additionalScopeItem := range settingsResponse.AdditionalScope { + additionalScopeItemMap, err := ResourceIbmConfigAggregatorSettingsAdditionalScopeToMap(&additionalScopeItem) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read", "additional_scope-to-map").GetDiag() + } + additionalScope = append(additionalScope, additionalScopeItemMap) + } + if err = d.Set("additional_scope", additionalScope); err != nil { + err = fmt.Errorf("Error setting additional_scope: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_config_aggregator_settings", "read", "set-additional_scope").GetDiag() + } + } + + return nil +} + +func resourceIbmConfigAggregatorSettingsDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + // This resource does not support a "delete" operation. 
+ d.SetId("") + return nil +} + +func ResourceIbmConfigAggregatorSettingsMapToAdditionalScope(modelMap map[string]interface{}) (*configurationaggregatorv1.AdditionalScope, error) { + model := &configurationaggregatorv1.AdditionalScope{} + if modelMap["type"] != nil && modelMap["type"].(string) != "" { + model.Type = core.StringPtr(modelMap["type"].(string)) + } + if modelMap["enterprise_id"] != nil && modelMap["enterprise_id"].(string) != "" { + model.EnterpriseID = core.StringPtr(modelMap["enterprise_id"].(string)) + } + if modelMap["profile_template"] != nil && len(modelMap["profile_template"].([]interface{})) > 0 { + ProfileTemplateModel, err := ResourceIbmConfigAggregatorSettingsMapToProfileTemplate(modelMap["profile_template"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ProfileTemplate = ProfileTemplateModel + } + return model, nil +} + +func ResourceIbmConfigAggregatorSettingsMapToProfileTemplate(modelMap map[string]interface{}) (*configurationaggregatorv1.ProfileTemplate, error) { + model := &configurationaggregatorv1.ProfileTemplate{} + if modelMap["id"] != nil && modelMap["id"].(string) != "" { + model.ID = core.StringPtr(modelMap["id"].(string)) + } + if modelMap["trusted_profile_id"] != nil && modelMap["trusted_profile_id"].(string) != "" { + model.TrustedProfileID = core.StringPtr(modelMap["trusted_profile_id"].(string)) + } + return model, nil +} + +func ResourceIbmConfigAggregatorSettingsAdditionalScopeToMap(model *configurationaggregatorv1.AdditionalScope) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.EnterpriseID != nil { + modelMap["enterprise_id"] = *model.EnterpriseID + } + if model.ProfileTemplate != nil { + profileTemplateMap, err := ResourceIbmConfigAggregatorSettingsProfileTemplateToMap(model.ProfileTemplate) + if err != nil { + return modelMap, err + } + modelMap["profile_template"] = 
[]map[string]interface{}{profileTemplateMap} + } + return modelMap, nil +} + +func ResourceIbmConfigAggregatorSettingsProfileTemplateToMap(model *configurationaggregatorv1.ProfileTemplate) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.TrustedProfileID != nil { + modelMap["trusted_profile_id"] = *model.TrustedProfileID + } + return modelMap, nil +} diff --git a/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings_test.go b/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings_test.go new file mode 100644 index 0000000000..446116efd3 --- /dev/null +++ b/ibm/service/configurationaggregator/resource_ibm_config_aggregator_settings_test.go @@ -0,0 +1,46 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package configurationaggregator_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIbmConfigAggregatorSettingsBasic(t *testing.T) { + + instanceID := "instance_id" + resourceCollectionEnabled := false + trustedProfileID := "Profile-2546925a-7b46-40dd-81ff-48015a49ff43" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIbmConfigAggregatorSettingsConfigBasic(instanceID, resourceCollectionEnabled, trustedProfileID), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_config_aggregator_settings.config_aggregator_settings_instance", "resource_collection_enabled", fmt.Sprintf("%t", resourceCollectionEnabled)), + resource.TestCheckResourceAttr("ibm_config_aggregator_settings.config_aggregator_settings_instance", "trusted_profile_id", trustedProfileID), + 
resource.TestCheckResourceAttr("ibm_config_aggregator_settings.config_aggregator_settings_instance", "instance_id", instanceID), + ), + }, + }, + }) +} + +func testAccCheckIbmConfigAggregatorSettingsConfigBasic(instanceID string, resourceCollectionEnabled bool, trustedProfileID string) string { + return fmt.Sprintf(` + resource "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + instance_id = "%s" + resource_collection_enabled = %t + trusted_profile_id = "%s" + resource_collection_regions = ["all"] + } + `, instanceID, resourceCollectionEnabled, trustedProfileID) +} diff --git a/ibm/service/configurationaggregator/utils.go b/ibm/service/configurationaggregator/utils.go new file mode 100644 index 0000000000..dd43b56214 --- /dev/null +++ b/ibm/service/configurationaggregator/utils.go @@ -0,0 +1,76 @@ +package configurationaggregator + +import ( + "fmt" + "os" + "strings" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/configuration-aggregator-go-sdk/configurationaggregatorv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + cloudEndpoint = "cloud.ibm.com" + testCloudEndpoint = "test.cloud.ibm.com" +) + +func getConfigurationInstanceRegion(originalClient *configurationaggregatorv1.ConfigurationAggregatorV1, d *schema.ResourceData) string { + _, ok := d.GetOk("region") + if ok { + return d.Get("region").(string) + } + baseUrl := originalClient.Service.GetServiceURL() + url_01 := strings.Split(baseUrl, ".")[0] + return (strings.Split(url_01, "://")[1]) +} + +func getClientWithConfigurationInstanceEndpoint(originalClient *configurationaggregatorv1.ConfigurationAggregatorV1, instanceId string, region string) *configurationaggregatorv1.ConfigurationAggregatorV1 { + // build the api endpoint + domain := cloudEndpoint + if strings.Contains(os.Getenv("IBMCLOUD_IAM_API_ENDPOINT"), "test") { + domain = testCloudEndpoint + } + endpoint := 
fmt.Sprintf("https://%s.apprapp.%s/apprapp/config_aggregator/v1/instances/%s", region, domain, instanceId) + + // clone the client and set endpoint + newClient := &configurationaggregatorv1.ConfigurationAggregatorV1{ + Service: originalClient.Service.Clone(), + } + + newClient.Service.SetServiceURL(endpoint) + + return newClient +} + +func AddConfigurationAggregatorInstanceFields(resource *schema.Resource) *schema.Resource { + resource.Schema["instance_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the configuration aggregator instance.", + } + resource.Schema["region"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "The region of the configuration aggregator instance.", + } + return resource +} + +func updateClientURLWithInstanceEndpoint(id string, configsClient *configurationaggregatorv1.ConfigurationAggregatorV1, d *schema.ResourceData) (*configurationaggregatorv1.ConfigurationAggregatorV1, string, string, error) { + + idList, err := flex.IdParts(id) + if err != nil || len(idList) < 2 { + return configsClient, "", "", fmt.Errorf("Invalid Id %s. 
Error: %s", id, err) + } + + region := idList[0] + instanceId := idList[1] + + configsClient = getClientWithConfigurationInstanceEndpoint(configsClient, instanceId, region) + + return configsClient, region, instanceId, nil +} diff --git a/ibm/service/cos/data_source_ibm_cos_bucket.go b/ibm/service/cos/data_source_ibm_cos_bucket.go index 653fe61390..156cf021c2 100644 --- a/ibm/service/cos/data_source_ibm_cos_bucket.go +++ b/ibm/service/cos/data_source_ibm_cos_bucket.go @@ -700,7 +700,7 @@ func dataSourceIBMCosBucketRead(d *schema.ResourceData, meta interface{}) error keyProtectFlag = true } - var satlc_id, apiEndpoint, apiEndpointPrivate, directApiEndpoint, visibility string + var satlc_id, apiEndpoint, apiEndpointPublic, apiEndpointPrivate, directApiEndpoint, visibility string if satlc, ok := d.GetOk("satellite_location_id"); ok { satlc_id = satlc.(string) @@ -714,7 +714,7 @@ func dataSourceIBMCosBucketRead(d *schema.ResourceData, meta interface{}) error apiEndpoint = SelectSatlocCosApi(bucketType, serviceID, satlc_id) } else { - apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(bucketLocationConvert(bucketType), bucketRegion, false) + apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(bucketLocationConvert(bucketType), bucketRegion) visibility = endpointType if endpointType == "private" { apiEndpoint = apiEndpointPrivate @@ -811,10 +811,16 @@ func dataSourceIBMCosBucketRead(d *schema.ResourceData, meta interface{}) error bucketCRN := fmt.Sprintf("%s:%s:%s", strings.Replace(serviceID, "::", "", -1), "bucket", bucketName) d.Set("crn", bucketCRN) d.Set("resource_instance_id", serviceID) - apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(bucketLocationConvert(bucketType), bucketRegion, strings.Contains(apiEndpoint, "test")) - d.Set("s3_endpoint_public", apiEndpoint) - d.Set("s3_endpoint_private", apiEndpointPrivate) - d.Set("s3_endpoint_direct", directApiEndpoint) + + testEnv := strings.Contains(apiEndpoint, ".test.") + 
apiEndpointPublic, apiEndpointPrivate, directApiEndpoint = SelectCosApi(bucketLocationConvert(bucketType), bucketRegion) + if testEnv { + d.Set(fmt.Sprintf("s3_endpoint_%s", endpointType), apiEndpoint) + } else { + d.Set("s3_endpoint_public", apiEndpointPublic) + d.Set("s3_endpoint_private", apiEndpointPrivate) + d.Set("s3_endpoint_direct", directApiEndpoint) + } sess, err := meta.(conns.ClientSession).CosConfigV1API() if err != nil { return err diff --git a/ibm/service/cos/resource_ibm_cos_bucket.go b/ibm/service/cos/resource_ibm_cos_bucket.go index 912f2b51dd..8151953f86 100644 --- a/ibm/service/cos/resource_ibm_cos_bucket.go +++ b/ibm/service/cos/resource_ibm_cos_bucket.go @@ -762,7 +762,7 @@ func resourceIBMCOSBucketUpdate(d *schema.ResourceData, meta interface{}) error if apiType == "sl" { apiEndpoint = SelectSatlocCosApi(apiType, serviceID, bLocation) } else { - apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation, false) + apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation) visibility = endpointType if endpointType == "private" { apiEndpoint = apiEndpointPrivate @@ -1129,7 +1129,7 @@ func resourceIBMCOSBucketRead(d *schema.ResourceData, meta interface{}) error { if apiType == "sl" { apiEndpoint = SelectSatlocCosApi(apiType, serviceID, bLocation) } else { - apiEndpointPublic, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation, false) + apiEndpointPublic, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation) apiEndpoint = apiEndpointPublic if endpointType == "private" { apiEndpoint = apiEndpointPrivate @@ -1218,10 +1218,16 @@ func resourceIBMCOSBucketRead(d *schema.ResourceData, meta interface{}) error { d.Set("resource_instance_id", serviceID) d.Set("bucket_name", bucketName) - apiEndpointPublic, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation, strings.Contains(apiEndpoint, "test")) - d.Set("s3_endpoint_public", apiEndpointPublic) 
- d.Set("s3_endpoint_private", apiEndpointPrivate) - d.Set("s3_endpoint_direct", directApiEndpoint) + testEnv := strings.Contains(apiEndpoint, ".test.") + apiEndpointPublic, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation) + + if testEnv { + d.Set(fmt.Sprintf("s3_endpoint_%s", endpointType), apiEndpoint) + } else { + d.Set("s3_endpoint_public", apiEndpointPublic) + d.Set("s3_endpoint_private", apiEndpointPrivate) + d.Set("s3_endpoint_direct", directApiEndpoint) + } if endpointType != "" { d.Set("endpoint_type", endpointType) } @@ -1241,7 +1247,6 @@ func resourceIBMCOSBucketRead(d *schema.ResourceData, meta interface{}) error { if apiType == "sl" { satconfig := fmt.Sprintf("https://config.%s.%s.cloud-object-storage.appdomain.cloud/v1", serviceID, bLocation) - sess.SetServiceURL(satconfig) } @@ -1427,7 +1432,7 @@ func resourceIBMCOSBucketCreate(d *schema.ResourceData, meta interface{}) error apiEndpoint = SelectSatlocCosApi(apiType, serviceID, bLocation) } else { - apiEndpoint, privateApiEndpoint, directApiEndpoint = SelectCosApi(apiType, bLocation, false) + apiEndpoint, privateApiEndpoint, directApiEndpoint = SelectCosApi(apiType, bLocation) visibility = endpointType if endpointType == "private" { apiEndpoint = privateApiEndpoint @@ -1552,7 +1557,7 @@ func resourceIBMCOSBucketDelete(d *schema.ResourceData, meta interface{}) error apiEndpoint = SelectSatlocCosApi(apiType, serviceID, bLocation) } else { - apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation, false) + apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation) visibility = endpointType if endpointType == "private" { apiEndpoint = apiEndpointPrivate @@ -1680,22 +1685,27 @@ func resourceIBMCOSBucketExists(d *schema.ResourceData, meta interface{}) (bool, serviceID = bucketsatcrn } - var apiEndpoint, apiEndpointPrivate, directApiEndpoint string + var apiEndpoint, apiEndpointPrivate, directApiEndpoint, visibility string if 
apiType == "sl" { apiEndpoint = SelectSatlocCosApi(apiType, serviceID, bLocation) } else { - apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation, false) + apiEndpoint, apiEndpointPrivate, directApiEndpoint = SelectCosApi(apiType, bLocation) + visibility = endpointType if endpointType == "private" { apiEndpoint = apiEndpointPrivate } if endpointType == "direct" { + // visibility type "direct" is not supported in endpoints file. + visibility = "private" apiEndpoint = directApiEndpoint } } + apiEndpoint = conns.FileFallBack(rsConClient.Config.EndpointsFile, visibility, "IBMCLOUD_COS_ENDPOINT", bLocation, apiEndpoint) + apiEndpoint = conns.EnvFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) if apiEndpoint == "" { @@ -1740,11 +1750,8 @@ func resourceIBMCOSBucketExists(d *schema.ResourceData, meta interface{}) (bool, return false, nil } -func SelectCosApi(apiType string, bLocation string, test bool) (string, string, string) { +func SelectCosApi(apiType string, bLocation string) (string, string, string) { hostUrl := "cloud-object-storage.appdomain.cloud" - if test { - hostUrl = "cloud-object-storage.test.appdomain.cloud" - } if apiType == "crl" { return fmt.Sprintf("s3.%s.%s", bLocation, hostUrl), fmt.Sprintf("s3.private.%s.%s", bLocation, hostUrl), fmt.Sprintf("s3.direct.%s.%s", bLocation, hostUrl) } diff --git a/ibm/service/cos/resource_ibm_cos_bucket_object.go b/ibm/service/cos/resource_ibm_cos_bucket_object.go index 4868e74064..e18678b28f 100644 --- a/ibm/service/cos/resource_ibm_cos_bucket_object.go +++ b/ibm/service/cos/resource_ibm_cos_bucket_object.go @@ -476,12 +476,9 @@ func resourceIBMCOSBucketObjectDelete(ctx context.Context, d *schema.ResourceDat return nil } -func getCosEndpoint(bucketLocation string, endpointType string, test bool) string { +func getCosEndpoint(bucketLocation string, endpointType string) string { if bucketLocation != "" { hostUrl := "cloud-object-storage.appdomain.cloud" - if test { - hostUrl = 
"cloud-object-storage.test.appdomain.cloud" - } switch endpointType { case "public": return fmt.Sprintf("s3.%s.%s", bucketLocation, hostUrl) @@ -502,7 +499,7 @@ func getS3Client(bxSession *bxsession.Session, bucketLocation string, endpointTy if endpointType == "direct" { visibility = "private" } - apiEndpoint := getCosEndpoint(bucketLocation, endpointType, false) + apiEndpoint := getCosEndpoint(bucketLocation, endpointType) apiEndpoint = conns.FileFallBack(bxSession.Config.EndpointsFile, visibility, "IBMCLOUD_COS_ENDPOINT", bucketLocation, apiEndpoint) apiEndpoint = conns.EnvFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) if apiEndpoint == "" { diff --git a/ibm/service/cos/resource_ibm_cos_bucket_test.go b/ibm/service/cos/resource_ibm_cos_bucket_test.go index 1a27c0ba49..8cc51802de 100644 --- a/ibm/service/cos/resource_ibm_cos_bucket_test.go +++ b/ibm/service/cos/resource_ibm_cos_bucket_test.go @@ -2242,7 +2242,7 @@ func testAccCheckIBMCosBucketExists(resource string, bucket string, regiontype s rt = "crl" } - apiEndpoint, _, _ := cos.SelectCosApi(rt, region, false) + apiEndpoint, _, _ := cos.SelectCosApi(rt, region) rsContClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).BluemixSession() if err != nil { diff --git a/ibm/service/cos/resource_ibm_cos_replication_configuration.go b/ibm/service/cos/resource_ibm_cos_replication_configuration.go index fbd461d899..d60b920565 100644 --- a/ibm/service/cos/resource_ibm_cos_replication_configuration.go +++ b/ibm/service/cos/resource_ibm_cos_replication_configuration.go @@ -334,13 +334,10 @@ func parseBucketReplId(id string, info string) string { return parseBucketId(bucketCRN, info) } -func getCosEndpointType(bucketLocation string, endpointType string, test bool) string { +func getCosEndpointType(bucketLocation string, endpointType string) string { if bucketLocation != "" { hostUrl := "cloud-object-storage.appdomain.cloud" - if test { - hostUrl = "cloud-object-storage.test.appdomain.cloud" - } switch 
endpointType { case "public": return fmt.Sprintf("s3.%s.%s", bucketLocation, hostUrl) @@ -363,7 +360,7 @@ func getS3ClientSession(bxSession *bxsession.Session, bucketLocation string, end if endpointType == "direct" { visibility = "private" } - apiEndpoint := getCosEndpointType(bucketLocation, endpointType, false) + apiEndpoint := getCosEndpointType(bucketLocation, endpointType) apiEndpoint = conns.FileFallBack(bxSession.Config.EndpointsFile, visibility, "IBMCLOUD_COS_ENDPOINT", bucketLocation, apiEndpoint) apiEndpoint = conns.EnvFallBack([]string{"IBMCLOUD_COS_ENDPOINT"}, apiEndpoint) if apiEndpoint == "" { diff --git a/ibm/service/eventstreams/data_source_ibm_event_streams_quota.go b/ibm/service/eventstreams/data_source_ibm_event_streams_quota.go new file mode 100644 index 0000000000..b521d261ca --- /dev/null +++ b/ibm/service/eventstreams/data_source_ibm_event_streams_quota.go @@ -0,0 +1,129 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package eventstreams + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/eventstreams-go-sdk/pkg/adminrestv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// A quota in an Event Streams service instance. +// The ID is the CRN with the last two components "quota:entity". 
+func DataSourceIBMEventStreamsQuota() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMEventStreamsQuotaRead, + + Schema: map[string]*schema.Schema{ + "resource_instance_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID or CRN of the Event Streams service instance", + }, + "entity": { + Type: schema.TypeString, + Required: true, + Description: "The entity for which the quota is set; 'default' or IAM ID", + }, + "producer_byte_rate": { + Type: schema.TypeInt, + Computed: true, + Description: "The producer quota in bytes per second, -1 means no quota", + }, + "consumer_byte_rate": { + Type: schema.TypeInt, + Computed: true, + Description: "The consumer quota in bytes per second, -1 means no quota", + }, + }, + } +} + +// read quota properties using the admin-rest API +func dataSourceIBMEventStreamsQuotaRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminrestClient, instanceCRN, entity, err := getQuotaClientInstanceEntity(d, meta) + if err != nil { + tfErr := flex.TerraformErrorf(err, "Error getting Event Streams instance", "ibm_event_streams_quota", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + getQuotaOptions := &adminrestv1.GetQuotaOptions{} + getQuotaOptions.SetEntityName(entity) + quota, response, err := adminrestClient.GetQuotaWithContext(context, getQuotaOptions) + if err != nil { + var tfErr *flex.TerraformProblem + if response != nil && response.StatusCode == 404 { + tfErr = flex.TerraformErrorf(err, fmt.Sprintf("Quota for '%s' does not exist", entity), "ibm_event_streams_quota", "read") + } else { + tfErr = flex.TerraformErrorf(err, fmt.Sprintf("GetQuota failed with response: %s", response), "ibm_event_streams_quota", "read") + } + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + d.Set("resource_instance_id", instanceCRN) + d.Set("entity", entity) + d.Set("producer_byte_rate", 
getQuotaValue(quota.ProducerByteRate)) + d.Set("consumer_byte_rate", getQuotaValue(quota.ConsumerByteRate)) + d.SetId(getQuotaID(instanceCRN, entity)) + + return nil +} + +// Returns +// admin-rest client (set to use the service instance) +// CRN for the service instance +// entity name +// Any error that occurred +func getQuotaClientInstanceEntity(d *schema.ResourceData, meta interface{}) (*adminrestv1.AdminrestV1, string, string, error) { + adminrestClient, err := meta.(conns.ClientSession).ESadminRestSession() + if err != nil { + return nil, "", "", err + } + instanceCRN := d.Get("resource_instance_id").(string) + if instanceCRN == "" { // importing + id := d.Id() + crnSegments := strings.Split(id, ":") + if len(crnSegments) != 10 || crnSegments[8] != "quota" || crnSegments[9] == "" { + return nil, "", "", fmt.Errorf("ID '%s' is not a quota resource", id) + } + entity := crnSegments[9] + crnSegments[8] = "" + crnSegments[9] = "" + instanceCRN = strings.Join(crnSegments, ":") + d.Set("resource_instance_id", instanceCRN) + d.Set("entity", entity) + } + + instance, err := getInstanceDetails(instanceCRN, meta) + if err != nil { + return nil, "", "", err + } + adminURL := instance.Extensions["kafka_http_url"].(string) + adminrestClient.SetServiceURL(adminURL) + return adminrestClient, instanceCRN, d.Get("entity").(string), nil +} + +func getQuotaID(instanceCRN string, entity string) string { + crnSegments := strings.Split(instanceCRN, ":") + crnSegments[8] = "quota" + crnSegments[9] = entity + return strings.Join(crnSegments, ":") +} + +// admin-rest API returns nil for undefined rate, convert that to -1 +func getQuotaValue(v *int64) int { + if v == nil { + return -1 + } + return int(*v) +} diff --git a/ibm/service/eventstreams/data_source_ibm_event_streams_quota_test.go b/ibm/service/eventstreams/data_source_ibm_event_streams_quota_test.go new file mode 100644 index 0000000000..8bff410848 --- /dev/null +++ 
b/ibm/service/eventstreams/data_source_ibm_event_streams_quota_test.go @@ -0,0 +1,84 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package eventstreams_test + +import ( + "fmt" + "strings" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const ( + // Data source test requires MZR instance have this quota with producer rate 10000, consumer rate 20000 + testQuotaEntity1 = "iam-ServiceId-00001111-2222-3333-4444-555566667777" + // Data source test requires MZR instance have this quota with producer rate 4096, consumer rate not defined + testQuotaEntity2 = "iam-ServiceId-77776666-5555-4444-3333-222211110000" + // Resource test requires MZR instance NOT have a quota for this + testQuotaEntity3 = "iam-ServiceId-99998888-7777-6666-5555-444433332222" + // Resource test requires MZR instance NOT have a quota for this + testQuotaEntity4 = "default" +) + +func TestAccIBMEventStreamsQuotaDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMEventStreamsQuotaDataSourceConfig(getTestInstanceName(mzrKey), testQuotaEntity1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMEventStreamsQuotaDataSourceProperties("data.ibm_event_streams_quota.es_quota", testQuotaEntity1, "10000", "20000"), + ), + }, + { + Config: testAccCheckIBMEventStreamsQuotaDataSourceConfig(getTestInstanceName(mzrKey), testQuotaEntity2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMEventStreamsQuotaDataSourceProperties("data.ibm_event_streams_quota.es_quota", testQuotaEntity2, "4096", "-1"), + ), + }, + }, + }) +} + +func testAccCheckIBMEventStreamsQuotaDataSourceConfig(instanceName string, entity string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "group" { + 
is_default=true + } + data "ibm_resource_instance" "es_instance" { + resource_group_id = data.ibm_resource_group.group.id + name = "%s" + } + data "ibm_event_streams_quota" "es_quota" { + resource_instance_id = data.ibm_resource_instance.es_instance.id + entity = "%s" + }`, instanceName, entity) +} + +// check properties of the terraform data source object +func testAccCheckIBMEventStreamsQuotaDataSourceProperties(name string, entity string, producerRate string, consumerRate string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + quotaID := rs.Primary.ID + if quotaID == "" { + return fmt.Errorf("[ERROR] Quota ID is not set") + } + if !strings.HasSuffix(quotaID, fmt.Sprintf(":quota:%s", entity)) { + return fmt.Errorf("[ERROR] Quota ID for %s not expected CRN", quotaID) + } + if producerRate != rs.Primary.Attributes["producer_byte_rate"] { + return fmt.Errorf("[ERROR] Quota for %s producer_byte_rate = %s, expected %s", entity, rs.Primary.Attributes["producer_byte_rate"], producerRate) + } + if consumerRate != rs.Primary.Attributes["consumer_byte_rate"] { + return fmt.Errorf("[ERROR] Quota for %s consumer_byte_rate = %s, expected %s", entity, rs.Primary.Attributes["consumer_byte_rate"], consumerRate) + } + return nil + } +} diff --git a/ibm/service/eventstreams/resource_ibm_event_streams_quota.go b/ibm/service/eventstreams/resource_ibm_event_streams_quota.go new file mode 100644 index 0000000000..be80fdf728 --- /dev/null +++ b/ibm/service/eventstreams/resource_ibm_event_streams_quota.go @@ -0,0 +1,166 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package eventstreams + +import ( + "context" + "fmt" + "log" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/eventstreams-go-sdk/pkg/adminrestv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +// A quota in an Event Streams service instance. +// The ID is the CRN with the last two components "quota:entity". +// The producer_byte_rate and consumer_byte_rate are the two quota properties, and must be at least -1; +// -1 means no quota applied. +func ResourceIBMEventStreamsQuota() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMEventStreamsQuotaCreate, + ReadContext: resourceIBMEventStreamsQuotaRead, + UpdateContext: resourceIBMEventStreamsQuotaUpdate, + DeleteContext: resourceIBMEventStreamsQuotaDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "resource_instance_id": { + Type: schema.TypeString, + Description: "The ID or the CRN of the Event Streams service instance", + Required: true, + ForceNew: true, + }, + "entity": { + Type: schema.TypeString, + Required: true, + Description: "The entity for which the quota is set; 'default' or IAM ID", + }, + "producer_byte_rate": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(-1), + Description: "The producer quota in bytes per second, -1 means no quota", + }, + "consumer_byte_rate": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(-1), + Description: "The consumer quota in bytes per second, -1 means no quota", + }, + }, + } +} + +func resourceIBMEventStreamsQuotaCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminrestClient, instanceCRN, entity, err := getQuotaClientInstanceEntity(d, meta) + if err != nil { + tfErr := 
flex.TerraformErrorf(err, "Error getting Event Streams instance", "ibm_event_streams_quota", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + createQuotaOptions := &adminrestv1.CreateQuotaOptions{} + createQuotaOptions.SetEntityName(entity) + pbr := d.Get("producer_byte_rate").(int) + cbr := d.Get("consumer_byte_rate").(int) + if pbr == -1 && cbr == -1 { + return diag.FromErr(fmt.Errorf("Quota for %s cannot be created: producer_byte_rate and consumer_byte_rate are both -1 (no quota)", entity)) + } + if pbr != -1 { + createQuotaOptions.SetProducerByteRate(int64(pbr)) + } + if cbr != -1 { + createQuotaOptions.SetConsumerByteRate(int64(cbr)) + } + + response, err := adminrestClient.CreateQuotaWithContext(context, createQuotaOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateQuota failed with response: %s", response), "ibm_event_streams_quota", "create") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + d.SetId(getQuotaID(instanceCRN, entity)) + + return resourceIBMEventStreamsQuotaRead(context, d, meta) +} + +func resourceIBMEventStreamsQuotaRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminrestClient, instanceCRN, entity, err := getQuotaClientInstanceEntity(d, meta) + if err != nil { + tfErr := flex.TerraformErrorf(err, "Error getting Event Streams instance", "ibm_event_streams_quota", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + getQuotaOptions := &adminrestv1.GetQuotaOptions{} + getQuotaOptions.SetEntityName(entity) + quota, response, err := adminrestClient.GetQuotaWithContext(context, getQuotaOptions) + if err != nil || quota == nil { + d.SetId("") + var tfErr *flex.TerraformProblem + if response != nil && response.StatusCode == 404 { + tfErr = flex.TerraformErrorf(err, fmt.Sprintf("Quota for '%s' does not exist", entity), "ibm_event_streams_quota", "read") 
+ } else { + tfErr = flex.TerraformErrorf(err, fmt.Sprintf("GetQuota failed with response: %s", response), "ibm_event_streams_quota", "read") + } + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + d.Set("resource_instance_id", instanceCRN) + d.Set("entity", entity) + d.Set("producer_byte_rate", getQuotaValue(quota.ProducerByteRate)) + d.Set("consumer_byte_rate", getQuotaValue(quota.ConsumerByteRate)) + d.SetId(getQuotaID(instanceCRN, entity)) + + return nil +} + +func resourceIBMEventStreamsQuotaUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + if d.HasChange("producer_byte_rate") || d.HasChange("consumer_byte_rate") { + adminrestClient, _, entity, err := getQuotaClientInstanceEntity(d, meta) + if err != nil { + tfErr := flex.TerraformErrorf(err, "Error getting Event Streams instance", "ibm_event_streams_quota", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + updateQuotaOptions := &adminrestv1.UpdateQuotaOptions{} + updateQuotaOptions.SetEntityName(entity) + updateQuotaOptions.SetProducerByteRate(int64(d.Get("producer_byte_rate").(int))) + updateQuotaOptions.SetConsumerByteRate(int64(d.Get("consumer_byte_rate").(int))) + + response, err := adminrestClient.UpdateQuotaWithContext(context, updateQuotaOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("UpdateQuota failed with response: %s", response), "ibm_event_streams_quota", "update") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + } + return resourceIBMEventStreamsQuotaRead(context, d, meta) +} + +func resourceIBMEventStreamsQuotaDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminrestClient, _, entity, err := getQuotaClientInstanceEntity(d, meta) + if err != nil { + tfErr := flex.TerraformErrorf(err, "Error getting Event Streams instance", "ibm_event_streams_quota", "delete") + 
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + + deleteQuotaOptions := &adminrestv1.DeleteQuotaOptions{} + deleteQuotaOptions.SetEntityName(entity) + + response, err := adminrestClient.DeleteQuotaWithContext(context, deleteQuotaOptions) + if err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteQuota failed with response: %s", response), "ibm_event_streams_quota", "delete") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() + } + d.SetId("") + return nil +} diff --git a/ibm/service/eventstreams/resource_ibm_event_streams_quota_test.go b/ibm/service/eventstreams/resource_ibm_event_streams_quota_test.go new file mode 100644 index 0000000000..83b1f5733d --- /dev/null +++ b/ibm/service/eventstreams/resource_ibm_event_streams_quota_test.go @@ -0,0 +1,138 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package eventstreams_test + +import ( + "fmt" + "strings" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/eventstreams-go-sdk/pkg/adminrestv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccIBMEventStreamsQuotaResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMEventStreamsQuotasDeletedFromInstance(testQuotaEntity3, testQuotaEntity4), + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMEventStreamsQuotaResourceConfig(getTestInstanceName(mzrKey), 0, testQuotaEntity3, 2048, 1024), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMEventStreamsQuotaResourceProperties("ibm_event_streams_quota.es_quota0", testQuotaEntity3), + testAccCheckIBMEventStreamsQuotaWasSetInInstance(testQuotaEntity3, 2048, 1024), 
+ resource.TestCheckResourceAttrSet("ibm_event_streams_quota.es_quota0", "id"), + resource.TestCheckResourceAttr("ibm_event_streams_quota.es_quota0", "producer_byte_rate", "2048"), + resource.TestCheckResourceAttr("ibm_event_streams_quota.es_quota0", "consumer_byte_rate", "1024"), + ), + }, + { + Config: testAccCheckIBMEventStreamsQuotaResourceConfig(getTestInstanceName(mzrKey), 1, testQuotaEntity4, 100000000, -1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMEventStreamsQuotaResourceProperties("ibm_event_streams_quota.es_quota1", testQuotaEntity4), + testAccCheckIBMEventStreamsQuotaWasSetInInstance(testQuotaEntity4, 100000000, -1), + resource.TestCheckResourceAttrSet("ibm_event_streams_quota.es_quota1", "id"), + resource.TestCheckResourceAttr("ibm_event_streams_quota.es_quota1", "producer_byte_rate", "100000000"), + resource.TestCheckResourceAttr("ibm_event_streams_quota.es_quota1", "consumer_byte_rate", "-1"), + ), + }, + { + ResourceName: "ibm_event_streams_quota.es_quota1", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckIBMEventStreamsQuotaResourceConfig(instanceName string, tnum int, entity string, producerRate int, consumerRate int) string { + return fmt.Sprintf(` + data "ibm_resource_group" "group" { + is_default=true + } + data "ibm_resource_instance" "es_instance" { + resource_group_id = data.ibm_resource_group.group.id + name = "%s" + } + resource "ibm_event_streams_quota" "es_quota%d" { + resource_instance_id = data.ibm_resource_instance.es_instance.id + entity = "%s" + producer_byte_rate = %d + consumer_byte_rate = %d + }`, instanceName, tnum, entity, producerRate, consumerRate) +} + +// check properties of the terraform resource object +func testAccCheckIBMEventStreamsQuotaResourceProperties(name string, entity string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + quotaID 
:= rs.Primary.ID + if quotaID == "" { + return fmt.Errorf("[ERROR] Quota ID is not set") + } + if !strings.HasSuffix(quotaID, fmt.Sprintf(":quota:%s", entity)) { + return fmt.Errorf("[ERROR] Quota ID %s not expected CRN", quotaID) + } + return nil + } +} + +// go to the Event Streams instance and check the quota has been set +func testAccCheckIBMEventStreamsQuotaWasSetInInstance(entity string, producerRate int, consumerRate int) resource.TestCheckFunc { + return func(s *terraform.State) error { + adminClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).ESadminRestSession() + if err != nil { + return fmt.Errorf("[ERROR] ESadminRestSession returned %v", err) + } + entityName := entity + qd, _, err := adminClient.GetQuota(&adminrestv1.GetQuotaOptions{EntityName: &entityName}) + if err != nil { + return fmt.Errorf("[ERROR] GetQuota returned %v", err) + } + qdp := testGetQuotaValue(qd.ProducerByteRate) + if producerRate != qdp { + return fmt.Errorf("[ERROR] quota producer byte rate expected %d, got %d", producerRate, qdp) + } + qdc := testGetQuotaValue(qd.ConsumerByteRate) + if consumerRate != qdc { + return fmt.Errorf("[ERROR] quota consumer byte rate expected %d, got %d", consumerRate, qdc) + } + return nil + } +} + +// go to the Event Streams instance and check the quota has been destroyed +func testAccCheckIBMEventStreamsQuotasDeletedFromInstance(entities ...string) func(*terraform.State) error { + return func(s *terraform.State) error { + adminClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).ESadminRestSession() + if err != nil { + return fmt.Errorf("[ERROR] ESadminRestSession returned %v", err) + } + for _, entity := range entities { + entityName := entity + qd, response, err := adminClient.GetQuota(&adminrestv1.GetQuotaOptions{EntityName: &entityName}) + if err == nil { + return fmt.Errorf("[ERROR] Expected no quota for %s, but GetQuota succeeded (%d,%d)", entity, testGetQuotaValue(qd.ProducerByteRate), 
testGetQuotaValue(qd.ConsumerByteRate)) + } + if response != nil && response.StatusCode != 404 { + return fmt.Errorf("[ERROR] Expected 404 NotFound for %s, but GetQuota response was %d", entity, response.StatusCode) + } + } + return nil + } +} + +func testGetQuotaValue(v *int64) int { + if v == nil { + return -1 + } + return int(*v) +} diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index b09ba8b9ae..0b9a09208f 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -211,7 +211,13 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Type: schema.TypeBool, Optional: true, DiffSuppressFunc: flex.ApplyOnce, - Description: "Import an existing WorkerPool from the cluster, instead of creating a new", + Description: "Import an existing workerpool from the cluster instead of creating a new", + }, + + "orphan_on_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "Orphan the workerpool resource instead of deleting it", }, "autoscale_enabled": { @@ -715,14 +721,22 @@ func resourceIBMContainerVpcWorkerPoolDelete(d *schema.ResourceData, meta interf if err != nil { return err } - - err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) - if err != nil { - return err + var orphan_on_delete bool = false + if orod, ok := d.GetOk("orphan_on_delete"); ok { + orphan_on_delete = orod.(bool) } - _, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + + if orphan_on_delete { + log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) + } else { + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, 
workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForVpcWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutDelete), targetEnv) + if err != nil { + return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } } d.SetId("") return nil @@ -788,7 +802,7 @@ func WaitForWorkerPoolAvailable(d *schema.ResourceData, meta interface{}, cluste func vpcWorkerPoolStateRefreshFunc(client v2.Workers, instanceID string, workerPoolNameOrID string, target v2.ClusterTargetHeader) resource.StateRefreshFunc { return func() (interface{}, string, error) { - workerFields, err := client.ListByWorkerPool(instanceID, "", false, target) + workerFields, err := client.ListByWorkerPool(instanceID, workerPoolNameOrID, false, target) if err != nil { return nil, "", fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err) } diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go index 43f762b0c3..54418c2509 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go @@ -21,7 +21,7 @@ import ( func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { - name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-vpc-workerpoolbasic-%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -36,6 +36,16 @@ func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", 
"1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "flavor", "cx2.2x4"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "zones.#", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "labels.%", "0"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "worker_count", "1"), ), }, { @@ -44,17 +54,32 @@ func TestAccIBMContainerVpcClusterWorkerPoolBasic(t *testing.T) { resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "flavor", "cx2.2x4"), resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "zones.#", "2"), + "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "labels.%", "3"), resource.TestCheckResourceAttr( "ibm_container_vpc_worker_pool.test_pool", "operating_system", "UBUNTU_24_64"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "flavor", "cx2.2x4"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "zones.#", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.default_pool", "worker_count", "2"), ), }, { - ResourceName: "ibm_container_vpc_worker_pool.test_pool", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_container_vpc_worker_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"orphan_on_delete", "import_on_create"}, + }, + { + Config: testAccCheckIBMVpcContainerWorkerPoolUpdate(name), + Destroy: true, }, }, }) @@ -149,56 +174,60 @@ func testAccCheckIBMVpcContainerWorkerPoolDestroy(s *terraform.State) error { return nil } -func 
testAccCheckIBMVpcContainerWorkerPoolBasic(name string) string { +func testAccCheckIBMVpcContainerWorkerPoolBasic(cluster_name string) string { + workerpool_name := cluster_name + "-wp" return fmt.Sprintf(` - provider "ibm" { - region="us-south" - } data "ibm_resource_group" "resource_group" { is_default=true } - data "ibm_is_vpc" "vpc" { - name = "cluster-squad-dallas-test" - } - - data "ibm_is_subnet" "subnet1" { - name = "cluster-squad-dallas-test-01" - } - - data "ibm_is_subnet" "subnet2" { - name = "cluster-squad-dallas-test-02" - } resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = data.ibm_is_vpc.vpc.id + name = "%[3]s" + vpc_id = "%[1]s" flavor = "cx2.2x4" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id wait_till = "MasterNodeReady" zones { - subnet_id = data.ibm_is_subnet.subnet1.id + subnet_id = "%[2]s" name = "us-south-1" } } + + resource "ibm_container_vpc_worker_pool" "default_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[1]s" + flavor = "cx2.2x4" + worker_count = 1 + worker_pool_name = "default" + zones { + subnet_id = "%[2]s" + name = "us-south-1" + } + import_on_create = "true" + } resource "ibm_container_vpc_worker_pool" "test_pool" { cluster = ibm_container_vpc_cluster.cluster.id - worker_pool_name = "%[1]s" + worker_pool_name = "%[4]s" flavor = "cx2.2x4" - vpc_id = data.ibm_is_vpc.vpc.id + vpc_id = "%[1]s" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id operating_system = "UBUNTU_20_64" zones { - name = "us-south-2" - subnet_id = data.ibm_is_subnet.subnet2.id + name = "us-south-1" + subnet_id = "%[2]s" } labels = { "test" = "test-pool" "test1" = "test-pool1" } + depends_on = [ + ibm_container_vpc_worker_pool.default_pool + ] } - `, name) + `, acc.IksClusterVpcID, acc.IksClusterSubnetID, cluster_name, workerpool_name) + } func testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name string) string { @@ -265,64 +294,64 @@ func 
testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name string) string { `, name) } -func testAccCheckIBMVpcContainerWorkerPoolUpdate(name string) string { +func testAccCheckIBMVpcContainerWorkerPoolUpdate(cluster_name string) string { + workerpool_name := cluster_name + "-wp" return fmt.Sprintf(` - provider "ibm" { - region="eu-de" - } data "ibm_resource_group" "resource_group" { is_default=true } - resource "ibm_is_vpc" "vpc" { - name = "%[1]s" - } - resource "ibm_is_subnet" "subnet1" { - name = "%[1]s-1" - vpc = ibm_is_vpc.vpc.id - zone = "eu-de-1" - total_ipv4_address_count = 256 - } - resource "ibm_is_subnet" "subnet2" { - name = "%[1]s-2" - vpc = ibm_is_vpc.vpc.id - zone = "eu-de-2" - total_ipv4_address_count = 256 - } + resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = ibm_is_vpc.vpc.id + name = "%[3]s" + vpc_id = "%[1]s" flavor = "cx2.2x4" worker_count = 1 resource_group_id = data.ibm_resource_group.resource_group.id wait_till = "MasterNodeReady" zones { - subnet_id = ibm_is_subnet.subnet1.id - name = "eu-de-1" + subnet_id = "%[2]s" + name = "us-south-1" + } + } + resource "ibm_container_vpc_worker_pool" "default_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[1]s" + flavor = "cx2.2x4" + worker_count = 2 + worker_pool_name = "default" + zones { + subnet_id = "%[2]s" + name = "us-south-1" + } + import_on_create = "true" + labels = { + "test" = "default-pool" + "test1" = "default-pool1" } } resource "ibm_container_vpc_worker_pool" "test_pool" { cluster = ibm_container_vpc_cluster.cluster.id - worker_pool_name = "%[1]s" + worker_pool_name = "%[4]s" flavor = "cx2.2x4" - vpc_id = ibm_is_vpc.vpc.id - worker_count = 1 + vpc_id = "%[1]s" + worker_count = 2 resource_group_id = data.ibm_resource_group.resource_group.id operating_system = "UBUNTU_24_64" zones { - name = "eu-de-2" - subnet_id = ibm_is_subnet.subnet2.id - } - zones { - subnet_id = ibm_is_subnet.subnet1.id - name = "eu-de-1" + name = "us-south-1" + 
subnet_id = "%[2]s" } labels = { "test" = "test-pool" "test1" = "test-pool1" "test2" = "test-pool2" } + depends_on = [ + ibm_container_vpc_worker_pool.default_pool + ] + orphan_on_delete = "true" } - `, name) + `, acc.IksClusterVpcID, acc.IksClusterSubnetID, cluster_name, workerpool_name) } func TestAccIBMContainerVpcClusterWorkerPoolEnvvar(t *testing.T) { @@ -548,147 +577,3 @@ func testAccCheckIBMOpcContainerWorkerPoolBasic(name, openshiftFlavour, openShif } `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, openshiftFlavour, openShiftworkerCount, operatingSystem) } - -func TestAccIBMContainerVpcClusterWorkerPoolImportOnCreateEnvvar(t *testing.T) { - - name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIBMVpcContainerWorkerPoolDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), - ), - }, - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), - ), - }, - { - Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name), - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), - resource.TestCheckResourceAttr( - "ibm_container_vpc_worker_pool.test_pool", "worker_count", "3"), - ), - }, - }, - }) -} -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name string) string { - return fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - resource_group_id = "%[3]s" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} - -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name string) string { - return fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 3 - resource_group_id = "%[3]s" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} - -func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name string) string { - return 
fmt.Sprintf(` - resource "ibm_container_vpc_cluster" "cluster" { - name = "%[1]s" - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 1 - resource_group_id = "%[3]s" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - wait_till = "normal" - worker_labels = { - "test" = "test-pool" - } - } - - resource "ibm_container_vpc_worker_pool" "test_pool" { - cluster = ibm_container_vpc_cluster.cluster.id - vpc_id = "%[2]s" - flavor = "bx2.4x16" - worker_count = 3 - worker_pool_name = "default" - zones { - subnet_id = "%[4]s" - name = "us-south-1" - } - import_on_create = "true" - } - `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) -} diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go index 7e2aaf522c..46d33b7120 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go @@ -5,6 +5,7 @@ package kubernetes import ( "fmt" + "log" "strings" "time" @@ -196,7 +197,13 @@ func ResourceIBMContainerWorkerPool() *schema.Resource { Type: schema.TypeBool, Optional: true, DiffSuppressFunc: flex.ApplyOnce, - Description: "Import a workerpool from a cluster", + Description: "Import an existing workerpool from the cluster instead of creating a new", + }, + + "orphan_on_delete": { + Type: schema.TypeBool, + Optional: true, + Description: "Orphan the workerpool resource instead of deleting it", }, "autoscale_enabled": { @@ -475,13 +482,22 @@ func resourceIBMContainerWorkerPoolDelete(d *schema.ResourceData, meta interface return err } - err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) - if err != nil { - return err + var orphan_on_delete bool = false + if orod, ok := d.GetOk("orphan_on_delete"); ok { + orphan_on_delete = orod.(bool) } - _, err = WaitForWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), 
targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + + if orphan_on_delete { + log.Printf("[WARN] orphaning %s workerpool", workerPoolNameorID) + } else { + err = workerPoolsAPI.DeleteWorkerPool(clusterNameorID, workerPoolNameorID, targetEnv) + if err != nil { + return err + } + _, err = WaitForWorkerDelete(clusterNameorID, workerPoolNameorID, meta, d.Timeout(schema.TimeoutUpdate), targetEnv) + if err != nil { + return fmt.Errorf("[ERROR] Error waiting for removing workers of worker pool (%s) of cluster (%s): %s", workerPoolNameorID, clusterNameorID, err) + } } return nil } diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go index 330d7c82ad..0dd451e81b 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go @@ -44,6 +44,12 @@ func TestAccIBMContainerWorkerPoolBasic(t *testing.T) { "ibm_container_worker_pool.test_pool", "disk_encryption", "true"), resource.TestCheckResourceAttr( "ibm_container_worker_pool.test_pool", "hardware", "shared"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "size_per_zone", "1"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "labels.%", "0"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "hardware", "shared"), ), }, { @@ -61,6 +67,12 @@ func TestAccIBMContainerWorkerPoolBasic(t *testing.T) { "ibm_container_worker_pool.test_pool", "disk_encryption", "true"), resource.TestCheckResourceAttr( "ibm_container_worker_pool.test_pool", "hardware", "shared"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "size_per_zone", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", 
"labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.default_pool", "hardware", "shared"), ), }, { @@ -181,13 +193,13 @@ func testAccCheckIBMContainerWorkerPoolBasic(clusterName, workerPoolName string) return fmt.Sprintf(` resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" + name = "%[1]s" + datacenter = "%[2]s" + machine_type = "%[3]s" hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" + public_vlan_id = "%[4]s" + private_vlan_id = "%[5]s" + kube_version = "%[6]s" wait_till = "OneWorkerNodeReady" operating_system = "UBUNTU_20_64" taints { @@ -197,9 +209,22 @@ resource "ibm_container_cluster" "testacc_cluster" { } } +resource "ibm_container_worker_pool" "default_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 1 + import_on_create = "true" + taints { + key = "key1" + value = "value1" + effect = "NoSchedule" + } +} + resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "%s" - machine_type = "%s" + worker_pool_name = "%[7]s" + machine_type = "%[8]s" cluster = ibm_container_cluster.testacc_cluster.id size_per_zone = 1 hardware = "shared" @@ -220,19 +245,40 @@ func testAccCheckIBMContainerWorkerPoolUpdate(clusterName, workerPoolName string return fmt.Sprintf(` resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" + name = "%[1]s" + datacenter = "%[2]s" + machine_type = "%[3]s" hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" + public_vlan_id = "%[4]s" + private_vlan_id = "%[5]s" + kube_version = "%[6]s" wait_till = "OneWorkerNodeReady" } +resource "ibm_container_worker_pool" "default_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 2 + import_on_create = "true" + 
taints { + key = "key1" + value = "value1" + effect = "NoSchedule" + } + labels = { + "test" = "test-pool" + "test1" = "test-pool1" + } + depends_on = [ + ibm_container_worker_pool.test_pool + ] + orphan_on_delete = "true" +} + resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "%s" - machine_type = "%s" + worker_pool_name = "%[7]s" + machine_type = "%[8]s" cluster = ibm_container_cluster.testacc_cluster.id size_per_zone = 2 hardware = "shared" @@ -327,139 +373,3 @@ resource "ibm_container_worker_pool" "test_pool" { } }`, workerPoolName, acc.MachineType, clusterName) } - -func TestAccIBMContainerWorkerPoolImportOnCreate(t *testing.T) { - - clusterName := fmt.Sprintf("tf-cluster-worker-%d", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIBMContainerWorkerPoolDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", "shared"), - ), - }, - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", 
"shared"), - ), - }, - { - Config: testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "size_per_zone", "3"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "labels.%", "2"), - resource.TestCheckResourceAttr( - "ibm_container_worker_pool.test_pool", "hardware", "shared"), - ), - }, - }, - }) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 1 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 1 - import_on_create = "true" -}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 3 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 1 - import_on_create = "true" -}`, 
clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} - -func testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName string) string { - return fmt.Sprintf(` - -resource "ibm_container_cluster" "testacc_cluster" { - name = "%s" - datacenter = "%s" - machine_type = "%s" - hardware = "shared" - public_vlan_id = "%s" - private_vlan_id = "%s" - kube_version = "%s" - wait_till = "OneWorkerNodeReady" - default_pool_size = 1 - labels = { - "test" = "test-pool" - "test1" = "test-pool1" - } -} - -resource "ibm_container_worker_pool" "test_pool" { - worker_pool_name = "default" - machine_type = "%[3]s" - cluster = ibm_container_cluster.testacc_cluster.id - size_per_zone = 3 - import_on_create = "true" -}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) -} diff --git a/ibm/service/logs/data_source_ibm_logs_view_folders_test.go b/ibm/service/logs/data_source_ibm_logs_view_folders_test.go index b82bbaf4ef..7f744a8516 100644 --- a/ibm/service/logs/data_source_ibm_logs_view_folders_test.go +++ b/ibm/service/logs/data_source_ibm_logs_view_folders_test.go @@ -19,7 +19,7 @@ import ( ) func TestAccIbmLogsViewFoldersDataSourceBasic(t *testing.T) { - viewFolderName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) + viewFolderName := fmt.Sprintf("TF_LOG_アクセスログ_%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheckCloudLogs(t) }, diff --git a/ibm/service/logs/resource_ibm_logs_alert.go b/ibm/service/logs/resource_ibm_logs_alert.go index 7cc44686be..291fa6d61d 100644 --- a/ibm/service/logs/resource_ibm_logs_alert.go +++ b/ibm/service/logs/resource_ibm_logs_alert.go @@ -1568,7 +1568,7 @@ func ResourceIbmLogsAlertValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + 
Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_alert_test.go b/ibm/service/logs/resource_ibm_logs_alert_test.go index 63ae36e3c0..ad6c752a77 100644 --- a/ibm/service/logs/resource_ibm_logs_alert_test.go +++ b/ibm/service/logs/resource_ibm_logs_alert_test.go @@ -21,7 +21,7 @@ import ( func TestAccIbmLogsAlertBasic(t *testing.T) { var conf logsv0.Alert - name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf_name_応答時間モニター!_%d", acctest.RandIntRange(10, 100)) isActive := "false" severity := "info_or_unspecified" nameUpdate := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) diff --git a/ibm/service/logs/resource_ibm_logs_dashboard.go b/ibm/service/logs/resource_ibm_logs_dashboard.go index 3c97745a19..aaa4e8f964 100644 --- a/ibm/service/logs/resource_ibm_logs_dashboard.go +++ b/ibm/service/logs/resource_ibm_logs_dashboard.go @@ -6564,7 +6564,7 @@ func ResourceIbmLogsDashboardValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 100, }, @@ -6573,7 +6573,7 @@ func ResourceIbmLogsDashboardValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Optional: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 200, }, diff --git a/ibm/service/logs/resource_ibm_logs_dashboard_folder.go b/ibm/service/logs/resource_ibm_logs_dashboard_folder.go index eaa79d5316..b1ec638890 100644 --- a/ibm/service/logs/resource_ibm_logs_dashboard_folder.go +++ b/ibm/service/logs/resource_ibm_logs_dashboard_folder.go @@ -56,7 +56,7 @@ func ResourceIbmLogsDashboardFolderValidator() 
*validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_data_access_rule.go b/ibm/service/logs/resource_ibm_logs_data_access_rule.go index 1b1e5c415d..179420e2e1 100644 --- a/ibm/service/logs/resource_ibm_logs_data_access_rule.go +++ b/ibm/service/logs/resource_ibm_logs_data_access_rule.go @@ -87,7 +87,7 @@ func ResourceIbmLogsDataAccessRuleValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_e2m.go b/ibm/service/logs/resource_ibm_logs_e2m.go index 069ddafc29..c60f6e7406 100644 --- a/ibm/service/logs/resource_ibm_logs_e2m.go +++ b/ibm/service/logs/resource_ibm_logs_e2m.go @@ -227,7 +227,7 @@ func ResourceIbmLogsE2mValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_enrichment.go b/ibm/service/logs/resource_ibm_logs_enrichment.go index 6b8218d9fe..2609782584 100644 --- a/ibm/service/logs/resource_ibm_logs_enrichment.go +++ b/ibm/service/logs/resource_ibm_logs_enrichment.go @@ -97,7 +97,7 @@ func ResourceIbmLogsEnrichmentValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: 
`^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_outgoing_webhook.go b/ibm/service/logs/resource_ibm_logs_outgoing_webhook.go index 321390061d..55fa0ce936 100644 --- a/ibm/service/logs/resource_ibm_logs_outgoing_webhook.go +++ b/ibm/service/logs/resource_ibm_logs_outgoing_webhook.go @@ -117,7 +117,7 @@ func ResourceIbmLogsOutgoingWebhookValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, @@ -126,7 +126,7 @@ func ResourceIbmLogsOutgoingWebhookValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Optional: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_policy.go b/ibm/service/logs/resource_ibm_logs_policy.go index d04e1c02e1..025cd701b1 100644 --- a/ibm/service/logs/resource_ibm_logs_policy.go +++ b/ibm/service/logs/resource_ibm_logs_policy.go @@ -164,7 +164,7 @@ func ResourceIbmLogsPolicyValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_rule_group.go b/ibm/service/logs/resource_ibm_logs_rule_group.go index 5ef4362516..9e10dfe464 100644 --- a/ibm/service/logs/resource_ibm_logs_rule_group.go +++ b/ibm/service/logs/resource_ibm_logs_rule_group.go @@ -393,7 +393,7 @@ func ResourceIbmLogsRuleGroupValidator() *validate.ResourceValidator { 
ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 255, }, diff --git a/ibm/service/logs/resource_ibm_logs_view.go b/ibm/service/logs/resource_ibm_logs_view.go index 45c08bce6f..5e91815e4a 100644 --- a/ibm/service/logs/resource_ibm_logs_view.go +++ b/ibm/service/logs/resource_ibm_logs_view.go @@ -155,7 +155,7 @@ func ResourceIbmLogsViewValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_view_folder.go b/ibm/service/logs/resource_ibm_logs_view_folder.go index 03c6625f38..390fac0929 100644 --- a/ibm/service/logs/resource_ibm_logs_view_folder.go +++ b/ibm/service/logs/resource_ibm_logs_view_folder.go @@ -51,7 +51,7 @@ func ResourceIbmLogsViewFolderValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateRegexpLen, Type: validate.TypeString, Required: true, - Regexp: `^[A-Za-z0-9_\.,\-"{}()\[\]=!:#\/$|' ]+$`, + Regexp: `^[\p{L}\p{N}\p{P}\p{Z}\p{S}\p{M}]+$`, MinValueLength: 1, MaxValueLength: 4096, }, diff --git a/ibm/service/logs/resource_ibm_logs_view_folder_test.go b/ibm/service/logs/resource_ibm_logs_view_folder_test.go index 7c1e1bc35b..cf709b406b 100644 --- a/ibm/service/logs/resource_ibm_logs_view_folder_test.go +++ b/ibm/service/logs/resource_ibm_logs_view_folder_test.go @@ -21,7 +21,7 @@ import ( func TestAccIbmLogsViewFolderBasic(t *testing.T) { var conf logsv0.ViewFolder - name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf_name_応答時間モニター_%d", acctest.RandIntRange(10, 100)) // nameUpdate := fmt.Sprintf("tf_name_%d", 
acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment.go index 4bcc77ef7c..9c0981712f 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -241,6 +241,11 @@ func ResourceIbmOnboardingCatalogDeployment() *schema.Resource { }, }, }, + "embeddable_dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "On a service kind record this controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -251,19 +256,44 @@ func ResourceIbmOnboardingCatalogDeployment() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, - Description: "The UI based URLs.", + Description: "Metadata with URLs related to a service.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "doc_url": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "The URL for your product documentation.", + Description: "The URL for your product's documentation.", + }, + "apidocs_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL for your product's API documentation.", }, "terms_url": &schema.Schema{ Type: schema.TypeString, Optional: true, Description: "The URL for your product's end user license agreement.", }, + "instructions_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Getting Started tab on the Resource Details page. 
Setting it the content is loaded from the specified URL.", + }, + "catalog_details_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "custom_create_page_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -297,6 +327,66 @@ func ResourceIbmOnboardingCatalogDeployment() *schema.Resource { Optional: true, Description: "Whether the service is compatible with the IAM service.", }, + "bindable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Deprecated. Controls the Connections tab on the Resource Details page.", + }, + "plan_updateable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates plan update support and controls the Plan tab on the Resource Details page.", + }, + "service_key_supported": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates service credentials support and controls the Service Credential tab on Resource Details page.", + }, + }, + }, + }, + "deployment": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The global catalog metadata of the deployment.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "broker": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The global catalog metadata of the deployment.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the 
resource broker.", + }, + "guid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Crn or guid of the resource broker.", + }, + }, + }, + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The global catalog deployment location.", + }, + "location_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The global catalog deployment URL of location.", + }, + "target_crn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Region crn.", + }, }, }, }, @@ -393,6 +483,9 @@ func resourceIbmOnboardingCatalogDeploymentCreate(context context.Context, d *sc createCatalogDeploymentOptions.SetActive(d.Get("active").(bool)) createCatalogDeploymentOptions.SetDisabled(d.Get("disabled").(bool)) createCatalogDeploymentOptions.SetKind(d.Get("kind").(string)) + if _, ok := d.GetOk("env"); ok { + createCatalogDeploymentOptions.SetEnv(d.Get("env").(string)) + } var tags []string for _, v := range d.Get("tags").([]interface{}) { tagsItem := v.(string) @@ -522,6 +615,24 @@ func resourceIbmOnboardingCatalogDeploymentRead(context context.Context, d *sche return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_deployment", "read", "set-url").GetDiag() } } + if parts[0] != "" { + if err = d.Set("product_id", parts[0]); err != nil { + err = fmt.Errorf("Error setting product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_deployment", "read", "set-product_id").GetDiag() + } + } + if parts[1] != "" { + if err = d.Set("catalog_product_id", parts[1]); err != nil { + err = fmt.Errorf("Error setting catalog_product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_deployment", "read", "set-catalog_product_id").GetDiag() + } + } + if parts[2] != "" { + if err = d.Set("catalog_plan_id", parts[2]); err != nil { + err = fmt.Errorf("Error setting 
catalog_plan_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_deployment", "read", "set-catalog_plan_id").GetDiag() + } + } if !core.IsNil(globalCatalogDeployment.ID) { if err = d.Set("catalog_deployment_id", globalCatalogDeployment.ID); err != nil { err = fmt.Errorf("Error setting catalog_deployment_id: %s", err) @@ -725,6 +836,13 @@ func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetadata( } model.Service = ServiceModel } + if modelMap["deployment"] != nil && len(modelMap["deployment"].([]interface{})) > 0 { + DeploymentModel, err := ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeployment(modelMap["deployment"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Deployment = DeploymentModel + } return model, nil } @@ -789,6 +907,9 @@ func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStringsCo } model.Media = media } + if modelMap["embeddable_dashboard"] != nil && modelMap["embeddable_dashboard"].(string) != "" { + model.EmbeddableDashboard = core.StringPtr(modelMap["embeddable_dashboard"].(string)) + } return model, nil } @@ -843,9 +964,24 @@ func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIUrls(mode if modelMap["doc_url"] != nil && modelMap["doc_url"].(string) != "" { model.DocURL = core.StringPtr(modelMap["doc_url"].(string)) } + if modelMap["apidocs_url"] != nil && modelMap["apidocs_url"].(string) != "" { + model.ApidocsURL = core.StringPtr(modelMap["apidocs_url"].(string)) + } if modelMap["terms_url"] != nil && modelMap["terms_url"].(string) != "" { model.TermsURL = core.StringPtr(modelMap["terms_url"].(string)) } + if modelMap["instructions_url"] != nil && modelMap["instructions_url"].(string) != "" { + model.InstructionsURL = core.StringPtr(modelMap["instructions_url"].(string)) + } + if modelMap["catalog_details_url"] != nil && modelMap["catalog_details_url"].(string) != "" { + 
model.CatalogDetailsURL = core.StringPtr(modelMap["catalog_details_url"].(string)) + } + if modelMap["custom_create_page_url"] != nil && modelMap["custom_create_page_url"].(string) != "" { + model.CustomCreatePageURL = core.StringPtr(modelMap["custom_create_page_url"].(string)) + } + if modelMap["dashboard"] != nil && modelMap["dashboard"].(string) != "" { + model.Dashboard = core.StringPtr(modelMap["dashboard"].(string)) + } return model, nil } @@ -857,6 +993,47 @@ func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataService(mod if modelMap["iam_compatible"] != nil { model.IamCompatible = core.BoolPtr(modelMap["iam_compatible"].(bool)) } + if modelMap["bindable"] != nil { + model.Bindable = core.BoolPtr(modelMap["bindable"].(bool)) + } + if modelMap["plan_updateable"] != nil { + model.PlanUpdateable = core.BoolPtr(modelMap["plan_updateable"].(bool)) + } + if modelMap["service_key_supported"] != nil { + model.ServiceKeySupported = core.BoolPtr(modelMap["service_key_supported"].(bool)) + } + return model, nil +} + +func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeployment(modelMap map[string]interface{}) (*partnercentersellv1.GlobalCatalogMetadataDeployment, error) { + model := &partnercentersellv1.GlobalCatalogMetadataDeployment{} + if modelMap["broker"] != nil && len(modelMap["broker"].([]interface{})) > 0 { + BrokerModel, err := ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeploymentBroker(modelMap["broker"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Broker = BrokerModel + } + if modelMap["location"] != nil && modelMap["location"].(string) != "" { + model.Location = core.StringPtr(modelMap["location"].(string)) + } + if modelMap["location_url"] != nil && modelMap["location_url"].(string) != "" { + model.LocationURL = core.StringPtr(modelMap["location_url"].(string)) + } + if modelMap["target_crn"] != nil && modelMap["target_crn"].(string) != "" { + 
model.TargetCrn = core.StringPtr(modelMap["target_crn"].(string)) + } + return model, nil +} + +func ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeploymentBroker(modelMap map[string]interface{}) (*partnercentersellv1.GlobalCatalogMetadataDeploymentBroker, error) { + model := &partnercentersellv1.GlobalCatalogMetadataDeploymentBroker{} + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + if modelMap["guid"] != nil && modelMap["guid"].(string) != "" { + model.Guid = core.StringPtr(modelMap["guid"].(string)) + } return model, nil } @@ -916,6 +1093,13 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataToMap( } modelMap["service"] = []map[string]interface{}{serviceMap} } + if model.Deployment != nil { + deploymentMap, err := ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentToMap(model.Deployment) + if err != nil { + return modelMap, err + } + modelMap["deployment"] = []map[string]interface{}{deploymentMap} + } return modelMap, nil } @@ -980,6 +1164,9 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsContent } modelMap["media"] = media } + if model.EmbeddableDashboard != nil { + modelMap["embeddable_dashboard"] = *model.EmbeddableDashboard + } return modelMap, nil } @@ -1031,9 +1218,24 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIUrlsToMap(mode if model.DocURL != nil { modelMap["doc_url"] = *model.DocURL } + if model.ApidocsURL != nil { + modelMap["apidocs_url"] = *model.ApidocsURL + } if model.TermsURL != nil { modelMap["terms_url"] = *model.TermsURL } + if model.InstructionsURL != nil { + modelMap["instructions_url"] = *model.InstructionsURL + } + if model.CatalogDetailsURL != nil { + modelMap["catalog_details_url"] = *model.CatalogDetailsURL + } + if model.CustomCreatePageURL != nil { + modelMap["custom_create_page_url"] = *model.CustomCreatePageURL + } + if model.Dashboard != 
nil { + modelMap["dashboard"] = *model.Dashboard + } return modelMap, nil } @@ -1045,6 +1247,47 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceToMap(mod if model.IamCompatible != nil { modelMap["iam_compatible"] = *model.IamCompatible } + if model.Bindable != nil { + modelMap["bindable"] = *model.Bindable + } + if model.PlanUpdateable != nil { + modelMap["plan_updateable"] = *model.PlanUpdateable + } + if model.ServiceKeySupported != nil { + modelMap["service_key_supported"] = *model.ServiceKeySupported + } + return modelMap, nil +} + +func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentToMap(model *partnercentersellv1.GlobalCatalogMetadataDeployment) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Broker != nil { + brokerMap, err := ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerToMap(model.Broker) + if err != nil { + return modelMap, err + } + modelMap["broker"] = []map[string]interface{}{brokerMap} + } + if model.Location != nil { + modelMap["location"] = *model.Location + } + if model.LocationURL != nil { + modelMap["location_url"] = *model.LocationURL + } + if model.TargetCrn != nil { + modelMap["target_crn"] = *model.TargetCrn + } + return modelMap, nil +} + +func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerToMap(model *partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.Guid != nil { + modelMap["guid"] = *model.Guid + } return modelMap, nil } @@ -1105,6 +1348,48 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataAsPatc } else if exists && patch["service"] != nil { ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceAsPatch(patch["service"].(map[string]interface{}), d) } + path = "metadata.0.deployment" + if _, exists := 
d.GetOk(path); d.HasChange(path) && !exists { + patch["deployment"] = nil + } else if exists && patch["deployment"] != nil { + ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentAsPatch(patch["deployment"].(map[string]interface{}), d) + } +} + +func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.deployment.0.broker" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["broker"] = nil + } else if exists && patch["broker"] != nil { + ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerAsPatch(patch["broker"].(map[string]interface{}), d) + } + path = "metadata.0.deployment.0.location" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["location"] = nil + } + path = "metadata.0.deployment.0.location_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["location_url"] = nil + } + path = "metadata.0.deployment.0.target_crn" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["target_crn"] = nil + } +} + +func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.deployment.0.broker.0.name" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["name"] = nil + } + path = "metadata.0.deployment.0.broker.0.guid" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["guid"] = nil + } } func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1118,6 +1403,18 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceAsPatch(p if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["iam_compatible"] = nil } + path = "metadata.0.service.0.bindable" + if _, exists := 
d.GetOk(path); d.HasChange(path) && !exists { + patch["bindable"] = nil + } + path = "metadata.0.service.0.plan_updateable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["plan_updateable"] = nil + } + path = "metadata.0.service.0.service_key_supported" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["service_key_supported"] = nil + } } func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1152,10 +1449,30 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIUrlsAsPatch(pa if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["doc_url"] = nil } + path = "metadata.0.ui.0.urls.0.apidocs_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["apidocs_url"] = nil + } path = "metadata.0.ui.0.urls.0.terms_url" if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["terms_url"] = nil } + path = "metadata.0.ui.0.urls.0.instructions_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["instructions_url"] = nil + } + path = "metadata.0.ui.0.urls.0.catalog_details_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["catalog_details_url"] = nil + } + path = "metadata.0.ui.0.urls.0.custom_create_page_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["custom_create_page_url"] = nil + } + path = "metadata.0.ui.0.urls.0.dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["dashboard"] = nil + } } func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1184,6 +1501,10 @@ func ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsContent } else if exists && patch["media"] != nil { 
ResourceIbmOnboardingCatalogDeploymentCatalogProductMediaItemAsPatch(patch["media"].([]interface{})[0].(map[string]interface{}), d) } + path = "metadata.0.ui.0.strings.0.en.0.embeddable_dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["embeddable_dashboard"] = nil + } } func ResourceIbmOnboardingCatalogDeploymentCatalogProductMediaItemAsPatch(patch map[string]interface{}, d *schema.ResourceData) { diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment_test.go index 7565649f89..75b0569ad4 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_deployment_test.go @@ -146,7 +146,7 @@ func testAccCheckIbmOnboardingCatalogDeploymentConfigBasic(productID string, cat service { rc_provisionable = true iam_compatible = true - } + } rc_compatible = false } } @@ -155,7 +155,6 @@ func testAccCheckIbmOnboardingCatalogDeploymentConfigBasic(productID string, cat func testAccCheckIbmOnboardingCatalogDeploymentConfig(productID string, catalogProductID string, catalogPlanID string, env string, name string, active string, disabled string, kind string) string { return fmt.Sprintf(` - resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_instance" { product_id = "%s" catalog_product_id = "%s" @@ -187,6 +186,15 @@ func testAccCheckIbmOnboardingCatalogDeploymentConfig(productID string, catalogP hidden = true side_by_side_index = 1.0 } + deployment { + broker { + name = "broker-petra-1" + guid = "guid" + } + location = "ams03" + location_url = "https://globalcatalog.test.cloud.ibm.com/api/v1/ams03" + target_crn = "crn:v1:staging:public::ams03:::environment:staging-ams03" + } } } `, productID, catalogProductID, catalogPlanID, env, name, active, disabled, kind) @@ -344,13 +352,19 @@ func 
TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataTo globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -361,11 +375,25 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataTo globalCatalogMetadataServiceModel := make(map[string]interface{}) globalCatalogMetadataServiceModel["rc_provisionable"] = true globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true + + globalCatalogMetadataDeploymentBrokerModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentBrokerModel["name"] = "testString" + 
globalCatalogMetadataDeploymentBrokerModel["guid"] = "testString" + + globalCatalogMetadataDeploymentModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentModel["broker"] = []map[string]interface{}{globalCatalogMetadataDeploymentBrokerModel} + globalCatalogMetadataDeploymentModel["location"] = "testString" + globalCatalogMetadataDeploymentModel["location_url"] = "testString" + globalCatalogMetadataDeploymentModel["target_crn"] = "testString" model := make(map[string]interface{}) model["rc_compatible"] = true model["ui"] = []map[string]interface{}{globalCatalogMetadataUiModel} model["service"] = []map[string]interface{}{globalCatalogMetadataServiceModel} + model["deployment"] = []map[string]interface{}{globalCatalogMetadataDeploymentModel} assert.Equal(t, result, model) } @@ -386,13 +414,19 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataTo globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = 
core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -403,11 +437,25 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataTo globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) + + globalCatalogMetadataDeploymentBrokerModel := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + globalCatalogMetadataDeploymentBrokerModel.Name = core.StringPtr("testString") + globalCatalogMetadataDeploymentBrokerModel.Guid = core.StringPtr("testString") + + globalCatalogMetadataDeploymentModel := new(partnercentersellv1.GlobalCatalogMetadataDeployment) + globalCatalogMetadataDeploymentModel.Broker = globalCatalogMetadataDeploymentBrokerModel + globalCatalogMetadataDeploymentModel.Location = core.StringPtr("testString") + globalCatalogMetadataDeploymentModel.LocationURL = core.StringPtr("testString") + globalCatalogMetadataDeploymentModel.TargetCrn = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogDeploymentMetadata) model.RcCompatible = core.BoolPtr(true) model.Ui = globalCatalogMetadataUiModel model.Service = globalCatalogMetadataServiceModel + model.Deployment = globalCatalogMetadataDeploymentModel result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogDeploymentMetadataToMap(model) assert.Nil(t, err) @@ 
-432,13 +480,19 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIToMap(t *t globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -465,13 +519,19 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIToMap(t *t globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) 
globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -502,6 +562,7 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsToM globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} @@ -525,6 +586,7 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsToM globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") model := 
new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -552,6 +614,7 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsCon model := make(map[string]interface{}) model["bullets"] = []map[string]interface{}{catalogHighlightItemModel} model["media"] = []map[string]interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" assert.Equal(t, result, model) } @@ -572,6 +635,7 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsCon model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIStringsContentToMap(model) assert.Nil(t, err) @@ -628,14 +692,24 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIUrlsToMap( checkResult := func(result map[string]interface{}) { model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = "testString" + model["dashboard"] = "testString" assert.Equal(t, result, model) } model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + model.Dashboard = core.StringPtr("testString") result, err := 
partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataUIUrlsToMap(model) assert.Nil(t, err) @@ -647,6 +721,9 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceToMap model := make(map[string]interface{}) model["rc_provisionable"] = true model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true + model["service_key_supported"] = true assert.Equal(t, result, model) } @@ -654,12 +731,63 @@ func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceToMap model := new(partnercentersellv1.GlobalCatalogMetadataService) model.RcProvisionable = core.BoolPtr(true) model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataServiceToMap(model) assert.Nil(t, err) checkResult(result) } +func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + globalCatalogMetadataDeploymentBrokerModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentBrokerModel["name"] = "testString" + globalCatalogMetadataDeploymentBrokerModel["guid"] = "testString" + + model := make(map[string]interface{}) + model["broker"] = []map[string]interface{}{globalCatalogMetadataDeploymentBrokerModel} + model["location"] = "testString" + model["location_url"] = "testString" + model["target_crn"] = "testString" + + assert.Equal(t, result, model) + } + + globalCatalogMetadataDeploymentBrokerModel := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + globalCatalogMetadataDeploymentBrokerModel.Name = core.StringPtr("testString") + globalCatalogMetadataDeploymentBrokerModel.Guid = core.StringPtr("testString") + + model := new(partnercentersellv1.GlobalCatalogMetadataDeployment) + model.Broker = 
globalCatalogMetadataDeploymentBrokerModel + model.Location = core.StringPtr("testString") + model.LocationURL = core.StringPtr("testString") + model.TargetCrn = core.StringPtr("testString") + + result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["name"] = "testString" + model["guid"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + model.Name = core.StringPtr("testString") + model.Guid = core.StringPtr("testString") + + result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentGlobalCatalogMetadataDeploymentBrokerToMap(model) + assert.Nil(t, err) + checkResult(result) +} + func TestResourceIbmOnboardingCatalogDeploymentMapToCatalogProductProvider(t *testing.T) { checkResult := func(result *partnercentersellv1.CatalogProductProvider) { model := new(partnercentersellv1.CatalogProductProvider) @@ -742,13 +870,19 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetad globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := 
new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -759,11 +893,25 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetad globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) + + globalCatalogMetadataDeploymentBrokerModel := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + globalCatalogMetadataDeploymentBrokerModel.Name = core.StringPtr("testString") + globalCatalogMetadataDeploymentBrokerModel.Guid = core.StringPtr("testString") + + globalCatalogMetadataDeploymentModel := new(partnercentersellv1.GlobalCatalogMetadataDeployment) + globalCatalogMetadataDeploymentModel.Broker = globalCatalogMetadataDeploymentBrokerModel + globalCatalogMetadataDeploymentModel.Location = core.StringPtr("testString") + globalCatalogMetadataDeploymentModel.LocationURL = core.StringPtr("testString") + globalCatalogMetadataDeploymentModel.TargetCrn = 
core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogDeploymentMetadata) model.RcCompatible = core.BoolPtr(true) model.Ui = globalCatalogMetadataUiModel model.Service = globalCatalogMetadataServiceModel + model.Deployment = globalCatalogMetadataDeploymentModel assert.Equal(t, result, model) } @@ -784,13 +932,19 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetad globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -801,11 +955,25 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetad globalCatalogMetadataServiceModel := make(map[string]interface{}) globalCatalogMetadataServiceModel["rc_provisionable"] = true globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + 
globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true + + globalCatalogMetadataDeploymentBrokerModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentBrokerModel["name"] = "testString" + globalCatalogMetadataDeploymentBrokerModel["guid"] = "testString" + + globalCatalogMetadataDeploymentModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentModel["broker"] = []interface{}{globalCatalogMetadataDeploymentBrokerModel} + globalCatalogMetadataDeploymentModel["location"] = "testString" + globalCatalogMetadataDeploymentModel["location_url"] = "testString" + globalCatalogMetadataDeploymentModel["target_crn"] = "testString" model := make(map[string]interface{}) model["rc_compatible"] = true model["ui"] = []interface{}{globalCatalogMetadataUiModel} model["service"] = []interface{}{globalCatalogMetadataServiceModel} + model["deployment"] = []interface{}{globalCatalogMetadataDeploymentModel} result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogDeploymentMetadata(model) assert.Nil(t, err) @@ -830,13 +998,19 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUI(t *t globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) 
globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -863,13 +1037,19 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUI(t *t globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -900,6 +1080,7 @@ func 
TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStrin globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -923,6 +1104,7 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStrin globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} @@ -950,6 +1132,7 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStrin model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") assert.Equal(t, result, model) } @@ -970,6 +1153,7 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStrin model := make(map[string]interface{}) model["bullets"] = []interface{}{catalogHighlightItemModel} model["media"] = []interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" result, err := 
partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIStringsContent(model) assert.Nil(t, err) @@ -1026,14 +1210,24 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIUrls( checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataUIUrls) { model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + model.Dashboard = core.StringPtr("testString") assert.Equal(t, result, model) } model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = "testString" + model["dashboard"] = "testString" result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataUIUrls(model) assert.Nil(t, err) @@ -1045,6 +1239,9 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataService model := new(partnercentersellv1.GlobalCatalogMetadataService) model.RcProvisionable = core.BoolPtr(true) model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) assert.Equal(t, result, model) } @@ -1052,8 +1249,59 @@ func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataService model := make(map[string]interface{}) model["rc_provisionable"] = true model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true + model["service_key_supported"] = true result, err := 
partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataService(model) assert.Nil(t, err) checkResult(result) } + +func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeployment(t *testing.T) { + checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataDeployment) { + globalCatalogMetadataDeploymentBrokerModel := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + globalCatalogMetadataDeploymentBrokerModel.Name = core.StringPtr("testString") + globalCatalogMetadataDeploymentBrokerModel.Guid = core.StringPtr("testString") + + model := new(partnercentersellv1.GlobalCatalogMetadataDeployment) + model.Broker = globalCatalogMetadataDeploymentBrokerModel + model.Location = core.StringPtr("testString") + model.LocationURL = core.StringPtr("testString") + model.TargetCrn = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + globalCatalogMetadataDeploymentBrokerModel := make(map[string]interface{}) + globalCatalogMetadataDeploymentBrokerModel["name"] = "testString" + globalCatalogMetadataDeploymentBrokerModel["guid"] = "testString" + + model := make(map[string]interface{}) + model["broker"] = []interface{}{globalCatalogMetadataDeploymentBrokerModel} + model["location"] = "testString" + model["location_url"] = "testString" + model["target_crn"] = "testString" + + result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeployment(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeploymentBroker(t *testing.T) { + checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) { + model := new(partnercentersellv1.GlobalCatalogMetadataDeploymentBroker) + model.Name = core.StringPtr("testString") + model.Guid = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["name"] = 
"testString" + model["guid"] = "testString" + + result, err := partnercentersell.ResourceIbmOnboardingCatalogDeploymentMapToGlobalCatalogMetadataDeploymentBroker(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan.go index 9b41aeebce..2bee5aaafc 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -234,6 +234,11 @@ func ResourceIbmOnboardingCatalogPlan() *schema.Resource { }, }, }, + "embeddable_dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "On a service kind record this controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -244,19 +249,44 @@ func ResourceIbmOnboardingCatalogPlan() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, - Description: "The UI based URLs.", + Description: "Metadata with URLs related to a service.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "doc_url": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "The URL for your product documentation.", + Description: "The URL for your product's documentation.", + }, + "apidocs_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL for your product's API documentation.", }, "terms_url": &schema.Schema{ Type: schema.TypeString, Optional: true, Description: "The URL for your product's end user license agreement.", }, + "instructions_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls 
the Getting Started tab on the Resource Details page. Setting it the content is loaded from the specified URL.", + }, + "catalog_details_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "custom_create_page_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -273,6 +303,41 @@ func ResourceIbmOnboardingCatalogPlan() *schema.Resource { }, }, }, + "service": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The global catalog metadata of the service.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rc_provisionable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Whether the service is provisionable by the resource controller service.", + }, + "iam_compatible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Whether the service is compatible with the IAM service.", + }, + "bindable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Deprecated. 
Controls the Connections tab on the Resource Details page.", + }, + "plan_updateable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates plan update support and controls the Plan tab on the Resource Details page.", + }, + "service_key_supported": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates service credentials support and controls the Service Credential tab on Resource Details page.", + }, + }, + }, + }, "pricing": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, @@ -293,6 +358,26 @@ func ResourceIbmOnboardingCatalogPlan() *schema.Resource { }, }, }, + "plan": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Metadata controlling Plan related settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_internal_users": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Controls if IBMers are allowed to provision this plan.", + }, + "bindable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Deprecated. 
Controls the Connections tab on the Resource Details page.", + }, + }, + }, + }, }, }, }, @@ -504,6 +589,18 @@ func resourceIbmOnboardingCatalogPlanRead(context context.Context, d *schema.Res return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_plan", "read", "set-url").GetDiag() } } + if parts[0] != "" { + if err = d.Set("product_id", parts[0]); err != nil { + err = fmt.Errorf("Error setting product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_plan", "read", "set-product_id").GetDiag() + } + } + if parts[1] != "" { + if err = d.Set("catalog_product_id", parts[1]); err != nil { + err = fmt.Errorf("Error setting catalog_product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_plan", "read", "set-catalog_product_id").GetDiag() + } + } if !core.IsNil(globalCatalogPlan.ID) { if err = d.Set("catalog_plan_id", globalCatalogPlan.ID); err != nil { err = fmt.Errorf("Error setting catalog_plan_id: %s", err) @@ -693,6 +790,13 @@ func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(modelMap map } model.Ui = UiModel } + if modelMap["service"] != nil && len(modelMap["service"].([]interface{})) > 0 && modelMap["service"].([]interface{})[0] != nil { + ServiceModel, err := ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataService(modelMap["service"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Service = ServiceModel + } if modelMap["pricing"] != nil && len(modelMap["pricing"].([]interface{})) > 0 && modelMap["pricing"].([]interface{})[0] != nil { PricingModel, err := ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataPricing(modelMap["pricing"].([]interface{})[0].(map[string]interface{})) if err != nil { @@ -700,6 +804,13 @@ func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(modelMap map } model.Pricing = PricingModel } + if modelMap["plan"] != nil && 
len(modelMap["plan"].([]interface{})) > 0 { + PlanModel, err := ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadataPlan(modelMap["plan"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Plan = PlanModel + } return model, nil } @@ -764,6 +875,9 @@ func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStringsContent( } model.Media = media } + if modelMap["embeddable_dashboard"] != nil && modelMap["embeddable_dashboard"].(string) != "" { + model.EmbeddableDashboard = core.StringPtr(modelMap["embeddable_dashboard"].(string)) + } return model, nil } @@ -818,9 +932,44 @@ func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIUrls(modelMap m if modelMap["doc_url"] != nil && modelMap["doc_url"].(string) != "" { model.DocURL = core.StringPtr(modelMap["doc_url"].(string)) } + if modelMap["apidocs_url"] != nil && modelMap["apidocs_url"].(string) != "" { + model.ApidocsURL = core.StringPtr(modelMap["apidocs_url"].(string)) + } if modelMap["terms_url"] != nil && modelMap["terms_url"].(string) != "" { model.TermsURL = core.StringPtr(modelMap["terms_url"].(string)) } + if modelMap["instructions_url"] != nil && modelMap["instructions_url"].(string) != "" { + model.InstructionsURL = core.StringPtr(modelMap["instructions_url"].(string)) + } + if modelMap["catalog_details_url"] != nil && modelMap["catalog_details_url"].(string) != "" { + model.CatalogDetailsURL = core.StringPtr(modelMap["catalog_details_url"].(string)) + } + if modelMap["custom_create_page_url"] != nil && modelMap["custom_create_page_url"].(string) != "" { + model.CustomCreatePageURL = core.StringPtr(modelMap["custom_create_page_url"].(string)) + } + if modelMap["dashboard"] != nil && modelMap["dashboard"].(string) != "" { + model.Dashboard = core.StringPtr(modelMap["dashboard"].(string)) + } + return model, nil +} + +func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataService(modelMap map[string]interface{}) 
(*partnercentersellv1.GlobalCatalogMetadataService, error) { + model := &partnercentersellv1.GlobalCatalogMetadataService{} + if modelMap["rc_provisionable"] != nil { + model.RcProvisionable = core.BoolPtr(modelMap["rc_provisionable"].(bool)) + } + if modelMap["iam_compatible"] != nil { + model.IamCompatible = core.BoolPtr(modelMap["iam_compatible"].(bool)) + } + if modelMap["bindable"] != nil { + model.Bindable = core.BoolPtr(modelMap["bindable"].(bool)) + } + if modelMap["plan_updateable"] != nil { + model.PlanUpdateable = core.BoolPtr(modelMap["plan_updateable"].(bool)) + } + if modelMap["service_key_supported"] != nil { + model.ServiceKeySupported = core.BoolPtr(modelMap["service_key_supported"].(bool)) + } return model, nil } @@ -835,6 +984,17 @@ func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataPricing(modelMap return model, nil } +func ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadataPlan(modelMap map[string]interface{}) (*partnercentersellv1.GlobalCatalogPlanMetadataPlan, error) { + model := &partnercentersellv1.GlobalCatalogPlanMetadataPlan{} + if modelMap["allow_internal_users"] != nil { + model.AllowInternalUsers = core.BoolPtr(modelMap["allow_internal_users"].(bool)) + } + if modelMap["bindable"] != nil { + model.Bindable = core.BoolPtr(modelMap["bindable"].(bool)) + } + return model, nil +} + func ResourceIbmOnboardingCatalogPlanGlobalCatalogOverviewUIToMap(model *partnercentersellv1.GlobalCatalogOverviewUI) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.En != nil { @@ -884,6 +1044,13 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(model *partn } modelMap["ui"] = []map[string]interface{}{uiMap} } + if model.Service != nil { + serviceMap, err := ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceToMap(model.Service) + if err != nil { + return modelMap, err + } + modelMap["service"] = []map[string]interface{}{serviceMap} + } if model.Pricing != nil { 
pricingMap, err := ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingToMap(model.Pricing) if err != nil { @@ -891,6 +1058,13 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(model *partn } modelMap["pricing"] = []map[string]interface{}{pricingMap} } + if model.Plan != nil { + planMap, err := ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanToMap(model.Plan) + if err != nil { + return modelMap, err + } + modelMap["plan"] = []map[string]interface{}{planMap} + } return modelMap, nil } @@ -955,6 +1129,9 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsContentToMap( } modelMap["media"] = media } + if model.EmbeddableDashboard != nil { + modelMap["embeddable_dashboard"] = *model.EmbeddableDashboard + } return modelMap, nil } @@ -1006,9 +1183,44 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIUrlsToMap(model *par if model.DocURL != nil { modelMap["doc_url"] = *model.DocURL } + if model.ApidocsURL != nil { + modelMap["apidocs_url"] = *model.ApidocsURL + } if model.TermsURL != nil { modelMap["terms_url"] = *model.TermsURL } + if model.InstructionsURL != nil { + modelMap["instructions_url"] = *model.InstructionsURL + } + if model.CatalogDetailsURL != nil { + modelMap["catalog_details_url"] = *model.CatalogDetailsURL + } + if model.CustomCreatePageURL != nil { + modelMap["custom_create_page_url"] = *model.CustomCreatePageURL + } + if model.Dashboard != nil { + modelMap["dashboard"] = *model.Dashboard + } + return modelMap, nil +} + +func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceToMap(model *partnercentersellv1.GlobalCatalogMetadataService) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.RcProvisionable != nil { + modelMap["rc_provisionable"] = *model.RcProvisionable + } + if model.IamCompatible != nil { + modelMap["iam_compatible"] = *model.IamCompatible + } + if model.Bindable != nil { + modelMap["bindable"] = *model.Bindable + } + if 
model.PlanUpdateable != nil { + modelMap["plan_updateable"] = *model.PlanUpdateable + } + if model.ServiceKeySupported != nil { + modelMap["service_key_supported"] = *model.ServiceKeySupported + } return modelMap, nil } @@ -1023,6 +1235,17 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingToMap(model *pa return modelMap, nil } +func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanToMap(model *partnercentersellv1.GlobalCatalogPlanMetadataPlan) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.AllowInternalUsers != nil { + modelMap["allow_internal_users"] = *model.AllowInternalUsers + } + if model.Bindable != nil { + modelMap["bindable"] = *model.Bindable + } + return modelMap, nil +} + func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanPatchAsPatch(patchVals *partnercentersellv1.GlobalCatalogPlanPatch, d *schema.ResourceData) map[string]interface{} { patch, _ := patchVals.AsPatch() var path string @@ -1074,12 +1297,37 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataAsPatch(patch map[ } else if exists && patch["ui"] != nil { ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIAsPatch(patch["ui"].(map[string]interface{}), d) } + path = "metadata.0.service" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["service"] = nil + } else if exists && patch["service"] != nil { + ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceAsPatch(patch["service"].(map[string]interface{}), d) + } path = "metadata.0.pricing" if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["pricing"] = nil } else if exists && patch["pricing"] != nil { ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingAsPatch(patch["pricing"].(map[string]interface{}), d) } + path = "metadata.0.plan" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["plan"] = nil + } else if exists && patch["plan"] != nil { + 
ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanAsPatch(patch["plan"].(map[string]interface{}), d) + } +} + +func ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.plan.0.allow_internal_users" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["allow_internal_users"] = nil + } + path = "metadata.0.plan.0.bindable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["bindable"] = nil + } } func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1095,6 +1343,31 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingAsPatch(patch m } } +func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.service.0.rc_provisionable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["rc_provisionable"] = nil + } + path = "metadata.0.service.0.iam_compatible" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["iam_compatible"] = nil + } + path = "metadata.0.service.0.bindable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["bindable"] = nil + } + path = "metadata.0.service.0.plan_updateable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["plan_updateable"] = nil + } + path = "metadata.0.service.0.service_key_supported" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["service_key_supported"] = nil + } +} + func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIAsPatch(patch map[string]interface{}, d *schema.ResourceData) { var path string @@ -1127,10 +1400,30 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIUrlsAsPatch(patch ma if _, exists := d.GetOk(path); d.HasChange(path) && 
!exists { patch["doc_url"] = nil } + path = "metadata.0.ui.0.urls.0.apidocs_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["apidocs_url"] = nil + } path = "metadata.0.ui.0.urls.0.terms_url" if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["terms_url"] = nil } + path = "metadata.0.ui.0.urls.0.instructions_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["instructions_url"] = nil + } + path = "metadata.0.ui.0.urls.0.catalog_details_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["catalog_details_url"] = nil + } + path = "metadata.0.ui.0.urls.0.custom_create_page_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["custom_create_page_url"] = nil + } + path = "metadata.0.ui.0.urls.0.dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["dashboard"] = nil + } } func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1159,6 +1452,10 @@ func ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsContentAsPatc } else if exists && patch["media"] != nil { ResourceIbmOnboardingCatalogPlanCatalogProductMediaItemAsPatch(patch["media"].([]interface{})[0].(map[string]interface{}), d) } + path = "metadata.0.ui.0.strings.0.en.0.embeddable_dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["embeddable_dashboard"] = nil + } } func ResourceIbmOnboardingCatalogPlanCatalogProductMediaItemAsPatch(patch map[string]interface{}, d *schema.ResourceData) { diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan_test.go index 640f4ffae9..336b60d5bb 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_plan_test.go @@ 
-138,7 +138,7 @@ func testAccCheckIbmOnboardingCatalogPlanConfigBasic(productID string, catalogPr metadata { rc_compatible = false pricing { - type = "Paid" + type = "paid" origin = "pricing_catalog" } } @@ -175,6 +175,17 @@ func testAccCheckIbmOnboardingCatalogPlanConfig(productID string, catalogProduct type = "paid" origin = "global_catalog" } + service { + rc_provisionable = true + iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true + } + plan { + allow_internal_users = true + bindable = true + } } } `, productID, catalogProductID, env, name, active, disabled, kind) @@ -330,13 +341,19 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(t *testi globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -344,14 +361,27 @@ 
func TestResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(t *testi globalCatalogMetadataUiModel["hidden"] = true globalCatalogMetadataUiModel["side_by_side_index"] = float64(72.5) + globalCatalogMetadataServiceModel := make(map[string]interface{}) + globalCatalogMetadataServiceModel["rc_provisionable"] = true + globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true + globalCatalogMetadataPricingModel := make(map[string]interface{}) globalCatalogMetadataPricingModel["type"] = "free" globalCatalogMetadataPricingModel["origin"] = "global_catalog" + globalCatalogPlanMetadataPlanModel := make(map[string]interface{}) + globalCatalogPlanMetadataPlanModel["allow_internal_users"] = true + globalCatalogPlanMetadataPlanModel["bindable"] = true + model := make(map[string]interface{}) model["rc_compatible"] = true model["ui"] = []map[string]interface{}{globalCatalogMetadataUiModel} + model["service"] = []map[string]interface{}{globalCatalogMetadataServiceModel} model["pricing"] = []map[string]interface{}{globalCatalogMetadataPricingModel} + model["plan"] = []map[string]interface{}{globalCatalogPlanMetadataPlanModel} assert.Equal(t, result, model) } @@ -372,13 +402,19 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(t *testi globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := 
new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -386,14 +422,27 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(t *testi globalCatalogMetadataUiModel.Hidden = core.BoolPtr(true) globalCatalogMetadataUiModel.SideBySideIndex = core.Float64Ptr(float64(72.5)) + globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) + globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) + globalCatalogMetadataPricingModel := new(partnercentersellv1.GlobalCatalogMetadataPricing) globalCatalogMetadataPricingModel.Type = core.StringPtr("free") globalCatalogMetadataPricingModel.Origin = core.StringPtr("global_catalog") + globalCatalogPlanMetadataPlanModel := new(partnercentersellv1.GlobalCatalogPlanMetadataPlan) + 
globalCatalogPlanMetadataPlanModel.AllowInternalUsers = core.BoolPtr(true) + globalCatalogPlanMetadataPlanModel.Bindable = core.BoolPtr(true) + model := new(partnercentersellv1.GlobalCatalogPlanMetadata) model.RcCompatible = core.BoolPtr(true) model.Ui = globalCatalogMetadataUiModel + model.Service = globalCatalogMetadataServiceModel model.Pricing = globalCatalogMetadataPricingModel + model.Plan = globalCatalogPlanMetadataPlanModel result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataToMap(model) assert.Nil(t, err) @@ -418,13 +467,19 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIToMap(t *testing globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -451,13 +506,19 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIToMap(t *testing 
globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -488,6 +549,7 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsToMap(t * globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} @@ -511,6 +573,7 
@@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsToMap(t * globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -538,6 +601,7 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsContentTo model := make(map[string]interface{}) model["bullets"] = []map[string]interface{}{catalogHighlightItemModel} model["media"] = []map[string]interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" assert.Equal(t, result, model) } @@ -558,6 +622,7 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsContentTo model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIStringsContentToMap(model) assert.Nil(t, err) @@ -614,20 +679,54 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIUrlsToMap(t *tes checkResult := func(result map[string]interface{}) { model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = 
"testString" + model["dashboard"] = "testString" assert.Equal(t, result, model) } model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + model.Dashboard = core.StringPtr("testString") result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataUIUrlsToMap(model) assert.Nil(t, err) checkResult(result) } +func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["rc_provisionable"] = true + model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true + model["service_key_supported"] = true + + assert.Equal(t, result, model) + } + + model := new(partnercentersellv1.GlobalCatalogMetadataService) + model.RcProvisionable = core.BoolPtr(true) + model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) + + result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataServiceToMap(model) + assert.Nil(t, err) + checkResult(result) +} + func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingToMap(t *testing.T) { checkResult := func(result map[string]interface{}) { model := make(map[string]interface{}) @@ -646,6 +745,24 @@ func TestResourceIbmOnboardingCatalogPlanGlobalCatalogMetadataPricingToMap(t *te checkResult(result) } +func TestResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + 
model["allow_internal_users"] = true + model["bindable"] = true + + assert.Equal(t, result, model) + } + + model := new(partnercentersellv1.GlobalCatalogPlanMetadataPlan) + model.AllowInternalUsers = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + + result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanGlobalCatalogPlanMetadataPlanToMap(model) + assert.Nil(t, err) + checkResult(result) +} + func TestResourceIbmOnboardingCatalogPlanMapToCatalogProductProvider(t *testing.T) { checkResult := func(result *partnercentersellv1.CatalogProductProvider) { model := new(partnercentersellv1.CatalogProductProvider) @@ -728,13 +845,19 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(t *testi globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = 
core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -742,14 +865,27 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(t *testi globalCatalogMetadataUiModel.Hidden = core.BoolPtr(true) globalCatalogMetadataUiModel.SideBySideIndex = core.Float64Ptr(float64(72.5)) + globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) + globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) + globalCatalogMetadataPricingModel := new(partnercentersellv1.GlobalCatalogMetadataPricing) globalCatalogMetadataPricingModel.Type = core.StringPtr("free") globalCatalogMetadataPricingModel.Origin = core.StringPtr("global_catalog") + globalCatalogPlanMetadataPlanModel := new(partnercentersellv1.GlobalCatalogPlanMetadataPlan) + globalCatalogPlanMetadataPlanModel.AllowInternalUsers = core.BoolPtr(true) + globalCatalogPlanMetadataPlanModel.Bindable = core.BoolPtr(true) + model := new(partnercentersellv1.GlobalCatalogPlanMetadata) model.RcCompatible = core.BoolPtr(true) model.Ui = globalCatalogMetadataUiModel + model.Service = globalCatalogMetadataServiceModel model.Pricing = globalCatalogMetadataPricingModel + model.Plan = globalCatalogPlanMetadataPlanModel assert.Equal(t, result, model) } @@ -770,13 +906,19 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(t *testi globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} 
globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -784,14 +926,27 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(t *testi globalCatalogMetadataUiModel["hidden"] = true globalCatalogMetadataUiModel["side_by_side_index"] = float64(72.5) + globalCatalogMetadataServiceModel := make(map[string]interface{}) + globalCatalogMetadataServiceModel["rc_provisionable"] = true + globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true + globalCatalogMetadataPricingModel := make(map[string]interface{}) globalCatalogMetadataPricingModel["type"] = "free" globalCatalogMetadataPricingModel["origin"] = "global_catalog" + globalCatalogPlanMetadataPlanModel := make(map[string]interface{}) + globalCatalogPlanMetadataPlanModel["allow_internal_users"] = true + globalCatalogPlanMetadataPlanModel["bindable"] 
= true + model := make(map[string]interface{}) model["rc_compatible"] = true model["ui"] = []interface{}{globalCatalogMetadataUiModel} + model["service"] = []interface{}{globalCatalogMetadataServiceModel} model["pricing"] = []interface{}{globalCatalogMetadataPricingModel} + model["plan"] = []interface{}{globalCatalogPlanMetadataPlanModel} result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadata(model) assert.Nil(t, err) @@ -816,13 +971,19 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUI(t *testing globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -849,13 +1010,19 @@ func 
TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUI(t *testing globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -886,6 +1053,7 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStrings(t * globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -909,6 +1077,7 @@ func 
TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStrings(t * globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} @@ -936,6 +1105,7 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStringsCont model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") assert.Equal(t, result, model) } @@ -956,6 +1126,7 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStringsCont model := make(map[string]interface{}) model["bullets"] = []interface{}{catalogHighlightItemModel} model["media"] = []interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIStringsContent(model) assert.Nil(t, err) @@ -1012,20 +1183,54 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIUrls(t *tes checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataUIUrls) { model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + 
model.Dashboard = core.StringPtr("testString") assert.Equal(t, result, model) } model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = "testString" + model["dashboard"] = "testString" result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataUIUrls(model) assert.Nil(t, err) checkResult(result) } +func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataService(t *testing.T) { + checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataService) { + model := new(partnercentersellv1.GlobalCatalogMetadataService) + model.RcProvisionable = core.BoolPtr(true) + model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["rc_provisionable"] = true + model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true + model["service_key_supported"] = true + + result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataService(model) + assert.Nil(t, err) + checkResult(result) +} + func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataPricing(t *testing.T) { checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataPricing) { model := new(partnercentersellv1.GlobalCatalogMetadataPricing) @@ -1043,3 +1248,21 @@ func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogMetadataPricing(t *te assert.Nil(t, err) checkResult(result) } + +func TestResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadataPlan(t *testing.T) { + checkResult := func(result *partnercentersellv1.GlobalCatalogPlanMetadataPlan) { + model := 
new(partnercentersellv1.GlobalCatalogPlanMetadataPlan) + model.AllowInternalUsers = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["allow_internal_users"] = true + model["bindable"] = true + + result, err := partnercentersell.ResourceIbmOnboardingCatalogPlanMapToGlobalCatalogPlanMetadataPlan(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product.go index 04e550a0a5..8b98386dbe 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -242,6 +242,11 @@ func ResourceIbmOnboardingCatalogProduct() *schema.Resource { }, }, }, + "embeddable_dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "On a service kind record this controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -252,19 +257,44 @@ func ResourceIbmOnboardingCatalogProduct() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, - Description: "The UI based URLs.", + Description: "Metadata with URLs related to a service.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "doc_url": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "The URL for your product documentation.", + Description: "The URL for your product's documentation.", + }, + "apidocs_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The URL for your product's API documentation.", }, 
"terms_url": &schema.Schema{ Type: schema.TypeString, Optional: true, Description: "The URL for your product's end user license agreement.", }, + "instructions_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Getting Started tab on the Resource Details page. Setting it the content is loaded from the specified URL.", + }, + "catalog_details_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "custom_create_page_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service.", + }, + "dashboard": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Controls if your service has a custom dashboard or Resource Detail page.", + }, }, }, }, @@ -298,6 +328,21 @@ func ResourceIbmOnboardingCatalogProduct() *schema.Resource { Optional: true, Description: "Whether the service is compatible with the IAM service.", }, + "bindable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Deprecated. 
Controls the Connections tab on the Resource Details page.", + }, + "plan_updateable": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates plan update support and controls the Plan tab on the Resource Details page.", + }, + "service_key_supported": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Indicates service credentials support and controls the Service Credential tab on Resource Details page.", + }, }, }, }, @@ -504,6 +549,44 @@ func ResourceIbmOnboardingCatalogProduct() *schema.Resource { }, }, }, + "composite": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Optional metadata of the service defining it as a composite.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "composite_kind": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The type of the composite service.", + }, + "composite_tag": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The tag used for the composite parent and its children.", + }, + "children": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kind": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The type of the composite child.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The name of the composite child.", + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -557,7 +640,7 @@ func ResourceIbmOnboardingCatalogProductValidator() *validate.ResourceValidator ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, Type: validate.TypeString, Required: true, - AllowedValues: "platform_service, service", + AllowedValues: "composite, platform_service, service", }, ) @@ -724,6 +807,12 @@ func resourceIbmOnboardingCatalogProductRead(context context.Context, d *schema. 
return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_product", "read", "set-url").GetDiag() } } + if parts[0] != "" { + if err = d.Set("product_id", parts[0]); err != nil { + err = fmt.Errorf("Error setting product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_catalog_product", "read", "set-product_id").GetDiag() + } + } if !core.IsNil(globalCatalogProduct.ID) { if err = d.Set("catalog_product_id", globalCatalogProduct.ID); err != nil { err = fmt.Errorf("Error setting catalog_product_id: %s", err) @@ -1000,6 +1089,9 @@ func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStringsConte } model.Media = media } + if modelMap["embeddable_dashboard"] != nil && modelMap["embeddable_dashboard"].(string) != "" { + model.EmbeddableDashboard = core.StringPtr(modelMap["embeddable_dashboard"].(string)) + } return model, nil } @@ -1054,9 +1146,24 @@ func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIUrls(modelMa if modelMap["doc_url"] != nil && modelMap["doc_url"].(string) != "" { model.DocURL = core.StringPtr(modelMap["doc_url"].(string)) } + if modelMap["apidocs_url"] != nil && modelMap["apidocs_url"].(string) != "" { + model.ApidocsURL = core.StringPtr(modelMap["apidocs_url"].(string)) + } if modelMap["terms_url"] != nil && modelMap["terms_url"].(string) != "" { model.TermsURL = core.StringPtr(modelMap["terms_url"].(string)) } + if modelMap["instructions_url"] != nil && modelMap["instructions_url"].(string) != "" { + model.InstructionsURL = core.StringPtr(modelMap["instructions_url"].(string)) + } + if modelMap["catalog_details_url"] != nil && modelMap["catalog_details_url"].(string) != "" { + model.CatalogDetailsURL = core.StringPtr(modelMap["catalog_details_url"].(string)) + } + if modelMap["custom_create_page_url"] != nil && modelMap["custom_create_page_url"].(string) != "" { + model.CustomCreatePageURL = core.StringPtr(modelMap["custom_create_page_url"].(string)) + } 
+ if modelMap["dashboard"] != nil && modelMap["dashboard"].(string) != "" { + model.Dashboard = core.StringPtr(modelMap["dashboard"].(string)) + } return model, nil } @@ -1068,6 +1175,15 @@ func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataService(modelM if modelMap["iam_compatible"] != nil { model.IamCompatible = core.BoolPtr(modelMap["iam_compatible"].(bool)) } + if modelMap["bindable"] != nil { + model.Bindable = core.BoolPtr(modelMap["bindable"].(bool)) + } + if modelMap["plan_updateable"] != nil { + model.PlanUpdateable = core.BoolPtr(modelMap["plan_updateable"].(bool)) + } + if modelMap["service_key_supported"] != nil { + model.ServiceKeySupported = core.BoolPtr(modelMap["service_key_supported"].(bool)) + } return model, nil } @@ -1080,6 +1196,13 @@ func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOther(m } model.PC = PCModel } + if modelMap["composite"] != nil && len(modelMap["composite"].([]interface{})) > 0 { + CompositeModel, err := ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherComposite(modelMap["composite"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Composite = CompositeModel + } return model, nil } @@ -1193,7 +1316,7 @@ func ResourceIbmOnboardingCatalogProductMapToSupportDetailsItem(modelMap map[str if modelMap["contact"] != nil && modelMap["contact"].(string) != "" { model.Contact = core.StringPtr(modelMap["contact"].(string)) } - if modelMap["response_wait_time"] != nil && len(modelMap["response_wait_time"].([]interface{})) > 0 { + if modelMap["response_wait_time"] != nil && len(modelMap["response_wait_time"].([]interface{})) > 0 && modelMap["response_wait_time"].([]interface{})[0] != nil { ResponseWaitTimeModel, err := ResourceIbmOnboardingCatalogProductMapToSupportTimeInterval(modelMap["response_wait_time"].([]interface{})[0].(map[string]interface{})) if err != nil { return model, err @@ -1246,6 +1369,39 @@ func 
ResourceIbmOnboardingCatalogProductMapToSupportDetailsItemAvailabilityTime( return model, nil } +func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherComposite(modelMap map[string]interface{}) (*partnercentersellv1.GlobalCatalogProductMetadataOtherComposite, error) { + model := &partnercentersellv1.GlobalCatalogProductMetadataOtherComposite{} + if modelMap["composite_kind"] != nil && modelMap["composite_kind"].(string) != "" { + model.CompositeKind = core.StringPtr(modelMap["composite_kind"].(string)) + } + if modelMap["composite_tag"] != nil && modelMap["composite_tag"].(string) != "" { + model.CompositeTag = core.StringPtr(modelMap["composite_tag"].(string)) + } + if modelMap["children"] != nil { + children := []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{} + for _, childrenItem := range modelMap["children"].([]interface{}) { + childrenItemModel, err := ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherCompositeChild(childrenItem.(map[string]interface{})) + if err != nil { + return model, err + } + children = append(children, *childrenItemModel) + } + model.Children = children + } + return model, nil +} + +func ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherCompositeChild(modelMap map[string]interface{}) (*partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild, error) { + model := &partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{} + if modelMap["kind"] != nil && modelMap["kind"].(string) != "" { + model.Kind = core.StringPtr(modelMap["kind"].(string)) + } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + return model, nil +} + func ResourceIbmOnboardingCatalogProductGlobalCatalogOverviewUIToMap(model *partnercentersellv1.GlobalCatalogOverviewUI) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.En != nil { @@ -1381,6 
+1537,9 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsContentToM } modelMap["media"] = media } + if model.EmbeddableDashboard != nil { + modelMap["embeddable_dashboard"] = *model.EmbeddableDashboard + } return modelMap, nil } @@ -1432,9 +1591,24 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIUrlsToMap(model * if model.DocURL != nil { modelMap["doc_url"] = *model.DocURL } + if model.ApidocsURL != nil { + modelMap["apidocs_url"] = *model.ApidocsURL + } if model.TermsURL != nil { modelMap["terms_url"] = *model.TermsURL } + if model.InstructionsURL != nil { + modelMap["instructions_url"] = *model.InstructionsURL + } + if model.CatalogDetailsURL != nil { + modelMap["catalog_details_url"] = *model.CatalogDetailsURL + } + if model.CustomCreatePageURL != nil { + modelMap["custom_create_page_url"] = *model.CustomCreatePageURL + } + if model.Dashboard != nil { + modelMap["dashboard"] = *model.Dashboard + } return modelMap, nil } @@ -1446,6 +1620,15 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataServiceToMap(model if model.IamCompatible != nil { modelMap["iam_compatible"] = *model.IamCompatible } + if model.Bindable != nil { + modelMap["bindable"] = *model.Bindable + } + if model.PlanUpdateable != nil { + modelMap["plan_updateable"] = *model.PlanUpdateable + } + if model.ServiceKeySupported != nil { + modelMap["service_key_supported"] = *model.ServiceKeySupported + } return modelMap, nil } @@ -1458,6 +1641,13 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherToMap(m } modelMap["pc"] = []map[string]interface{}{pcMap} } + if model.Composite != nil { + compositeMap, err := ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeToMap(model.Composite) + if err != nil { + return modelMap, err + } + modelMap["composite"] = []map[string]interface{}{compositeMap} + } return modelMap, nil } @@ -1615,6 +1805,39 @@ func 
ResourceIbmOnboardingCatalogProductSupportDetailsItemAvailabilityTimeToMap( return modelMap, nil } +func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeToMap(model *partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CompositeKind != nil { + modelMap["composite_kind"] = *model.CompositeKind + } + if model.CompositeTag != nil { + modelMap["composite_tag"] = *model.CompositeTag + } + if model.Children != nil { + children := []map[string]interface{}{} + for _, childrenItem := range model.Children { + childrenItemMap, err := ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildToMap(&childrenItem) // #nosec G601 + if err != nil { + return modelMap, err + } + children = append(children, childrenItemMap) + } + modelMap["children"] = children + } + return modelMap, nil +} + +func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildToMap(model *partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Kind != nil { + modelMap["kind"] = *model.Kind + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + return modelMap, nil +} + func ResourceIbmOnboardingCatalogProductGlobalCatalogProductPatchAsPatch(patchVals *partnercentersellv1.GlobalCatalogProductPatch, d *schema.ResourceData) map[string]interface{} { patch, _ := patchVals.AsPatch() var path string @@ -1695,6 +1918,44 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherAsPatch } else if exists && patch["pc"] != nil { ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherPCAsPatch(patch["pc"].(map[string]interface{}), d) } + path = "metadata.0.other.0.composite" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["composite"] = nil + } else if exists && 
patch["composite"] != nil { + ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeAsPatch(patch["composite"].(map[string]interface{}), d) + } +} + +func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.other.0.composite.0.composite_kind" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["composite_kind"] = nil + } + path = "metadata.0.other.0.composite.0.composite_tag" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["composite_tag"] = nil + } + path = "metadata.0.other.0.composite.0.children" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["children"] = nil + } else if exists && patch["children"] != nil { + ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildAsPatch(patch["children"].([]interface{})[0].(map[string]interface{}), d) + } +} + +func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildAsPatch(patch map[string]interface{}, d *schema.ResourceData) { + var path string + + path = "metadata.0.other.0.composite.0.children.0.kind" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["kind"] = nil + } + path = "metadata.0.other.0.composite.0.children.0.name" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["name"] = nil + } } func ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherPCAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1857,6 +2118,18 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataServiceAsPatch(patc if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["iam_compatible"] = nil } + path = "metadata.0.service.0.bindable" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["bindable"] = nil + } + path = "metadata.0.service.0.plan_updateable" + 
if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["plan_updateable"] = nil + } + path = "metadata.0.service.0.service_key_supported" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["service_key_supported"] = nil + } } func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1891,10 +2164,30 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIUrlsAsPatch(patch if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["doc_url"] = nil } + path = "metadata.0.ui.0.urls.0.apidocs_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["apidocs_url"] = nil + } path = "metadata.0.ui.0.urls.0.terms_url" if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["terms_url"] = nil } + path = "metadata.0.ui.0.urls.0.instructions_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["instructions_url"] = nil + } + path = "metadata.0.ui.0.urls.0.catalog_details_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["catalog_details_url"] = nil + } + path = "metadata.0.ui.0.urls.0.custom_create_page_url" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["custom_create_page_url"] = nil + } + path = "metadata.0.ui.0.urls.0.dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) && !exists { + patch["dashboard"] = nil + } } func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsAsPatch(patch map[string]interface{}, d *schema.ResourceData) { @@ -1923,6 +2216,10 @@ func ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsContentAsP } else if exists && patch["media"] != nil { ResourceIbmOnboardingCatalogProductCatalogProductMediaItemAsPatch(patch["media"].([]interface{})[0].(map[string]interface{}), d) } + path = "metadata.0.ui.0.strings.0.en.0.embeddable_dashboard" + if _, exists := d.GetOk(path); d.HasChange(path) 
&& !exists { + patch["embeddable_dashboard"] = nil + } } func ResourceIbmOnboardingCatalogProductCatalogProductMediaItemAsPatch(patch map[string]interface{}, d *schema.ResourceData) { diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product_test.go index 2d2f993e74..521c2bdbf0 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_catalog_product_test.go @@ -385,13 +385,19 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -402,6 +408,9 @@ func 
TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t globalCatalogMetadataServiceModel := make(map[string]interface{}) globalCatalogMetadataServiceModel["rc_provisionable"] = true globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true supportTimeIntervalModel := make(map[string]interface{}) supportTimeIntervalModel["value"] = float64(72.5) @@ -442,8 +451,18 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t globalCatalogProductMetadataOtherPcModel := make(map[string]interface{}) globalCatalogProductMetadataOtherPcModel["support"] = []map[string]interface{}{globalCatalogProductMetadataOtherPcSupportModel} + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + globalCatalogProductMetadataOtherCompositeModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeModel["composite_kind"] = "service" + globalCatalogProductMetadataOtherCompositeModel["composite_tag"] = "testString" + globalCatalogProductMetadataOtherCompositeModel["children"] = []map[string]interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + globalCatalogProductMetadataOtherModel := make(map[string]interface{}) globalCatalogProductMetadataOtherModel["pc"] = []map[string]interface{}{globalCatalogProductMetadataOtherPcModel} + globalCatalogProductMetadataOtherModel["composite"] = []map[string]interface{}{globalCatalogProductMetadataOtherCompositeModel} model := make(map[string]interface{}) model["rc_compatible"] = true @@ -470,13 +489,19 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t 
globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -487,6 +512,9 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) 
supportTimeIntervalModel := new(partnercentersellv1.SupportTimeInterval) supportTimeIntervalModel.Value = core.Float64Ptr(float64(72.5)) @@ -527,8 +555,18 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataToMap(t globalCatalogProductMetadataOtherPcModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherPC) globalCatalogProductMetadataOtherPcModel.Support = globalCatalogProductMetadataOtherPcSupportModel + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + globalCatalogProductMetadataOtherCompositeModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + globalCatalogProductMetadataOtherCompositeModel.CompositeKind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeModel.CompositeTag = core.StringPtr("testString") + globalCatalogProductMetadataOtherCompositeModel.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + globalCatalogProductMetadataOtherModel := new(partnercentersellv1.GlobalCatalogProductMetadataOther) globalCatalogProductMetadataOtherModel.PC = globalCatalogProductMetadataOtherPcModel + globalCatalogProductMetadataOtherModel.Composite = globalCatalogProductMetadataOtherCompositeModel model := new(partnercentersellv1.GlobalCatalogProductMetadata) model.RcCompatible = core.BoolPtr(true) @@ -559,13 +597,19 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIToMap(t *test globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = 
[]map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []map[string]interface{}{globalCatalogMetadataUiStringsModel} @@ -592,13 +636,19 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIToMap(t *test globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = 
core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -629,6 +679,7 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsToMap( globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []map[string]interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []map[string]interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []map[string]interface{}{globalCatalogMetadataUiStringsContentModel} @@ -652,6 +703,7 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsToMap( globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -679,6 +731,7 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsConten model := make(map[string]interface{}) model["bullets"] = []map[string]interface{}{catalogHighlightItemModel} model["media"] = 
[]map[string]interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" assert.Equal(t, result, model) } @@ -699,6 +752,7 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsConten model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIStringsContentToMap(model) assert.Nil(t, err) @@ -755,14 +809,24 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIUrlsToMap(t * checkResult := func(result map[string]interface{}) { model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = "testString" + model["dashboard"] = "testString" assert.Equal(t, result, model) } model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + model.Dashboard = core.StringPtr("testString") result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataUIUrlsToMap(model) assert.Nil(t, err) @@ -774,6 +838,9 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataServiceToMap(t model := make(map[string]interface{}) model["rc_provisionable"] = true model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true 
+ model["service_key_supported"] = true assert.Equal(t, result, model) } @@ -781,6 +848,9 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogMetadataServiceToMap(t model := new(partnercentersellv1.GlobalCatalogMetadataService) model.RcProvisionable = core.BoolPtr(true) model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogMetadataServiceToMap(model) assert.Nil(t, err) @@ -828,8 +898,18 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherToM globalCatalogProductMetadataOtherPcModel := make(map[string]interface{}) globalCatalogProductMetadataOtherPcModel["support"] = []map[string]interface{}{globalCatalogProductMetadataOtherPcSupportModel} + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + globalCatalogProductMetadataOtherCompositeModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeModel["composite_kind"] = "service" + globalCatalogProductMetadataOtherCompositeModel["composite_tag"] = "testString" + globalCatalogProductMetadataOtherCompositeModel["children"] = []map[string]interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + model := make(map[string]interface{}) model["pc"] = []map[string]interface{}{globalCatalogProductMetadataOtherPcModel} + model["composite"] = []map[string]interface{}{globalCatalogProductMetadataOtherCompositeModel} assert.Equal(t, result, model) } @@ -873,8 +953,18 @@ func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherToM globalCatalogProductMetadataOtherPcModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherPC) 
globalCatalogProductMetadataOtherPcModel.Support = globalCatalogProductMetadataOtherPcSupportModel + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + globalCatalogProductMetadataOtherCompositeModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + globalCatalogProductMetadataOtherCompositeModel.CompositeKind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeModel.CompositeTag = core.StringPtr("testString") + globalCatalogProductMetadataOtherCompositeModel.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + model := new(partnercentersellv1.GlobalCatalogProductMetadataOther) model.PC = globalCatalogProductMetadataOtherPcModel + model.Composite = globalCatalogProductMetadataOtherCompositeModel result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherToMap(model) assert.Nil(t, err) @@ -1197,6 +1287,52 @@ func TestResourceIbmOnboardingCatalogProductSupportDetailsItemAvailabilityTimeTo checkResult(result) } +func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + model := make(map[string]interface{}) + model["composite_kind"] = "service" + model["composite_tag"] = "testString" + model["children"] = []map[string]interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + + assert.Equal(t, 
result, model) + } + + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + model := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + model.CompositeKind = core.StringPtr("service") + model.CompositeTag = core.StringPtr("testString") + model.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + + result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeToMap(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildToMap(t *testing.T) { + checkResult := func(result map[string]interface{}) { + model := make(map[string]interface{}) + model["kind"] = "service" + model["name"] = "testString" + + assert.Equal(t, result, model) + } + + model := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + model.Kind = core.StringPtr("service") + model.Name = core.StringPtr("testString") + + result, err := partnercentersell.ResourceIbmOnboardingCatalogProductGlobalCatalogProductMetadataOtherCompositeChildToMap(model) + assert.Nil(t, err) + checkResult(result) +} + func TestResourceIbmOnboardingCatalogProductMapToCatalogProductProvider(t *testing.T) { checkResult := func(result *partnercentersellv1.CatalogProductProvider) { model := new(partnercentersellv1.CatalogProductProvider) @@ -1295,13 +1431,19 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = 
[]partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") globalCatalogMetadataUiModel := new(partnercentersellv1.GlobalCatalogMetadataUI) globalCatalogMetadataUiModel.Strings = globalCatalogMetadataUiStringsModel @@ -1312,6 +1454,9 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogMetadataServiceModel := new(partnercentersellv1.GlobalCatalogMetadataService) globalCatalogMetadataServiceModel.RcProvisionable = core.BoolPtr(true) globalCatalogMetadataServiceModel.IamCompatible = core.BoolPtr(true) + globalCatalogMetadataServiceModel.Bindable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.PlanUpdateable = core.BoolPtr(true) + globalCatalogMetadataServiceModel.ServiceKeySupported = core.BoolPtr(true) supportTimeIntervalModel := new(partnercentersellv1.SupportTimeInterval) supportTimeIntervalModel.Value = core.Float64Ptr(float64(72.5)) @@ -1352,8 +1497,18 @@ func 
TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogProductMetadataOtherPcModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherPC) globalCatalogProductMetadataOtherPcModel.Support = globalCatalogProductMetadataOtherPcSupportModel + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + globalCatalogProductMetadataOtherCompositeModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + globalCatalogProductMetadataOtherCompositeModel.CompositeKind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeModel.CompositeTag = core.StringPtr("testString") + globalCatalogProductMetadataOtherCompositeModel.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + globalCatalogProductMetadataOtherModel := new(partnercentersellv1.GlobalCatalogProductMetadataOther) globalCatalogProductMetadataOtherModel.PC = globalCatalogProductMetadataOtherPcModel + globalCatalogProductMetadataOtherModel.Composite = globalCatalogProductMetadataOtherCompositeModel model := new(partnercentersellv1.GlobalCatalogProductMetadata) model.RcCompatible = core.BoolPtr(true) @@ -1380,13 +1535,19 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := 
make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" globalCatalogMetadataUiModel := make(map[string]interface{}) globalCatalogMetadataUiModel["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -1397,6 +1558,9 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogMetadataServiceModel := make(map[string]interface{}) globalCatalogMetadataServiceModel["rc_provisionable"] = true globalCatalogMetadataServiceModel["iam_compatible"] = true + globalCatalogMetadataServiceModel["bindable"] = true + globalCatalogMetadataServiceModel["plan_updateable"] = true + globalCatalogMetadataServiceModel["service_key_supported"] = true supportTimeIntervalModel := make(map[string]interface{}) supportTimeIntervalModel["value"] = float64(72.5) @@ -1437,8 +1601,18 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadata(t globalCatalogProductMetadataOtherPcModel := make(map[string]interface{}) globalCatalogProductMetadataOtherPcModel["support"] = []interface{}{globalCatalogProductMetadataOtherPcSupportModel} + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + globalCatalogProductMetadataOtherCompositeModel := 
make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeModel["composite_kind"] = "service" + globalCatalogProductMetadataOtherCompositeModel["composite_tag"] = "testString" + globalCatalogProductMetadataOtherCompositeModel["children"] = []interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + globalCatalogProductMetadataOtherModel := make(map[string]interface{}) globalCatalogProductMetadataOtherModel["pc"] = []interface{}{globalCatalogProductMetadataOtherPcModel} + globalCatalogProductMetadataOtherModel["composite"] = []interface{}{globalCatalogProductMetadataOtherCompositeModel} model := make(map[string]interface{}) model["rc_compatible"] = true @@ -1469,13 +1643,19 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUI(t *test globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = core.StringPtr("testString") globalCatalogMetadataUiStringsModel := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) globalCatalogMetadataUiStringsModel.En = globalCatalogMetadataUiStringsContentModel globalCatalogMetadataUiUrlsModel := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) globalCatalogMetadataUiUrlsModel.DocURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.ApidocsURL = core.StringPtr("testString") globalCatalogMetadataUiUrlsModel.TermsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.InstructionsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CatalogDetailsURL = core.StringPtr("testString") + globalCatalogMetadataUiUrlsModel.CustomCreatePageURL = core.StringPtr("testString") + 
globalCatalogMetadataUiUrlsModel.Dashboard = core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUI) model.Strings = globalCatalogMetadataUiStringsModel @@ -1502,13 +1682,19 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUI(t *test globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" globalCatalogMetadataUiStringsModel := make(map[string]interface{}) globalCatalogMetadataUiStringsModel["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} globalCatalogMetadataUiUrlsModel := make(map[string]interface{}) globalCatalogMetadataUiUrlsModel["doc_url"] = "testString" + globalCatalogMetadataUiUrlsModel["apidocs_url"] = "testString" globalCatalogMetadataUiUrlsModel["terms_url"] = "testString" + globalCatalogMetadataUiUrlsModel["instructions_url"] = "testString" + globalCatalogMetadataUiUrlsModel["catalog_details_url"] = "testString" + globalCatalogMetadataUiUrlsModel["custom_create_page_url"] = "testString" + globalCatalogMetadataUiUrlsModel["dashboard"] = "testString" model := make(map[string]interface{}) model["strings"] = []interface{}{globalCatalogMetadataUiStringsModel} @@ -1539,6 +1725,7 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStrings( globalCatalogMetadataUiStringsContentModel := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) globalCatalogMetadataUiStringsContentModel.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel.EmbeddableDashboard = 
core.StringPtr("testString") model := new(partnercentersellv1.GlobalCatalogMetadataUIStrings) model.En = globalCatalogMetadataUiStringsContentModel @@ -1562,6 +1749,7 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStrings( globalCatalogMetadataUiStringsContentModel := make(map[string]interface{}) globalCatalogMetadataUiStringsContentModel["bullets"] = []interface{}{catalogHighlightItemModel} globalCatalogMetadataUiStringsContentModel["media"] = []interface{}{catalogProductMediaItemModel} + globalCatalogMetadataUiStringsContentModel["embeddable_dashboard"] = "testString" model := make(map[string]interface{}) model["en"] = []interface{}{globalCatalogMetadataUiStringsContentModel} @@ -1589,6 +1777,7 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStringsC model := new(partnercentersellv1.GlobalCatalogMetadataUIStringsContent) model.Bullets = []partnercentersellv1.CatalogHighlightItem{*catalogHighlightItemModel} model.Media = []partnercentersellv1.CatalogProductMediaItem{*catalogProductMediaItemModel} + model.EmbeddableDashboard = core.StringPtr("testString") assert.Equal(t, result, model) } @@ -1609,6 +1798,7 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStringsC model := make(map[string]interface{}) model["bullets"] = []interface{}{catalogHighlightItemModel} model["media"] = []interface{}{catalogProductMediaItemModel} + model["embeddable_dashboard"] = "testString" result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIStringsContent(model) assert.Nil(t, err) @@ -1665,14 +1855,24 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIUrls(t * checkResult := func(result *partnercentersellv1.GlobalCatalogMetadataUIUrls) { model := new(partnercentersellv1.GlobalCatalogMetadataUIUrls) model.DocURL = core.StringPtr("testString") + model.ApidocsURL = core.StringPtr("testString") model.TermsURL = core.StringPtr("testString") + 
model.InstructionsURL = core.StringPtr("testString") + model.CatalogDetailsURL = core.StringPtr("testString") + model.CustomCreatePageURL = core.StringPtr("testString") + model.Dashboard = core.StringPtr("testString") assert.Equal(t, result, model) } model := make(map[string]interface{}) model["doc_url"] = "testString" + model["apidocs_url"] = "testString" model["terms_url"] = "testString" + model["instructions_url"] = "testString" + model["catalog_details_url"] = "testString" + model["custom_create_page_url"] = "testString" + model["dashboard"] = "testString" result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataUIUrls(model) assert.Nil(t, err) @@ -1684,6 +1884,9 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataService(t model := new(partnercentersellv1.GlobalCatalogMetadataService) model.RcProvisionable = core.BoolPtr(true) model.IamCompatible = core.BoolPtr(true) + model.Bindable = core.BoolPtr(true) + model.PlanUpdateable = core.BoolPtr(true) + model.ServiceKeySupported = core.BoolPtr(true) assert.Equal(t, result, model) } @@ -1691,6 +1894,9 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataService(t model := make(map[string]interface{}) model["rc_provisionable"] = true model["iam_compatible"] = true + model["bindable"] = true + model["plan_updateable"] = true + model["service_key_supported"] = true result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogMetadataService(model) assert.Nil(t, err) @@ -1738,8 +1944,18 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOth globalCatalogProductMetadataOtherPcModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherPC) globalCatalogProductMetadataOtherPcModel.Support = globalCatalogProductMetadataOtherPcSupportModel + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + 
globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + globalCatalogProductMetadataOtherCompositeModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + globalCatalogProductMetadataOtherCompositeModel.CompositeKind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeModel.CompositeTag = core.StringPtr("testString") + globalCatalogProductMetadataOtherCompositeModel.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + model := new(partnercentersellv1.GlobalCatalogProductMetadataOther) model.PC = globalCatalogProductMetadataOtherPcModel + model.Composite = globalCatalogProductMetadataOtherCompositeModel assert.Equal(t, result, model) } @@ -1783,8 +1999,18 @@ func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOth globalCatalogProductMetadataOtherPcModel := make(map[string]interface{}) globalCatalogProductMetadataOtherPcModel["support"] = []interface{}{globalCatalogProductMetadataOtherPcSupportModel} + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + globalCatalogProductMetadataOtherCompositeModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeModel["composite_kind"] = "service" + globalCatalogProductMetadataOtherCompositeModel["composite_tag"] = "testString" + globalCatalogProductMetadataOtherCompositeModel["children"] = []interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + model := make(map[string]interface{}) model["pc"] = []interface{}{globalCatalogProductMetadataOtherPcModel} + model["composite"] = 
[]interface{}{globalCatalogProductMetadataOtherCompositeModel} result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOther(model) assert.Nil(t, err) @@ -2106,3 +2332,49 @@ func TestResourceIbmOnboardingCatalogProductMapToSupportDetailsItemAvailabilityT assert.Nil(t, err) checkResult(result) } + +func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherComposite(t *testing.T) { + checkResult := func(result *partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) { + globalCatalogProductMetadataOtherCompositeChildModel := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + globalCatalogProductMetadataOtherCompositeChildModel.Kind = core.StringPtr("service") + globalCatalogProductMetadataOtherCompositeChildModel.Name = core.StringPtr("testString") + + model := new(partnercentersellv1.GlobalCatalogProductMetadataOtherComposite) + model.CompositeKind = core.StringPtr("service") + model.CompositeTag = core.StringPtr("testString") + model.Children = []partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild{*globalCatalogProductMetadataOtherCompositeChildModel} + + assert.Equal(t, result, model) + } + + globalCatalogProductMetadataOtherCompositeChildModel := make(map[string]interface{}) + globalCatalogProductMetadataOtherCompositeChildModel["kind"] = "service" + globalCatalogProductMetadataOtherCompositeChildModel["name"] = "testString" + + model := make(map[string]interface{}) + model["composite_kind"] = "service" + model["composite_tag"] = "testString" + model["children"] = []interface{}{globalCatalogProductMetadataOtherCompositeChildModel} + + result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherComposite(model) + assert.Nil(t, err) + checkResult(result) +} + +func TestResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherCompositeChild(t *testing.T) { + checkResult := func(result 
*partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) { + model := new(partnercentersellv1.GlobalCatalogProductMetadataOtherCompositeChild) + model.Kind = core.StringPtr("service") + model.Name = core.StringPtr("testString") + + assert.Equal(t, result, model) + } + + model := make(map[string]interface{}) + model["kind"] = "service" + model["name"] = "testString" + + result, err := partnercentersell.ResourceIbmOnboardingCatalogProductMapToGlobalCatalogProductMetadataOtherCompositeChild(model) + assert.Nil(t, err) + checkResult(result) +} diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration.go b/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration.go index bf70b35b85..3e69ffdd1e 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -46,7 +46,7 @@ func ResourceIbmOnboardingIamRegistration() *schema.Resource { }, "name": &schema.Schema{ Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validate.InvokeValidator("ibm_onboarding_iam_registration", "name"), Description: "The IAM registration name, which must be the programmatic name of the product.", }, @@ -938,10 +938,9 @@ func ResourceIbmOnboardingIamRegistration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "access_policy": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, + Type: schema.TypeBool, + Required: true, Description: "Optional opt-in to require access control on the role.", - Elem: &schema.Schema{Type: schema.TypeString}, }, "policy_type": &schema.Schema{ Type: schema.TypeList, @@ -1034,7 +1033,7 @@ func 
ResourceIbmOnboardingIamRegistrationValidator() *validate.ResourceValidator Identifier: "name", ValidateFunctionIdentifier: validate.ValidateRegexp, Type: validate.TypeString, - Optional: true, + Required: true, Regexp: `^[a-z0-9\-.]+$`, }, validate.ValidateSchema{ @@ -1061,9 +1060,7 @@ func resourceIbmOnboardingIamRegistrationCreate(context context.Context, d *sche createIamRegistrationOptions := &partnercentersellv1.CreateIamRegistrationOptions{} createIamRegistrationOptions.SetProductID(d.Get("product_id").(string)) - if _, ok := d.GetOk("name"); ok { - createIamRegistrationOptions.SetName(d.Get("name").(string)) - } + createIamRegistrationOptions.SetName(d.Get("name").(string)) if _, ok := d.GetOk("enabled"); ok { createIamRegistrationOptions.SetEnabled(d.Get("enabled").(bool)) } @@ -1215,12 +1212,17 @@ func resourceIbmOnboardingIamRegistrationRead(context context.Context, d *schema return tfErr.GetDiag() } - if !core.IsNil(iamServiceRegistration.Name) { - if err = d.Set("name", iamServiceRegistration.Name); err != nil { - err = fmt.Errorf("Error setting name: %s", err) - return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_iam_registration", "read", "set-name").GetDiag() + if parts[0] != "" { + if err = d.Set("product_id", parts[0]); err != nil { + err = fmt.Errorf("Error setting product_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_iam_registration", "read", "set-product_id").GetDiag() } } + + if err = d.Set("name", iamServiceRegistration.Name); err != nil { + err = fmt.Errorf("Error setting name: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_iam_registration", "read", "set-name").GetDiag() + } if !core.IsNil(iamServiceRegistration.Enabled) { if err = d.Set("enabled", iamServiceRegistration.Enabled); err != nil { err = fmt.Errorf("Error setting enabled: %s", err) @@ -1978,14 +1980,7 @@ func 
ResourceIbmOnboardingIamRegistrationMapToIamServiceRegistrationSupportedRol func ResourceIbmOnboardingIamRegistrationMapToSupportedRoleOptions(modelMap map[string]interface{}) (*partnercentersellv1.SupportedRoleOptions, error) { model := &partnercentersellv1.SupportedRoleOptions{} - if modelMap["access_policy"] != nil { - model.AccessPolicy = make(map[string]string) - for key, value := range modelMap["access_policy"].(map[string]interface{}) { - if str, ok := value.(string); ok { - model.AccessPolicy[key] = str - } - } - } + model.AccessPolicy = core.BoolPtr(modelMap["access_policy"].(bool)) if modelMap["policy_type"] != nil { policyType := []string{} for _, policyTypeItem := range modelMap["policy_type"].([]interface{}) { @@ -2453,13 +2448,7 @@ func ResourceIbmOnboardingIamRegistrationIamServiceRegistrationSupportedRoleToMa func ResourceIbmOnboardingIamRegistrationSupportedRoleOptionsToMap(model *partnercentersellv1.SupportedRoleOptions) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.AccessPolicy != nil { - accessPolicy := make(map[string]interface{}) - for k, v := range model.AccessPolicy { - accessPolicy[k] = flex.Stringify(v) - } - modelMap["access_policy"] = accessPolicy - } + modelMap["access_policy"] = *model.AccessPolicy if model.PolicyType != nil { modelMap["policy_type"] = model.PolicyType } @@ -2650,10 +2639,6 @@ func ResourceIbmOnboardingIamRegistrationIamServiceRegistrationSupportedRoleAsPa func ResourceIbmOnboardingIamRegistrationSupportedRoleOptionsAsPatch(patch map[string]interface{}, d *schema.ResourceData) { var path string - path = "supported_roles.0.options.0.access_policy" - if _, exists := d.GetOk(path); d.HasChange(path) && !exists { - patch["access_policy"] = nil - } path = "supported_roles.0.options.0.policy_type" if _, exists := d.GetOk(path); d.HasChange(path) && !exists { patch["policy_type"] = nil diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration_test.go 
b/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration_test.go index 1fc2d5326c..9dedaf0c5c 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_iam_registration_test.go @@ -242,7 +242,7 @@ func testAccCheckIbmOnboardingIamRegistrationConfig(productID string, env string default = "%s" } options { - access_policy = { "key" = "inner" } + access_policy = true policy_type = [ "access" ] } } @@ -1158,7 +1158,7 @@ func TestResourceIbmOnboardingIamRegistrationIamServiceRegistrationSupportedRole iamServiceRegistrationDisplayNameObjectModel["zh_cn"] = "testString" supportedRoleOptionsModel := make(map[string]interface{}) - supportedRoleOptionsModel["access_policy"] = map[string]interface{}{"key1": "testString"} + supportedRoleOptionsModel["access_policy"] = true supportedRoleOptionsModel["policy_type"] = []string{"access"} supportedRoleOptionsModel["account_type"] = "enterprise" @@ -1198,7 +1198,7 @@ func TestResourceIbmOnboardingIamRegistrationIamServiceRegistrationSupportedRole iamServiceRegistrationDisplayNameObjectModel.ZhCn = core.StringPtr("testString") supportedRoleOptionsModel := new(partnercentersellv1.SupportedRoleOptions) - supportedRoleOptionsModel.AccessPolicy = map[string]string{"key1": "testString"} + supportedRoleOptionsModel.AccessPolicy = core.BoolPtr(true) supportedRoleOptionsModel.PolicyType = []string{"access"} supportedRoleOptionsModel.AccountType = core.StringPtr("enterprise") @@ -1216,7 +1216,7 @@ func TestResourceIbmOnboardingIamRegistrationIamServiceRegistrationSupportedRole func TestResourceIbmOnboardingIamRegistrationSupportedRoleOptionsToMap(t *testing.T) { checkResult := func(result map[string]interface{}) { model := make(map[string]interface{}) - model["access_policy"] = map[string]interface{}{"key1": "testString"} + model["access_policy"] = true model["policy_type"] = []string{"access"} model["account_type"] = "enterprise" @@ 
-1224,7 +1224,7 @@ func TestResourceIbmOnboardingIamRegistrationSupportedRoleOptionsToMap(t *testin } model := new(partnercentersellv1.SupportedRoleOptions) - model.AccessPolicy = map[string]string{"key1": "testString"} + model.AccessPolicy = core.BoolPtr(true) model.PolicyType = []string{"access"} model.AccountType = core.StringPtr("enterprise") @@ -2140,7 +2140,7 @@ func TestResourceIbmOnboardingIamRegistrationMapToIamServiceRegistrationSupporte iamServiceRegistrationDisplayNameObjectModel.ZhCn = core.StringPtr("testString") supportedRoleOptionsModel := new(partnercentersellv1.SupportedRoleOptions) - supportedRoleOptionsModel.AccessPolicy = map[string]string{"key1": "testString"} + supportedRoleOptionsModel.AccessPolicy = core.BoolPtr(true) supportedRoleOptionsModel.PolicyType = []string{"access"} supportedRoleOptionsModel.AccountType = core.StringPtr("enterprise") @@ -2180,7 +2180,7 @@ func TestResourceIbmOnboardingIamRegistrationMapToIamServiceRegistrationSupporte iamServiceRegistrationDisplayNameObjectModel["zh_cn"] = "testString" supportedRoleOptionsModel := make(map[string]interface{}) - supportedRoleOptionsModel["access_policy"] = map[string]interface{}{"key1": "testString"} + supportedRoleOptionsModel["access_policy"] = true supportedRoleOptionsModel["policy_type"] = []interface{}{"access"} supportedRoleOptionsModel["account_type"] = "enterprise" @@ -2198,7 +2198,7 @@ func TestResourceIbmOnboardingIamRegistrationMapToIamServiceRegistrationSupporte func TestResourceIbmOnboardingIamRegistrationMapToSupportedRoleOptions(t *testing.T) { checkResult := func(result *partnercentersellv1.SupportedRoleOptions) { model := new(partnercentersellv1.SupportedRoleOptions) - model.AccessPolicy = map[string]string{"key1": "testString"} + model.AccessPolicy = core.BoolPtr(true) model.PolicyType = []string{"access"} model.AccountType = core.StringPtr("enterprise") @@ -2206,7 +2206,7 @@ func TestResourceIbmOnboardingIamRegistrationMapToSupportedRoleOptions(t *testin } model 
:= make(map[string]interface{}) - model["access_policy"] = map[string]interface{}{"key1": "testString"} + model["access_policy"] = true model["policy_type"] = []interface{}{"access"} model["account_type"] = "enterprise" diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_product.go b/ibm/service/partnercentersell/resource_ibm_onboarding_product.go index 221f7f0b8c..9477675ef2 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_product.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_product.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -142,6 +142,11 @@ func ResourceIbmOnboardingProduct() *schema.Resource { Computed: true, Description: "The ID of the approval workflow of your product.", }, + "iam_registration_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "IAM registration identifier.", + }, }, } } @@ -315,6 +320,12 @@ func resourceIbmOnboardingProductRead(context context.Context, d *schema.Resourc return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_product", "read", "set-approver_resource_id").GetDiag() } } + if !core.IsNil(onboardingProduct.IamRegistrationID) { + if err = d.Set("iam_registration_id", onboardingProduct.IamRegistrationID); err != nil { + err = fmt.Errorf("Error setting iam_registration_id: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_product", "read", "set-iam_registration_id").GetDiag() + } + } return nil } diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_product_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_product_test.go index c69634c93e..8666264901 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_product_test.go +++ 
b/ibm/service/partnercentersell/resource_ibm_onboarding_product_test.go @@ -24,7 +24,7 @@ func TestAccIbmOnboardingProductBasic(t *testing.T) { typeVarUpdate := "service" resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckPartnerCenterSell(t) }, + PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, CheckDestroy: testAccCheckIbmOnboardingProductDestroy, Steps: []resource.TestStep{ @@ -57,7 +57,7 @@ func TestAccIbmOnboardingProductAllArgs(t *testing.T) { taxAssessmentUpdate := "PAAS" resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckPartnerCenterSell(t) }, + PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, CheckDestroy: testAccCheckIbmOnboardingProductDestroy, Steps: []resource.TestStep{ @@ -81,7 +81,7 @@ func TestAccIbmOnboardingProductAllArgs(t *testing.T) { ), }, resource.TestStep{ - ResourceName: "ibm_onboarding_product.onboarding_product_instance", + ResourceName: "ibm_onboarding_product.onboarding_product", ImportState: true, ImportStateVerify: true, }, @@ -94,8 +94,8 @@ func testAccCheckIbmOnboardingProductConfigBasic(typeVar string) string { resource "ibm_onboarding_product" "onboarding_product_instance" { type = "%s" primary_contact { - name = "petra" - email = "petra@ibm.com" + name = "name" + email = "email" } } `, typeVar) @@ -108,12 +108,19 @@ func testAccCheckIbmOnboardingProductConfig(typeVar string, eccnNumber string, e type = "%s" primary_contact { name = "name" - email = "petra@email.com" + email = "email" } eccn_number = "%s" ero_class = "%s" - unspsc = "25191503" + unspsc = "FIXME" tax_assessment = "%s" + support { + escalation_contacts { + name = "name" + email = "email" + role = "role" + } + } } `, typeVar, eccnNumber, eroClass, taxAssessment) } diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_registration.go b/ibm/service/partnercentersell/resource_ibm_onboarding_registration.go index 4a52eed5f2..5b6958a5c9 100644 --- 
a/ibm/service/partnercentersell/resource_ibm_onboarding_registration.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_registration.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -76,16 +76,6 @@ func ResourceIbmOnboardingRegistration() *schema.Resource { ValidateFunc: validate.InvokeValidator("ibm_onboarding_registration", "provider_access_group"), Description: "The onboarding access group for your team.", }, - "account_dra_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The ID of the IBM Digital Platform Reseller Agreement.", - }, - "account_dpa_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The ID of the IBM Digital Provider Agreement.", - }, "created_at": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -151,7 +141,6 @@ func resourceIbmOnboardingRegistrationCreate(context context.Context, d *schema. 
} createRegistrationOptions := &partnercentersellv1.CreateRegistrationOptions{} - createRegistrationOptions.SetAccountID(d.Get("account_id").(string)) createRegistrationOptions.SetCompanyName(d.Get("company_name").(string)) primaryContactModel, err := ResourceIbmOnboardingRegistrationMapToPrimaryContact(d.Get("primary_contact.0").(map[string]interface{})) @@ -229,18 +218,6 @@ func resourceIbmOnboardingRegistrationRead(context context.Context, d *schema.Re return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_registration", "read", "set-provider_access_group").GetDiag() } } - if !core.IsNil(registration.AccountDraID) { - if err = d.Set("account_dra_id", registration.AccountDraID); err != nil { - err = fmt.Errorf("Error setting account_dra_id: %s", err) - return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_registration", "read", "set-account_dra_id").GetDiag() - } - } - if !core.IsNil(registration.AccountDpaID) { - if err = d.Set("account_dpa_id", registration.AccountDpaID); err != nil { - err = fmt.Errorf("Error setting account_dpa_id: %s", err) - return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_registration", "read", "set-account_dpa_id").GetDiag() - } - } if !core.IsNil(registration.CreatedAt) { if err = d.Set("created_at", registration.CreatedAt); err != nil { err = fmt.Errorf("Error setting created_at: %s", err) diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_registration_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_registration_test.go index 4c66f2f0dd..b0e9ce9aa4 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_registration_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_registration_test.go @@ -26,7 +26,7 @@ func TestAccIbmOnboardingRegistrationBasic(t *testing.T) { companyNameUpdate := "Test_company_up" resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckPartnerCenterSell(t) }, + PreCheck: 
func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, CheckDestroy: testAccCheckIbmOnboardingRegistrationDestroy, Steps: []resource.TestStep{ @@ -61,7 +61,7 @@ func TestAccIbmOnboardingRegistrationAllArgs(t *testing.T) { providerAccessGroupUpdate := "AccessGroupId-b08e7bb5-d480-4c26-b193-d57dd9311608" resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckPartnerCenterSell(t) }, + PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, CheckDestroy: testAccCheckIbmOnboardingRegistrationDestroy, Steps: []resource.TestStep{ diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker.go b/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker.go index 4701d91dbe..4879222cc9 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker.go @@ -2,7 +2,7 @@ // Licensed under the Mozilla Public License v2.0 /* - * IBM OpenAPI Terraform Generator Version: 3.94.1-71478489-20240820-161623 + * IBM OpenAPI Terraform Generator Version: 3.96.0-d6dec9d7-20241008-212902 */ package partnercentersell @@ -38,19 +38,21 @@ func ResourceIbmOnboardingResourceBroker() *schema.Resource { Description: "The environment to fetch this object from.", }, "auth_username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The authentication username to reach the broker.", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validate.InvokeValidator("ibm_onboarding_resource_broker", "auth_username"), + Description: "The authentication username to reach the broker.", }, "auth_password": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, Description: "The authentication password to reach the broker.", }, "auth_scheme": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The supported authentication scheme for the broker.", + 
Type: schema.TypeString, + Required: true, + ValidateFunc: validate.InvokeValidator("ibm_onboarding_resource_broker", "auth_scheme"), + Description: "The supported authentication scheme for the broker.", }, "resource_group_crn": &schema.Schema{ Type: schema.TypeString, @@ -203,6 +205,20 @@ func ResourceIbmOnboardingResourceBrokerValidator() *validate.ResourceValidator MinValueLength: 1, MaxValueLength: 64, }, + validate.ValidateSchema{ + Identifier: "auth_username", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Optional: true, + AllowedValues: "apikey", + }, + validate.ValidateSchema{ + Identifier: "auth_scheme", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Required: true, + AllowedValues: "bearer, bearer-crn", + }, validate.ValidateSchema{ Identifier: "state", ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, @@ -242,12 +258,16 @@ func resourceIbmOnboardingResourceBrokerCreate(context context.Context, d *schem createResourceBrokerOptions := &partnercentersellv1.CreateResourceBrokerOptions{} - createResourceBrokerOptions.SetAuthUsername(d.Get("auth_username").(string)) - createResourceBrokerOptions.SetAuthPassword(d.Get("auth_password").(string)) createResourceBrokerOptions.SetAuthScheme(d.Get("auth_scheme").(string)) createResourceBrokerOptions.SetName(d.Get("name").(string)) createResourceBrokerOptions.SetBrokerURL(d.Get("broker_url").(string)) createResourceBrokerOptions.SetType(d.Get("type").(string)) + if _, ok := d.GetOk("auth_username"); ok { + createResourceBrokerOptions.SetAuthUsername(d.Get("auth_username").(string)) + } + if _, ok := d.GetOk("auth_password"); ok { + createResourceBrokerOptions.SetAuthPassword(d.Get("auth_password").(string)) + } if _, ok := d.GetOk("resource_group_crn"); ok { createResourceBrokerOptions.SetResourceGroupCrn(d.Get("resource_group_crn").(string)) } @@ -305,13 +325,17 @@ func 
resourceIbmOnboardingResourceBrokerRead(context context.Context, d *schema. return tfErr.GetDiag() } - if err = d.Set("auth_username", broker.AuthUsername); err != nil { - err = fmt.Errorf("Error setting auth_username: %s", err) - return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_resource_broker", "read", "set-auth_username").GetDiag() + if !core.IsNil(broker.AuthUsername) { + if err = d.Set("auth_username", broker.AuthUsername); err != nil { + err = fmt.Errorf("Error setting auth_username: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_resource_broker", "read", "set-auth_username").GetDiag() + } } - if err = d.Set("auth_password", broker.AuthPassword); err != nil { - err = fmt.Errorf("Error setting auth_password: %s", err) - return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_resource_broker", "read", "set-auth_password").GetDiag() + if !core.IsNil(broker.AuthPassword) { + if err = d.Set("auth_password", broker.AuthPassword); err != nil { + err = fmt.Errorf("Error setting auth_password: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_onboarding_resource_broker", "read", "set-auth_password").GetDiag() + } } if err = d.Set("auth_scheme", broker.AuthScheme); err != nil { err = fmt.Errorf("Error setting auth_scheme: %s", err) diff --git a/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker_test.go b/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker_test.go index 33d8d998cf..463d94a714 100644 --- a/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker_test.go +++ b/ibm/service/partnercentersell/resource_ibm_onboarding_resource_broker_test.go @@ -22,13 +22,13 @@ import ( func TestAccIbmOnboardingResourceBrokerBasic(t *testing.T) { var conf partnercentersellv1.Broker authUsername := "apikey" - authPassword := "random1234" + authPassword := "K00lH00iautoT0K0TAxXzExxOXXxXxXxXXZz-lOL0sd5" authScheme := 
"bearer" brokerURL := fmt.Sprintf("https://broker-url-for-my-service.com/%d", acctest.RandIntRange(10, 100)) typeVar := "provision_through" name := "broker-petra-1" authUsernameUpdate := "apikey" - authPasswordUpdate := "random1234" + authPasswordUpdate := "K00lH00iautoT0K0TAxXzExxOXXxXxXxXXZz-lOL0sd5" authSchemeUpdate := "bearer" brokerURLUpdate := fmt.Sprintf("https://broker-url-for-my-service.com/%d", acctest.RandIntRange(10, 100)) typeVarUpdate := "provision_behind" @@ -70,7 +70,7 @@ func TestAccIbmOnboardingResourceBrokerAllArgs(t *testing.T) { var conf partnercentersellv1.Broker env := "current" authUsername := "apikey" - authPassword := "random1234" + authPassword := "K00lH00iautoT0K0TAxXzExxOXXxXxXxXXZz-lOL0sd5" authScheme := "bearer" state := "active" brokerURL := fmt.Sprintf("https://broker-url-for-my-service.com/%d", acctest.RandIntRange(10, 100)) @@ -81,7 +81,7 @@ func TestAccIbmOnboardingResourceBrokerAllArgs(t *testing.T) { region := "global" envUpdate := "current" authUsernameUpdate := "apikey" - authPasswordUpdate := "random1234" + authPasswordUpdate := "K00lH00iautoT0K0TAxXzExxOXXxXxXxXXZz-lOL0sd5" authSchemeUpdate := "bearer" stateUpdate := "active" brokerURLUpdate := fmt.Sprintf("https://broker-url-for-my-service.com/%d", acctest.RandIntRange(10, 100)) @@ -134,7 +134,7 @@ func TestAccIbmOnboardingResourceBrokerAllArgs(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "env", + "env", "auth_password", }, }, }, diff --git a/ibm/service/power/data_source_ibm_pi_catalog_images.go b/ibm/service/power/data_source_ibm_pi_catalog_images.go index 194d52311f..5cff94f648 100644 --- a/ibm/service/power/data_source_ibm_pi_catalog_images.go +++ b/ibm/service/power/data_source_ibm_pi_catalog_images.go @@ -59,6 +59,11 @@ func DataSourceIBMPICatalogImages() *schema.Resource { Description: "Date of image creation", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "CRN of this resource.", + 
Type: schema.TypeString, + }, Attr_Description: { Computed: true, Description: "The description of an image.", @@ -159,6 +164,9 @@ func dataSourceIBMPICatalogImagesRead(ctx context.Context, d *schema.ResourceDat if i.CreationDate != nil { image[Attr_CreationDate] = i.CreationDate.String() } + if i.Crn != "" { + image[Attr_CRN] = i.Crn + } if i.Href != nil { image[Attr_Href] = *i.Href } diff --git a/ibm/service/power/data_source_ibm_pi_cloud_instance.go b/ibm/service/power/data_source_ibm_pi_cloud_instance.go index 796eb9fb71..16f0a09471 100644 --- a/ibm/service/power/data_source_ibm_pi_cloud_instance.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_instance.go @@ -5,10 +5,12 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -48,6 +50,11 @@ func DataSourceIBMPICloudInstance() *schema.Resource { Description: "Date of PVM instance creation.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Href: { Computed: true, Description: "Link to Cloud Instance resource.", @@ -134,7 +141,7 @@ func dataSourceIBMPICloudInstanceRead(ctx context.Context, d *schema.ResourceDat d.Set(Attr_Capabilities, cloud_instance_data.Capabilities) d.Set(Attr_Enabled, cloud_instance_data.Enabled) - d.Set(Attr_PVMInstances, flattenpvminstances(cloud_instance_data.PvmInstances)) + d.Set(Attr_PVMInstances, flattenpvminstances(cloud_instance_data.PvmInstances, meta)) d.Set(Attr_Region, cloud_instance_data.Region) d.Set(Attr_TenantID, (cloud_instance_data.TenantID)) d.Set(Attr_TotalInstances, 
cloud_instance_data.Usage.Instances) @@ -146,7 +153,7 @@ func dataSourceIBMPICloudInstanceRead(ctx context.Context, d *schema.ResourceDat return nil } -func flattenpvminstances(list []*models.PVMInstanceReference) []map[string]interface{} { +func flattenpvminstances(list []*models.PVMInstanceReference, meta interface{}) []map[string]interface{} { pvms := make([]map[string]interface{}, 0) for _, lpars := range list { l := map[string]interface{}{ @@ -157,6 +164,14 @@ func flattenpvminstances(list []*models.PVMInstanceReference) []map[string]inter Attr_Status: *lpars.Status, Attr_Systype: lpars.SysType, } + if lpars.Crn != "" { + l[Attr_CRN] = lpars.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(lpars.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi instance (%s) user_tags: %s", *lpars.PvmInstanceID, err) + } + l[Attr_UserTags] = tags + } pvms = append(pvms, l) } return pvms diff --git a/ibm/service/power/data_source_ibm_pi_datacenter.go b/ibm/service/power/data_source_ibm_pi_datacenter.go index 1dd2643f27..4bbb2f969e 100644 --- a/ibm/service/power/data_source_ibm_pi_datacenter.go +++ b/ibm/service/power/data_source_ibm_pi_datacenter.go @@ -7,6 +7,7 @@ import ( "context" "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" @@ -28,6 +29,115 @@ func DataSourceIBMPIDatacenter() *schema.Resource { }, // Attributes + Attr_CapabilityDetails: { + Computed: true, + Description: "Additional Datacenter Capability Details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_DisasterRecovery: { + Computed: true, + Description: "Disaster Recovery Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_AsynchronousReplication: { + Computed: true, + Description: "Asynchronous Replication Target 
Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Enabled: { + Computed: true, + Description: "Service Enabled.", + Type: schema.TypeBool, + }, + Attr_TargetLocations: { + Computed: true, + Description: "List of all replication targets.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Region: { + Computed: true, + Description: "regionZone of replication site.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "the replication site is active / down.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SynchronousReplication: { + Computed: true, + Description: "Synchronous Replication Target Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Enabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Service Enabled.", + }, + Attr_TargetLocations: { + Computed: true, + Description: "List of all replication targets.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Region: { + Computed: true, + Description: "regionZone of replication site.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "the replication site is active / down.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SupportedSystems: { + Computed: true, + Description: "Datacenter System Types Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Dedicated: { + Computed: true, + Description: "List of all available dedicated host types.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, + Attr_General: { + Computed: true, + Description: "List of all available host types.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, + }, + }, + Type: 
schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, Attr_DatacenterCapabilities: { Computed: true, Description: "Datacenter Capabilities.", @@ -87,6 +197,58 @@ func dataSourceIBMPIDatacenterRead(ctx context.Context, d *schema.ResourceData, d.Set(Attr_DatacenterLocation, flex.Flatten(dclocation)) d.Set(Attr_DatacenterStatus, dcData.Status) d.Set(Attr_DatacenterType, dcData.Type) + capabilityDetails := make([]map[string]interface{}, 0, 10) + if dcData.CapabilitiesDetails != nil { + capabilityDetailsMap, err := capabilityDetailsToMap(dcData.CapabilitiesDetails) + if err != nil { + return diag.FromErr(err) + } + capabilityDetails = append(capabilityDetails, capabilityDetailsMap) + } + d.Set(Attr_CapabilityDetails, capabilityDetails) return nil } + +func capabilityDetailsToMap(cd *models.CapabilitiesDetails) (map[string]interface{}, error) { + capabilityDetailsMap := make(map[string]interface{}) + disasterRecoveryMap := disasterRecoveryToMap(cd.DisasterRecovery) + capabilityDetailsMap[Attr_DisasterRecovery] = []map[string]interface{}{disasterRecoveryMap} + + supportedSystemsMap := make(map[string]interface{}) + supportedSystemsMap[Attr_Dedicated] = cd.SupportedSystems.Dedicated + supportedSystemsMap[Attr_General] = cd.SupportedSystems.General + capabilityDetailsMap[Attr_SupportedSystems] = []map[string]interface{}{supportedSystemsMap} + return capabilityDetailsMap, nil +} + +func disasterRecoveryToMap(dr *models.DisasterRecovery) map[string]interface{} { + disasterRecoveryMap := make(map[string]interface{}) + asynchronousReplicationMap := replicationServiceToMap(dr.AsynchronousReplication) + disasterRecoveryMap[Attr_AsynchronousReplication] = []map[string]interface{}{asynchronousReplicationMap} + if dr.SynchronousReplication != nil { + synchronousReplicationMap := replicationServiceToMap(dr.SynchronousReplication) + disasterRecoveryMap[Attr_SynchronousReplication] = []map[string]interface{}{synchronousReplicationMap} + } + + return disasterRecoveryMap +} + 
+func replicationServiceToMap(rs *models.ReplicationService) map[string]interface{} { + replicationMap := make(map[string]interface{}) + replicationMap[Attr_Enabled] = rs.Enabled + targetLocations := []map[string]interface{}{} + for _, targetLocationsItem := range rs.TargetLocations { + + targetLocationsItemMap := make(map[string]interface{}) + if targetLocationsItem.Region != "" { + targetLocationsItemMap[Attr_Region] = targetLocationsItem.Region + } + if targetLocationsItem.Status != "" { + targetLocationsItemMap[Attr_Status] = targetLocationsItem.Status + } + targetLocations = append(targetLocations, targetLocationsItemMap) + } + replicationMap[Attr_TargetLocations] = targetLocations + return replicationMap +} diff --git a/ibm/service/power/data_source_ibm_pi_datacenters.go b/ibm/service/power/data_source_ibm_pi_datacenters.go index 08059004df..f8d035a6be 100644 --- a/ibm/service/power/data_source_ibm_pi_datacenters.go +++ b/ibm/service/power/data_source_ibm_pi_datacenters.go @@ -24,6 +24,115 @@ func DataSourceIBMPIDatacenters() *schema.Resource { Description: "List of Datacenters", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + Attr_CapabilityDetails: { + Computed: true, + Description: "Additional Datacenter Capability Details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_DisasterRecovery: { + Computed: true, + Description: "Disaster Recovery Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_AsynchronousReplication: { + Computed: true, + Description: "Asynchronous Replication Target Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Enabled: { + Computed: true, + Description: "Service Enabled.", + Type: schema.TypeBool, + }, + Attr_TargetLocations: { + Computed: true, + Description: "List of all replication targets.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Region: { + Computed: true, + Description: "regionZone of 
replication site.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "the replication site is active / down.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SynchronousReplication: { + Computed: true, + Description: "Synchronous Replication Target Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Enabled: { + Type: schema.TypeBool, + Computed: true, + Description: "Service Enabled.", + }, + Attr_TargetLocations: { + Computed: true, + Description: "List of all replication targets.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Region: { + Computed: true, + Description: "regionZone of replication site.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "the replication site is active / down.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SupportedSystems: { + Computed: true, + Description: "Datacenter System Types Information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Dedicated: { + Computed: true, + Description: "List of all available dedicated host types.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, + Attr_General: { + Computed: true, + Description: "List of all available host types.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, Attr_DatacenterCapabilities: { Computed: true, Description: "Datacenter Capabilities", @@ -73,6 +182,7 @@ func dataSourceIBMPIDatacentersRead(ctx context.Context, d *schema.ResourceData, for _, datacenter := range datacentersData.Datacenters { if datacenter != nil { dc := map[string]interface{}{ + Attr_DatacenterCapabilities: 
datacenter.Capabilities, Attr_DatacenterHref: datacenter.Href, Attr_DatacenterLocation: map[string]interface{}{ @@ -83,6 +193,11 @@ func dataSourceIBMPIDatacentersRead(ctx context.Context, d *schema.ResourceData, Attr_DatacenterStatus: datacenter.Status, Attr_DatacenterType: datacenter.Type, } + if datacenter.CapabilitiesDetails != nil { + capabilityDetailsMap, _ := capabilityDetailsToMap(datacenter.CapabilitiesDetails) + + dc[Attr_CapabilityDetails] = []map[string]interface{}{capabilityDetailsMap} + } datacenters = append(datacenters, dc) } } diff --git a/ibm/service/power/data_source_ibm_pi_disaster_recovery_location.go b/ibm/service/power/data_source_ibm_pi_disaster_recovery_location.go index 4c839ab37f..c8f9a557d3 100644 --- a/ibm/service/power/data_source_ibm_pi_disaster_recovery_location.go +++ b/ibm/service/power/data_source_ibm_pi_disaster_recovery_location.go @@ -47,6 +47,25 @@ func DataSourceIBMPIDisasterRecoveryLocation() *schema.Resource { Description: "The region zone of the location.", Type: schema.TypeString, }, + Attr_ReplicationPoolMap: { + Computed: true, + Description: "List of replication pool map.", + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_RemotePool: { + Computed: true, + Description: "Remote pool.", + Type: schema.TypeString, + }, + Attr_VolumePool: { + Computed: true, + Description: "Volume pool.", + Type: schema.TypeString, + }, + }, + }, + }, }, }, Type: schema.TypeList, @@ -69,11 +88,22 @@ func dataSourceIBMPIDisasterRecoveryLocation(ctx context.Context, d *schema.Reso } result := make([]map[string]interface{}, 0, len(drLocationSite.ReplicationSites)) - for _, i := range drLocationSite.ReplicationSites { - if i != nil { + for _, site := range drLocationSite.ReplicationSites { + if site != nil { + replicationPoolMap := make([]map[string]string, 0) + if site.ReplicationPoolMap != nil { + for _, rMap := range site.ReplicationPoolMap { + replicationPool := make(map[string]string) + 
replicationPool[Attr_RemotePool] = rMap.RemotePool + replicationPool[Attr_VolumePool] = rMap.VolumePool + replicationPoolMap = append(replicationPoolMap, replicationPool) + } + } + l := map[string]interface{}{ - Attr_IsActive: i.IsActive, - Attr_Location: i.Location, + Attr_IsActive: site.IsActive, + Attr_Location: site.Location, + Attr_ReplicationPoolMap: replicationPoolMap, } result = append(result, l) } diff --git a/ibm/service/power/data_source_ibm_pi_disaster_recovery_locations.go b/ibm/service/power/data_source_ibm_pi_disaster_recovery_locations.go index c838348000..7ff1a9f0a3 100644 --- a/ibm/service/power/data_source_ibm_pi_disaster_recovery_locations.go +++ b/ibm/service/power/data_source_ibm_pi_disaster_recovery_locations.go @@ -43,6 +43,25 @@ func DataSourceIBMPIDisasterRecoveryLocations() *schema.Resource { Description: "The region zone of the location.", Type: schema.TypeString, }, + Attr_ReplicationPoolMap: { + Computed: true, + Description: "List of replication pool map.", + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_RemotePool: { + Computed: true, + Description: "Remote pool.", + Type: schema.TypeString, + }, + Attr_VolumePool: { + Computed: true, + Description: "Volume pool.", + Type: schema.TypeString, + }, + }, + }, + }, }, }, Type: schema.TypeList, @@ -67,20 +86,30 @@ func dataSourceIBMPIDisasterRecoveryLocations(ctx context.Context, d *schema.Res } results := make([]map[string]interface{}, 0, len(drLocationSites.DisasterRecoveryLocations)) - for _, i := range drLocationSites.DisasterRecoveryLocations { - if i != nil { - replicationSites := make([]map[string]interface{}, 0, len(i.ReplicationSites)) - for _, j := range i.ReplicationSites { - if j != nil { + for _, drl := range drLocationSites.DisasterRecoveryLocations { + if drl != nil { + replicationSites := make([]map[string]interface{}, 0, len(drl.ReplicationSites)) + for _, site := range drl.ReplicationSites { + if site != nil { + 
replicationPoolMap := make([]map[string]string, 0) + if site.ReplicationPoolMap != nil { + for _, rMap := range site.ReplicationPoolMap { + replicationPool := make(map[string]string) + replicationPool[Attr_RemotePool] = rMap.RemotePool + replicationPool[Attr_VolumePool] = rMap.VolumePool + replicationPoolMap = append(replicationPoolMap, replicationPool) + } + } r := map[string]interface{}{ - Attr_IsActive: j.IsActive, - Attr_Location: j.Location, + Attr_IsActive: site.IsActive, + Attr_Location: site.Location, + Attr_ReplicationPoolMap: replicationPoolMap, } replicationSites = append(replicationSites, r) } } l := map[string]interface{}{ - Attr_Location: i.Location, + Attr_Location: drl.Location, Attr_ReplicationSites: replicationSites, } results = append(results, l) diff --git a/ibm/service/power/data_source_ibm_pi_image.go b/ibm/service/power/data_source_ibm_pi_image.go index 7b72b1430b..0975029328 100644 --- a/ibm/service/power/data_source_ibm_pi_image.go +++ b/ibm/service/power/data_source_ibm_pi_image.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -37,6 +39,11 @@ func DataSourceIBMPIImage() *schema.Resource { Description: "The CPU architecture that the image is designed for. 
", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Hypervisor: { Computed: true, Description: "Hypervision Type.", @@ -47,7 +54,6 @@ func DataSourceIBMPIImage() *schema.Resource { Description: "The identifier of this image type.", Type: schema.TypeString, }, - // TODO: Relabel this one "operating_system" to match catalog images Attr_OperatingSystem: { Computed: true, Description: "The operating system that is installed with the image.", @@ -58,6 +64,11 @@ func DataSourceIBMPIImage() *schema.Resource { Description: "The size of the image in megabytes.", Type: schema.TypeInt, }, + Attr_SourceChecksum: { + Computed: true, + Description: "Checksum of the image.", + Type: schema.TypeString, + }, Attr_State: { Computed: true, Description: "The state for this image. ", @@ -73,6 +84,13 @@ func DataSourceIBMPIImage() *schema.Resource { Description: "The storage type for this image.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, }, } } @@ -93,10 +111,19 @@ func dataSourceIBMPIImagesRead(ctx context.Context, d *schema.ResourceData, meta d.SetId(*imagedata.ImageID) d.Set(Attr_Architecture, imagedata.Specifications.Architecture) + if imagedata.Crn != "" { + d.Set(Attr_CRN, imagedata.Crn) + tags, err := flex.GetTagsUsingCRN(meta, string(imagedata.Crn)) + if err != nil { + log.Printf("Error on get of pi image (%s) user_tags: %s", *imagedata.ImageID, err) + } + d.Set(Attr_UserTags, tags) + } d.Set(Attr_Hypervisor, imagedata.Specifications.HypervisorType) d.Set(Attr_ImageType, imagedata.Specifications.ImageType) d.Set(Attr_OperatingSystem, imagedata.Specifications.OperatingSystem) d.Set(Attr_Size, imagedata.Size) + d.Set(Attr_SourceChecksum, imagedata.Specifications.SourceChecksum) d.Set(Attr_State, 
imagedata.State) d.Set(Attr_StoragePool, imagedata.StoragePool) d.Set(Attr_StorageType, imagedata.StorageType) diff --git a/ibm/service/power/data_source_ibm_pi_images.go b/ibm/service/power/data_source_ibm_pi_images.go index e3a8936325..436837b1aa 100644 --- a/ibm/service/power/data_source_ibm_pi_images.go +++ b/ibm/service/power/data_source_ibm_pi_images.go @@ -5,10 +5,12 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -34,6 +36,11 @@ func DataSourceIBMPIImages() *schema.Resource { Description: "List of all supported images.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Href: { Computed: true, Description: "The hyper link of an image.", @@ -54,6 +61,11 @@ func DataSourceIBMPIImages() *schema.Resource { Description: "The name of an image.", Type: schema.TypeString, }, + Attr_SourceChecksum: { + Computed: true, + Description: "Checksum of the image.", + Type: schema.TypeString, + }, Attr_State: { Computed: true, Description: "The state of an image.", @@ -69,6 +81,13 @@ func DataSourceIBMPIImages() *schema.Resource { Description: "The storage type of an image.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, }, }, Type: schema.TypeList, @@ -93,22 +112,32 @@ func dataSourceIBMPIImagesAllRead(ctx context.Context, d *schema.ResourceData, m var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - 
d.Set(Attr_ImageInfo, flattenStockImages(imagedata.Images)) + d.Set(Attr_ImageInfo, flattenStockImages(imagedata.Images, meta)) return nil } -func flattenStockImages(list []*models.ImageReference) []map[string]interface{} { +func flattenStockImages(list []*models.ImageReference, meta interface{}) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ - Attr_Href: *i.Href, - Attr_ID: *i.ImageID, - Attr_ImageType: i.Specifications.ImageType, - Attr_Name: *i.Name, - Attr_State: *i.State, - Attr_StoragePool: *i.StoragePool, - Attr_StorageType: *i.StorageType, + Attr_Href: *i.Href, + Attr_ID: *i.ImageID, + Attr_ImageType: i.Specifications.ImageType, + Attr_Name: *i.Name, + Attr_SourceChecksum: i.Specifications.SourceChecksum, + Attr_State: *i.State, + Attr_StoragePool: *i.StoragePool, + Attr_StorageType: *i.StorageType, + } + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := flex.GetTagsUsingCRN(meta, string(i.Crn)) + if err != nil { + log.Printf( + "Error on get of image (%s) user_tags: %s", *i.ImageID, err) + } + l[Attr_UserTags] = tags } result = append(result, l) } diff --git a/ibm/service/power/data_source_ibm_pi_instance.go b/ibm/service/power/data_source_ibm_pi_instance.go index c8ac038a9c..95d20b330e 100644 --- a/ibm/service/power/data_source_ibm_pi_instance.go +++ b/ibm/service/power/data_source_ibm_pi_instance.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -32,6 +34,11 @@ func DataSourceIBMPIInstance() *schema.Resource { }, // Attributes + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: 
schema.TypeString, + }, Attr_DeploymentType: { Computed: true, Description: "The custom deployment type.", @@ -193,6 +200,11 @@ func DataSourceIBMPIInstance() *schema.Resource { Description: "The status of the instance.", Type: schema.TypeString, }, + Attr_StorageConnection: { + Computed: true, + Description: "The storage connection type.", + Type: schema.TypeString, + }, Attr_StoragePool: { Computed: true, Description: "The storage Pool where server is deployed.", @@ -208,6 +220,13 @@ func DataSourceIBMPIInstance() *schema.Resource { Description: "The storage type where server is deployed.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VirtualCoresAssigned: { Computed: true, Description: "The virtual cores that are assigned to the instance.", @@ -239,6 +258,14 @@ func dataSourceIBMPIInstancesRead(ctx context.Context, d *schema.ResourceData, m pvminstanceid := *powervmdata.PvmInstanceID d.SetId(pvminstanceid) + if powervmdata.Crn != "" { + d.Set(Attr_CRN, powervmdata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(powervmdata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi instance (%s) user_tags: %s", *powervmdata.PvmInstanceID, err) + } + d.Set(Attr_UserTags, tags) + } d.Set(Attr_DeploymentType, powervmdata.DeploymentType) d.Set(Attr_LicenseRepositoryCapacity, powervmdata.LicenseRepositoryCapacity) d.Set(Attr_MaxMem, powervmdata.Maxmem) @@ -256,6 +283,7 @@ func dataSourceIBMPIInstancesRead(ctx context.Context, d *schema.ResourceData, m d.Set(Attr_SharedProcessorPool, powervmdata.SharedProcessorPool) d.Set(Attr_SharedProcessorPoolID, powervmdata.SharedProcessorPoolID) d.Set(Attr_Status, powervmdata.Status) + d.Set(Attr_StorageConnection, powervmdata.StorageConnection) d.Set(Attr_StorageType, powervmdata.StorageType) d.Set(Attr_StoragePool, 
powervmdata.StoragePool) d.Set(Attr_StoragePoolAffinity, powervmdata.StoragePoolAffinity) diff --git a/ibm/service/power/data_source_ibm_pi_instance_snapshot.go b/ibm/service/power/data_source_ibm_pi_instance_snapshot.go index d57f666b4b..d4a7275794 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_snapshot.go +++ b/ibm/service/power/data_source_ibm_pi_instance_snapshot.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -42,6 +44,11 @@ func DataSourceIBMPIInstanceSnapshot() *schema.Resource { Description: "Date of snapshot creation.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Description: { Computed: true, Description: "The description of the snapshot.", @@ -67,6 +74,13 @@ func DataSourceIBMPIInstanceSnapshot() *schema.Resource { Description: "The status of the Power Virtual Machine instance snapshot.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VolumeSnapshots: { Computed: true, Description: "A map of volume snapshots included in the Power Virtual Machine instance snapshot.", @@ -92,6 +106,14 @@ func dataSourceIBMPIInstanceSnapshotRead(ctx context.Context, d *schema.Resource d.SetId(*snapshotData.SnapshotID) d.Set(Attr_Action, snapshotData.Action) d.Set(Attr_CreationDate, snapshotData.CreationDate.String()) + if snapshotData.Crn != "" { + d.Set(Attr_CRN, snapshotData.Crn) + tags, err := 
flex.GetGlobalTagsUsingCRN(meta, string(snapshotData.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi instance snapshot (%s) user_tags: %s", *snapshotData.SnapshotID, err) + } + d.Set(Attr_UserTags, tags) + } d.Set(Attr_Description, snapshotData.Description) d.Set(Attr_LastUpdatedDate, snapshotData.LastUpdateDate.String()) d.Set(Attr_Name, snapshotData.Name) diff --git a/ibm/service/power/data_source_ibm_pi_instance_snapshot_test.go b/ibm/service/power/data_source_ibm_pi_instance_snapshot_test.go index 0ef17d9344..18369bfcda 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_snapshot_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_snapshot_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIInstanceSnapshotDataSource_basic(t *testing.T) { + snapshotResData := "data.ibm_pi_instance_snapshot.testacc_ds_snapshot" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIInstanceSnapshotDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIInstanceSnapshotDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_instance_snapshot.testacc_ds_snapshot", "id"), + resource.TestCheckResourceAttrSet(snapshotResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_instance_snapshots.go b/ibm/service/power/data_source_ibm_pi_instance_snapshots.go index dad1bd7f61..bfc03e280e 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_snapshots.go +++ b/ibm/service/power/data_source_ibm_pi_instance_snapshots.go @@ -10,6 +10,7 @@ import ( "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -44,6 +45,11 @@ func DataSourceIBMPIInstanceSnapshots() *schema.Resource { Description: "Date of snapshot creation.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Description: { Computed: true, Description: "The description of the snapshot.", @@ -74,6 +80,13 @@ func DataSourceIBMPIInstanceSnapshots() *schema.Resource { Description: "The status of the Power Virtual Machine instance snapshot.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VolumeSnapshots: { Computed: true, Description: "A map of volume snapshots included in the Power Virtual Machine instance snapshot.", @@ -102,12 +115,12 @@ func dataSourceIBMPIInstanceSnapshotsRead(ctx context.Context, d *schema.Resourc var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set(Attr_InstanceSnapshots, flattenSnapshotsInstances(snapshotData.Snapshots)) + d.Set(Attr_InstanceSnapshots, flattenSnapshotsInstances(snapshotData.Snapshots, meta)) return nil } -func flattenSnapshotsInstances(list []*models.Snapshot) []map[string]interface{} { +func flattenSnapshotsInstances(list []*models.Snapshot, meta interface{}) []map[string]interface{} { log.Printf("Calling the flattenSnapshotsInstances call with list %d", len(list)) result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { @@ -122,6 +135,14 @@ func flattenSnapshotsInstances(list []*models.Snapshot) []map[string]interface{} Attr_Status: i.Status, Attr_VolumeSnapshots: i.VolumeSnapshots, } + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(i.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi instance snapshot (%s) user_tags: %s", 
*i.SnapshotID, err) + } + l[Attr_UserTags] = tags + } result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_instance_snapshots_test.go b/ibm/service/power/data_source_ibm_pi_instance_snapshots_test.go index f508671d50..df3b994096 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_snapshots_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_snapshots_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIInstanceSnapshotsDataSource_basic(t *testing.T) { + snapshotResData := "data.ibm_pi_instance_snapshots.testacc_ds_snapshots" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIInstanceSnapshotsDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIInstanceSnapshotsDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_instance_snapshots.testacc_ds_snapshots", "id"), + resource.TestCheckResourceAttrSet(snapshotResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_instance_test.go b/ibm/service/power/data_source_ibm_pi_instance_test.go index 1c352395a6..080692e296 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIInstanceDataSource_basic(t *testing.T) { + instanceResData := "data.ibm_pi_instance.testacc_ds_instance" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIInstanceDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIInstanceDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_instance.testacc_ds_instance", "id"), + resource.TestCheckResourceAttrSet(instanceResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_instance_volumes.go 
b/ibm/service/power/data_source_ibm_pi_instance_volumes.go index b0ceedf9eb..385f0e5798 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_volumes.go +++ b/ibm/service/power/data_source_ibm_pi_instance_volumes.go @@ -5,10 +5,12 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -49,6 +51,21 @@ func DataSourceIBMPIInstanceVolumes() *schema.Resource { Description: "Indicates if the volume is boot capable.", Type: schema.TypeBool, }, + Attr_CreationDate: { + Computed: true, + Description: "Date volume was created.", + Type: schema.TypeString, + }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, + Attr_FreezeTime: { + Computed: true, + Description: "The freeze time of remote copy.", + Type: schema.TypeString, + }, Attr_Href: { Computed: true, Description: "The hyper link of the volume.", @@ -59,6 +76,11 @@ func DataSourceIBMPIInstanceVolumes() *schema.Resource { Description: "The unique identifier of the volume.", Type: schema.TypeString, }, + Attr_LastUpdateDate: { + Computed: true, + Description: "The last updated date of the volume.", + Type: schema.TypeString, + }, Attr_Name: { Computed: true, Description: "The name of the volume.", @@ -69,6 +91,17 @@ func DataSourceIBMPIInstanceVolumes() *schema.Resource { Description: "Volume pool, name of storage pool where the volume is located.", Type: schema.TypeString, }, + Attr_ReplicationEnabled: { + Computed: true, + Description: "Indicates if the volume should be replication enabled or not.", + Type: schema.TypeBool, + }, + Attr_ReplicationSites: { + Computed: true, + Description: "List of 
replication sites for volume replication.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_Shareable: { Computed: true, Description: "Indicates if the volume is shareable between VMs.", @@ -89,6 +122,13 @@ func DataSourceIBMPIInstanceVolumes() *schema.Resource { Description: "The disk type that is used for this volume.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, }, }, Type: schema.TypeList, @@ -114,25 +154,44 @@ func dataSourceIBMPIInstanceVolumesRead(ctx context.Context, d *schema.ResourceD var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) d.Set(Attr_BootVolumeID, *volumedata.Volumes[0].VolumeID) - d.Set(Attr_InstanceVolumes, flattenVolumesInstances(volumedata.Volumes)) + d.Set(Attr_InstanceVolumes, flattenVolumesInstances(volumedata.Volumes, meta)) return nil } -func flattenVolumesInstances(list []*models.VolumeReference) []map[string]interface{} { +func flattenVolumesInstances(list []*models.VolumeReference, meta interface{}) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { + l := map[string]interface{}{ - Attr_Bootable: *i.Bootable, - Attr_Href: *i.Href, - Attr_ID: *i.VolumeID, - Attr_Name: *i.Name, - Attr_Pool: i.VolumePool, - Attr_Shareable: *i.Shareable, - Attr_Size: *i.Size, - Attr_State: *i.State, - Attr_Type: *i.DiskType, + Attr_Bootable: *i.Bootable, + Attr_CreationDate: i.CreationDate.String(), + Attr_Href: *i.Href, + Attr_ID: *i.VolumeID, + Attr_LastUpdateDate: i.LastUpdateDate.String(), + Attr_Name: *i.Name, + Attr_Pool: i.VolumePool, + Attr_ReplicationEnabled: i.ReplicationEnabled, + Attr_Shareable: *i.Shareable, + Attr_Size: *i.Size, + Attr_State: *i.State, + Attr_Type: *i.DiskType, + } + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := 
flex.GetGlobalTagsUsingCRN(meta, string(i.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of volume (%s) user_tags: %s", *i.VolumeID, err) + } + l[Attr_UserTags] = tags } + if i.FreezeTime != nil { + l[Attr_FreezeTime] = i.FreezeTime.String() + } + if len(i.ReplicationSites) > 0 { + l[Attr_ReplicationSites] = i.ReplicationSites + } + result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_instances.go b/ibm/service/power/data_source_ibm_pi_instances.go index 481e4e8ba0..fcfc9ff25c 100644 --- a/ibm/service/power/data_source_ibm_pi_instances.go +++ b/ibm/service/power/data_source_ibm_pi_instances.go @@ -5,11 +5,13 @@ package power import ( "context" + "log" "strconv" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -34,6 +36,11 @@ func DataSourceIBMPIInstances() *schema.Resource { Description: "List of power virtual server instances for the respective cloud instance.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Fault: { Computed: true, Description: "Fault information.", @@ -174,6 +181,11 @@ func DataSourceIBMPIInstances() *schema.Resource { Description: "The status of the instance.", Type: schema.TypeString, }, + Attr_StorageConnection: { + Computed: true, + Description: "The storage connection type.", + Type: schema.TypeString, + }, Attr_StoragePool: { Computed: true, Description: "The storage Pool where server is deployed.", @@ -189,6 +201,13 @@ func DataSourceIBMPIInstances() *schema.Resource { Description: "The storage type where server is deployed.", Type: 
schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VirtualCoresAssigned: { Computed: true, Description: "The virtual cores that are assigned to the instance.", @@ -220,12 +239,12 @@ func dataSourceIBMPIInstancesAllRead(ctx context.Context, d *schema.ResourceData var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set(Attr_PVMInstances, flattenPvmInstances(powervmdata.PvmInstances)) + d.Set(Attr_PVMInstances, flattenPvmInstances(powervmdata.PvmInstances, meta)) return nil } -func flattenPvmInstances(list []*models.PVMInstanceReference) []map[string]interface{} { +func flattenPvmInstances(list []*models.PVMInstanceReference, meta interface{}) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ @@ -247,12 +266,22 @@ func flattenPvmInstances(list []*models.PVMInstanceReference) []map[string]inter Attr_SharedProcessorPool: i.SharedProcessorPool, Attr_SharedProcessorPoolID: i.SharedProcessorPoolID, Attr_Status: *i.Status, + Attr_StorageConnection: i.StorageConnection, Attr_StoragePool: i.StoragePool, Attr_StoragePoolAffinity: i.StoragePoolAffinity, Attr_StorageType: i.StorageType, Attr_VirtualCoresAssigned: i.VirtualCores.Assigned, } + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(i.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi instance (%s) user_tags: %s", *i.PvmInstanceID, err) + } + l[Attr_UserTags] = tags + } + if i.Health != nil { l[Attr_HealthStatus] = i.Health.Status } @@ -273,6 +302,7 @@ func flattenPvmInstanceNetworks(list []*models.PVMInstanceNetwork) (networks []m p := make(map[string]interface{}) p[Attr_ExternalIP] = pvmip.ExternalIP p[Attr_IP] = pvmip.IPAddress + p[Attr_Macaddress] = pvmip.MacAddress 
p[Attr_MacAddress] = pvmip.MacAddress p[Attr_NetworkID] = pvmip.NetworkID p[Attr_NetworkName] = pvmip.NetworkName diff --git a/ibm/service/power/data_source_ibm_pi_instances_test.go b/ibm/service/power/data_source_ibm_pi_instances_test.go index f707581ef2..cf1dfe786f 100644 --- a/ibm/service/power/data_source_ibm_pi_instances_test.go +++ b/ibm/service/power/data_source_ibm_pi_instances_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIInstancesDataSource_basic(t *testing.T) { + instancesResData := "data.ibm_pi_instances.testacc_ds_instance" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIInstancesDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIInstancesDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_instances.testacc_ds_instance", "id"), + resource.TestCheckResourceAttrSet(instancesResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_network.go b/ibm/service/power/data_source_ibm_pi_network.go index 2835514f90..dec8dab0cc 100644 --- a/ibm/service/power/data_source_ibm_pi_network.go +++ b/ibm/service/power/data_source_ibm_pi_network.go @@ -5,10 +5,12 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -48,6 +50,11 @@ func DataSourceIBMPINetwork() *schema.Resource { Description: "The CIDR of the network.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_DNS: { Computed: true, Description: "The DNS 
Servers for the network.", @@ -91,6 +98,13 @@ func DataSourceIBMPINetwork() *schema.Resource { Description: "The percentage of IP addresses used.", Type: schema.TypeFloat, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VLanID: { Computed: true, Description: "The VLAN ID that the network is connected to.", @@ -122,6 +136,14 @@ func dataSourceIBMPINetworkRead(ctx context.Context, d *schema.ResourceData, met if networkdata.Cidr != nil { d.Set(Attr_CIDR, networkdata.Cidr) } + if networkdata.Crn != "" { + d.Set(Attr_CRN, networkdata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(networkdata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi network (%s) user_tags: %s", *networkdata.NetworkID, err) + } + d.Set(Attr_UserTags, tags) + } if len(networkdata.DNSServers) > 0 { d.Set(Attr_DNS, networkdata.DNSServers) } diff --git a/ibm/service/power/data_source_ibm_pi_network_address_group.go b/ibm/service/power/data_source_ibm_pi_network_address_group.go new file mode 100644 index 0000000000..2efc6a80e0 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_address_group.go @@ -0,0 +1,112 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" +) + +func DataSourceIBMPINetworkAddressGroup() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkAddressGroupRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkAddressGroupID: { + Description: "Network Address Group ID.", + Required: true, + Type: schema.TypeString, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The Network Address Group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IP addresses in CIDR notation (for example 192.168.66.2/32) in the Network Address Group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CIDR: { + Computed: true, + Description: "The IP addresses in CIDR notation for example 192.168.1.5/32.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The id of the Network Address Group member IP addresses.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the Network Address Group.", + Type: schema.TypeString, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} + 
+func dataSourceIBMPINetworkAddressGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nagID := d.Get(Arg_NetworkAddressGroupID).(string) + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, cloudInstanceID) + networkAddressGroup, err := nagC.Get(nagID) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*networkAddressGroup.ID) + if networkAddressGroup.Crn != nil { + d.Set(Attr_CRN, networkAddressGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkAddressGroup.Crn)) + if err != nil { + log.Printf("Error on get of pi network address group (%s) user_tags: %s", nagID, err) + } + d.Set(Attr_UserTags, userTags) + } + + members := []map[string]interface{}{} + if len(networkAddressGroup.Members) > 0 { + for _, mbr := range networkAddressGroup.Members { + member := memberToMap(mbr) + members = append(members, member) + } + d.Set(Attr_Members, members) + } + d.Set(Attr_Name, networkAddressGroup.Name) + + return nil +} diff --git a/ibm/service/power/data_source_ibm_pi_network_address_group_test.go b/ibm/service/power/data_source_ibm_pi_network_address_group_test.go new file mode 100644 index 0000000000..d076aa5100 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_address_group_test.go @@ -0,0 +1,36 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPINetworkAddressGroupDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkAddressGroupDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_address_group.network_address_group", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkAddressGroupDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_address_group" "network_address_group" { + pi_cloud_instance_id = "%s" + pi_network_address_group_id = "%s" + }`, acc.Pi_cloud_instance_id, acc.Pi_network_address_group_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_address_groups.go b/ibm/service/power/data_source_ibm_pi_network_address_groups.go new file mode 100644 index 0000000000..35f8d3a862 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_address_groups.go @@ -0,0 +1,145 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "log" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPINetworkAddressGroups() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkAddressGroupsRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + + // Attributes + Attr_NetworkAddressGroups: { + Computed: true, + Description: "list of Network Address Groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CRN: { + Computed: true, + Description: "The Network Address Group's crn.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The id of the Network Address Group.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IP addresses in CIDR notation (for example 192.168.66.2/32) in the Network Address Group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CIDR: { + Computed: true, + Description: "The IP addresses in CIDR notation for example 192.168.1.5/32.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The id of the Network Address Group member IP addresses.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the Network Address Group.", + Type: 
schema.TypeString, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + }, + Type: schema.TypeList, + }, + }, + } +} + +func dataSourceIBMPINetworkAddressGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, cloudInstanceID) + networkAddressGroups, err := nagC.GetAll() + if err != nil { + return diag.FromErr(err) + } + + var genID, _ = uuid.GenerateUUID() + d.SetId(genID) + + nags := []map[string]interface{}{} + if len(networkAddressGroups.NetworkAddressGroups) > 0 { + for _, nag := range networkAddressGroups.NetworkAddressGroups { + modelMap := networkAddressGroupsNetworkAddressGroupToMap(nag, meta) + nags = append(nags, modelMap) + } + } + d.Set(Attr_NetworkAddressGroups, nags) + + return nil +} + +func networkAddressGroupsNetworkAddressGroupToMap(networkAddressGroup *models.NetworkAddressGroup, meta interface{}) map[string]interface{} { + nag := make(map[string]interface{}) + if networkAddressGroup.Crn != nil { + nag[Attr_CRN] = networkAddressGroup.Crn + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkAddressGroup.Crn)) + if err != nil { + log.Printf("Error on get of pi network address group (%s) user_tags: %s", *networkAddressGroup.ID, err) + } + nag[Attr_UserTags] = userTags + } + + nag[Attr_ID] = networkAddressGroup.ID + if len(networkAddressGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, membersItem := range networkAddressGroup.Members { + member := memberToMap(membersItem) + members = append(members, member) + } + nag[Attr_Members] = members + } + nag[Attr_Name] = networkAddressGroup.Name + return nag 
+} + +func memberToMap(mbr *models.NetworkAddressGroupMember) map[string]interface{} { + member := make(map[string]interface{}) + member[Attr_CIDR] = mbr.Cidr + member[Attr_ID] = mbr.ID + return member +} diff --git a/ibm/service/power/data_source_ibm_pi_network_address_groups_test.go b/ibm/service/power/data_source_ibm_pi_network_address_groups_test.go new file mode 100644 index 0000000000..fdb1f2c34f --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_address_groups_test.go @@ -0,0 +1,35 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPINetworkAddressGroupsDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkAddressGroupsDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_address_groups.network_address_groups", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkAddressGroupsDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_address_groups" "network_address_groups" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_interface.go b/ibm/service/power/data_source_ibm_pi_network_interface.go new file mode 100644 index 0000000000..f582f9ba90 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_interface.go @@ -0,0 +1,164 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" +) + +func DataSourceIBMPINetworkInterface() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkInterfaceRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkID: { + Description: "Network ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkInterfaceID: { + Description: "Network Interface ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The Network Interface's crn.", + Type: schema.TypeString, + }, + Attr_Instance: { + Computed: true, + Description: "The attached instance to this Network Interface.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Href: { + Computed: true, + Description: "Link to instance resource.", + Type: schema.TypeString, + }, + Attr_InstanceID: { + Computed: true, + Description: "The attached instance ID.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_IPAddress: { + Computed: true, + Description: "The ip address of this Network Interface.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + 
Description: "The mac address of the Network Interface.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "Name of the Network Interface (not unique or indexable).", + Type: schema.TypeString, + }, + Attr_NetworkInterfaceID: { + Computed: true, + Description: "ID of the network interface.", + Type: schema.TypeString, + }, + Attr_NetworkSecurityGroupID: { + Computed: true, + Description: "ID of the Network Security Group the network interface will be added to.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "The status of the network address group.", + Type: schema.TypeString, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} + +func dataSourceIBMPINetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + networkID := d.Get(Arg_NetworkID).(string) + networkInterfaceID := d.Get(Arg_NetworkInterfaceID).(string) + networkC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) + networkInterface, err := networkC.GetNetworkInterface(networkID, networkInterfaceID) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprintf("%s/%s", networkID, *networkInterface.ID)) + d.Set(Attr_IPAddress, networkInterface.IPAddress) + d.Set(Attr_MacAddress, networkInterface.MacAddress) + d.Set(Attr_Name, networkInterface.Name) + d.Set(Attr_NetworkInterfaceID, *networkInterface.ID) + d.Set(Attr_NetworkSecurityGroupID, networkInterface.NetworkSecurityGroupID) + + if networkInterface.Instance != nil { + instance := []map[string]interface{}{} + instanceMap := pvmInstanceToMap(networkInterface.Instance) + instance = 
append(instance, instanceMap) + d.Set(Attr_Instance, instance) + } + d.Set(Attr_Status, networkInterface.Status) + if networkInterface.Crn != nil { + d.Set(Attr_CRN, networkInterface.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkInterface.Crn)) + if err != nil { + log.Printf("Error on get of network interface (%s) user_tags: %s", *networkInterface.ID, err) + } + d.Set(Attr_UserTags, userTags) + } + + return nil +} + +func pvmInstanceToMap(pvm *models.NetworkInterfaceInstance) map[string]interface{} { + instanceMap := make(map[string]interface{}) + if pvm.Href != "" { + instanceMap[Attr_Href] = pvm.Href + } + if pvm.InstanceID != "" { + instanceMap[Attr_InstanceID] = pvm.InstanceID + } + return instanceMap +} diff --git a/ibm/service/power/data_source_ibm_pi_network_interface_test.go b/ibm/service/power/data_source_ibm_pi_network_interface_test.go new file mode 100644 index 0000000000..599558207e --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_interface_test.go @@ -0,0 +1,43 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkInterfaceDataSourceBasic(t *testing.T) { + netIntData := "data.ibm_pi_network_interface.network_interface" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkInterfaceDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(netIntData, power.Attr_ID), + resource.TestCheckResourceAttrSet(netIntData, power.Attr_IPAddress), + resource.TestCheckResourceAttrSet(netIntData, power.Attr_MacAddress), + resource.TestCheckResourceAttrSet(netIntData, power.Attr_Name), + resource.TestCheckResourceAttrSet(netIntData, power.Attr_Status), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkInterfaceDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "%s" + pi_network_id = "%s" + pi_network_interface_id = "%s" + }`, acc.Pi_cloud_instance_id, acc.Pi_network_id, acc.Pi_network_interface_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_interfaces.go b/ibm/service/power/data_source_ibm_pi_network_interfaces.go new file mode 100644 index 0000000000..a4a571165d --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_interfaces.go @@ -0,0 +1,166 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "log" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPINetworkInterfaces() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkInterfacesRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkID: { + Description: "Network ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attributes + Attr_Interfaces: { + Computed: true, + Description: "Network Interfaces.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CRN: { + Computed: true, + Description: "The Network Interface's crn.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The unique Network Interface ID.", + Type: schema.TypeString, + }, + Attr_Instance: { + Computed: true, + Description: "The attached instance to this Network Interface.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Href: { + Computed: true, + Description: "Link to instance resource.", + Type: schema.TypeString, + }, + Attr_InstanceID: { + Computed: true, + Description: "The attached instance ID.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_IPAddress: { + Computed: true, + Description: "The ip address 
of this Network Interface.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of the Network Interface.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "Name of the Network Interface (not unique or indexable).", + Type: schema.TypeString, + }, + Attr_NetworkSecurityGroupID: { + Computed: true, + Description: "ID of the Network Security Group the network interface will be added to.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "The status of the network address group.", + Type: schema.TypeString, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + }, + Type: schema.TypeList, + }, + }, + } +} + +func dataSourceIBMPINetworkInterfacesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + networkID := d.Get(Arg_NetworkID).(string) + networkC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) + networkInterfaces, err := networkC.GetAllNetworkInterfaces(networkID) + if err != nil { + return diag.FromErr(err) + } + + var genID, _ = uuid.GenerateUUID() + d.SetId(genID) + interfaces := []map[string]interface{}{} + if len(networkInterfaces.Interfaces) > 0 { + for _, netInterface := range networkInterfaces.Interfaces { + interfaceMap := networkInterfaceToMap(netInterface, meta) + interfaces = append(interfaces, interfaceMap) + } + } + d.Set(Attr_Interfaces, interfaces) + + return nil +} + +func networkInterfaceToMap(netInterface *models.NetworkInterface, meta interface{}) map[string]interface{} { + interfaceMap := make(map[string]interface{}) + interfaceMap[Attr_ID] = netInterface.ID + 
interfaceMap[Attr_IPAddress] = netInterface.IPAddress + interfaceMap[Attr_MacAddress] = netInterface.MacAddress + interfaceMap[Attr_Name] = netInterface.Name + interfaceMap[Attr_NetworkSecurityGroupID] = netInterface.NetworkSecurityGroupID + if netInterface.Instance != nil { + pvmInstanceMap := pvmInstanceToMap(netInterface.Instance) + interfaceMap[Attr_Instance] = []map[string]interface{}{pvmInstanceMap} + } + interfaceMap[Attr_Status] = netInterface.Status + if netInterface.Crn != nil { + interfaceMap[Attr_CRN] = netInterface.Crn + userTags, err := flex.GetTagsUsingCRN(meta, string(*netInterface.Crn)) + if err != nil { + log.Printf("Error on get of network interface (%s) user_tags: %s", *netInterface.ID, err) + } + interfaceMap[Attr_UserTags] = userTags + } + + return interfaceMap +} diff --git a/ibm/service/power/data_source_ibm_pi_network_interfaces_test.go b/ibm/service/power/data_source_ibm_pi_network_interfaces_test.go new file mode 100644 index 0000000000..03e16df20d --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_interfaces_test.go @@ -0,0 +1,36 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPINetworkInterfacesDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkInterfacesDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_interfaces.network_interfaces", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkInterfacesDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_interfaces" "network_interfaces" { + pi_cloud_instance_id = "%s" + pi_network_id = "%s" + }`, acc.Pi_cloud_instance_id, acc.Pi_network_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_security_group.go b/ibm/service/power/data_source_ibm_pi_network_security_group.go new file mode 100644 index 0000000000..323dda38fc --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_security_group.go @@ -0,0 +1,316 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPINetworkSecurityGroup() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkSecurityGroupRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkSecurityGroupID: { + Description: "network security group ID.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The network security group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IPv4 addresses and, or network interfaces in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the member in a network security group.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of a network interface included if the type is network-interface.", + Type: schema.TypeString, + }, + Attr_Target: { + Computed: true, + Description: "If ipv4-address type, then IPv4 address or if network-interface type, then network interface ID.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of member.", + Type: schema.TypeString, 
+ }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the network security group.", + Type: schema.TypeString, + }, + Attr_Rules: { + Computed: true, + Description: "The list of rules in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Action: { + Computed: true, + Description: "The action to take if the rule matches network traffic.", + Type: schema.TypeString, + }, + Attr_DestinationPort: { + Computed: true, + Description: "The list of destination port.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the rule in a network security group.", + Type: schema.TypeString, + }, + Attr_Protocol: { + Computed: true, + Description: "The list of protocol.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Computed: true, + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Type: schema.TypeString, + }, + Attr_TCPFlags: { + Computed: true, + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Computed: true, + Description: "TCP flag.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Type: { + Computed: true, + Description: "The protocol of the network traffic.", + Type: 
schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Remote: { + Computed: true, + Description: "List of remote.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the remote Network Address Group or network security group the rules apply to. Not required for default-network-address-group.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of remote group the rules apply to.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SourcePort: { + Computed: true, + Description: "List of source port", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. 
If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} + +func dataSourceIBMPINetworkSecurityGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + + networkSecurityGroup, err := nsgClient.Get(d.Get(Arg_NetworkSecurityGroupID).(string)) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*networkSecurityGroup.ID) + if networkSecurityGroup.Crn != nil { + d.Set(Attr_CRN, networkSecurityGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkSecurityGroup.Crn)) + if err != nil { + log.Printf("Error on get of pi network security group (%s) user_tags: %s", *networkSecurityGroup.ID, err) + } + d.Set(Attr_UserTags, userTags) + } + + if len(networkSecurityGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range networkSecurityGroup.Members { + mbrMap := networkSecurityGroupMemberToMap(mbr) + members = append(members, mbrMap) + } + d.Set(Attr_Members, members) + } + + d.Set(Attr_Name, networkSecurityGroup.Name) + + if len(networkSecurityGroup.Rules) > 0 { + rules := []map[string]interface{}{} + for _, rule := range networkSecurityGroup.Rules { + ruleMap := networkSecurityGroupRuleToMap(rule) + rules = append(rules, ruleMap) + } + d.Set(Attr_Rules, rules) + } + + return nil +} + +func networkSecurityGroupMemberToMap(mbr *models.NetworkSecurityGroupMember) map[string]interface{} { + 
mbrMap := make(map[string]interface{}) + mbrMap[Attr_ID] = mbr.ID + if mbr.MacAddress != "" { + mbrMap[Attr_MacAddress] = mbr.MacAddress + } + mbrMap[Attr_Target] = mbr.Target + mbrMap[Attr_Type] = mbr.Type + return mbrMap +} + +func networkSecurityGroupRuleToMap(rule *models.NetworkSecurityGroupRule) map[string]interface{} { + ruleMap := make(map[string]interface{}) + ruleMap[Attr_Action] = rule.Action + if rule.DestinationPort != nil { + destinationPortMap := networkSecurityGroupRulePortToMap(rule.DestinationPort) + ruleMap[Attr_DestinationPort] = []map[string]interface{}{destinationPortMap} + } + + ruleMap[Attr_ID] = rule.ID + + protocolMap := networkSecurityGroupRuleProtocolToMap(rule.Protocol) + ruleMap[Attr_Protocol] = []map[string]interface{}{protocolMap} + + remoteMap := networkSecurityGroupRuleRemoteToMap(rule.Remote) + ruleMap[Attr_Remote] = []map[string]interface{}{remoteMap} + + if rule.SourcePort != nil { + sourcePortMap := networkSecurityGroupRulePortToMap(rule.SourcePort) + ruleMap[Attr_SourcePort] = []map[string]interface{}{sourcePortMap} + } + + return ruleMap +} + +func networkSecurityGroupRulePortToMap(port *models.NetworkSecurityGroupRulePort) map[string]interface{} { + portMap := make(map[string]interface{}) + portMap[Attr_Maximum] = port.Maximum + portMap[Attr_Minimum] = port.Minimum + return portMap +} + +func networkSecurityGroupRuleProtocolToMap(protocol *models.NetworkSecurityGroupRuleProtocol) map[string]interface{} { + protocolMap := make(map[string]interface{}) + if protocol.IcmpType != nil { + protocolMap[Attr_ICMPType] = protocol.IcmpType + } + if len(protocol.TCPFlags) > 0 { + tcpFlags := []map[string]interface{}{} + for _, tcpFlagsItem := range protocol.TCPFlags { + tcpFlagsItemMap := make(map[string]interface{}) + tcpFlagsItemMap[Attr_Flag] = tcpFlagsItem.Flag + tcpFlags = append(tcpFlags, tcpFlagsItemMap) + } + protocolMap[Attr_TCPFlags] = tcpFlags + } + if protocol.Type != "" { + protocolMap[Attr_Type] = protocol.Type + } + 
return protocolMap +} + +func networkSecurityGroupRuleRemoteToMap(remote *models.NetworkSecurityGroupRuleRemote) map[string]interface{} { + remoteMap := make(map[string]interface{}) + if remote.ID != "" { + remoteMap[Attr_ID] = remote.ID + } + if remote.Type != "" { + remoteMap[Attr_Type] = remote.Type + } + return remoteMap +} diff --git a/ibm/service/power/data_source_ibm_pi_network_security_group_test.go b/ibm/service/power/data_source_ibm_pi_network_security_group_test.go new file mode 100644 index 0000000000..4bb77b59d9 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_security_group_test.go @@ -0,0 +1,37 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPINetworkSecurityGroupDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_security_group.network_security_group", "id"), + resource.TestCheckResourceAttrSet("data.ibm_pi_network_security_group.network_security_group", "name"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkSecurityGroupDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_security_group" "network_security_group" { + pi_cloud_instance_id = "%s" + pi_network_security_group_id = "%s" + }`, acc.Pi_cloud_instance_id, acc.Pi_network_security_group_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_security_groups.go b/ibm/service/power/data_source_ibm_pi_network_security_groups.go new file mode 100644 index 0000000000..b79d24326f --- 
/dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_security_groups.go @@ -0,0 +1,266 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "log" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPINetworkSecurityGroups() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPINetworkSecurityGroupsRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attributes + Attr_NetworkSecurityGroups: { + Computed: true, + Description: "list of Network Security Groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CRN: { + Computed: true, + Description: "The network security group's crn.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the network security group.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IPv4 addresses and, or network interfaces in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the member in a network security group.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of a network interface included if the type is network-interface.", + Type: 
schema.TypeString, + }, + Attr_Target: { + Computed: true, + Description: "If ipv4-address type, then IPv4 address or if network-interface type, then network interface ID.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of member.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the network security group.", + Type: schema.TypeString, + }, + Attr_Rules: { + Computed: true, + Description: "The list of rules in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Action: { + Computed: true, + Description: "The action to take if the rule matches network traffic.", + Type: schema.TypeString, + }, + Attr_DestinationPort: { + Computed: true, + Description: "The list of destination port.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. 
If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the rule in a network security group.", + Type: schema.TypeString, + }, + Attr_Protocol: { + Computed: true, + Description: "The list of protocol.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Computed: true, + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Type: schema.TypeString, + }, + Attr_TCPFlags: { + Computed: true, + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Computed: true, + Description: "TCP flag.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Type: { + Computed: true, + Description: "The protocol of the network traffic.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Remote: { + Computed: true, + Description: "List of remote.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the remote Network Address Group or network security group the rules apply to. Not required for default-network-address-group.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of remote group the rules apply to.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SourcePort: { + Computed: true, + Description: "List of source port", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. 
If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + }, + Type: schema.TypeList, + }, + }, + } +} + +func dataSourceIBMPINetworkSecurityGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + nsgResp, err := nsgClient.GetAll() + if err != nil { + return diag.FromErr(err) + } + + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + + networkSecurityGroups := []map[string]interface{}{} + if len(nsgResp.NetworkSecurityGroups) > 0 { + for _, nsg := range nsgResp.NetworkSecurityGroups { + networkSecurityGroup := networkSecurityGroupToMap(nsg, meta) + networkSecurityGroups = append(networkSecurityGroups, networkSecurityGroup) + } + } + + d.Set(Attr_NetworkSecurityGroups, networkSecurityGroups) + + return nil +} + +func networkSecurityGroupToMap(nsg *models.NetworkSecurityGroup, meta interface{}) map[string]interface{} { + networkSecurityGroup := make(map[string]interface{}) + if nsg.Crn != nil { + networkSecurityGroup[Attr_CRN] = nsg.Crn + userTags, err := flex.GetTagsUsingCRN(meta, string(*nsg.Crn)) + if err != nil { + log.Printf("Error on get of pi network security group (%s) user_tags: %s", *nsg.ID, err) 
+ } + networkSecurityGroup[Attr_UserTags] = userTags + } + + networkSecurityGroup[Attr_ID] = nsg.ID + if len(nsg.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range nsg.Members { + mbrMap := networkSecurityGroupMemberToMap(mbr) + members = append(members, mbrMap) + } + networkSecurityGroup[Attr_Members] = members + } + networkSecurityGroup[Attr_Name] = nsg.Name + if len(nsg.Rules) > 0 { + rules := []map[string]interface{}{} + for _, rule := range nsg.Rules { + rulesItemMap := networkSecurityGroupRuleToMap(rule) + rules = append(rules, rulesItemMap) + } + networkSecurityGroup[Attr_Rules] = rules + } + return networkSecurityGroup +} diff --git a/ibm/service/power/data_source_ibm_pi_network_security_groups_test.go b/ibm/service/power/data_source_ibm_pi_network_security_groups_test.go new file mode 100644 index 0000000000..7d3c9abf05 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_security_groups_test.go @@ -0,0 +1,35 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPINetworkSecurityGroupsDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupsDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_security_groups.network_security_groups", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkSecurityGroupsDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_network_security_groups" "network_security_groups" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_test.go b/ibm/service/power/data_source_ibm_pi_network_test.go index c4c09fa58c..9cb5fa3c72 100644 --- a/ibm/service/power/data_source_ibm_pi_network_test.go +++ b/ibm/service/power/data_source_ibm_pi_network_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPINetworkDataSource_basic(t *testing.T) { + networkRes := "data.ibm_pi_network.testacc_ds_network" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPINetworkDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPINetworkDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_network.testacc_ds_network", "id"), + resource.TestCheckResourceAttrSet(networkRes, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_networks.go b/ibm/service/power/data_source_ibm_pi_networks.go index 75d9fc6964..db5c3ed5c2 100644 --- a/ibm/service/power/data_source_ibm_pi_networks.go 
+++ b/ibm/service/power/data_source_ibm_pi_networks.go @@ -5,10 +5,12 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -38,6 +40,11 @@ func DataSourceIBMPINetworks() *schema.Resource { Description: "The network communication configuration option of the network (for satellite locations only).", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_DhcpManaged: { Computed: true, Description: "Indicates if the network DHCP Managed.", @@ -68,6 +75,13 @@ func DataSourceIBMPINetworks() *schema.Resource { Description: "The type of network.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VLanID: { Computed: true, Description: "The VLAN ID that the network is connected to.", @@ -97,12 +111,12 @@ func dataSourceIBMPINetworksRead(ctx context.Context, d *schema.ResourceData, me var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set(Attr_Networks, flattenNetworks(networkdata.Networks)) + d.Set(Attr_Networks, flattenNetworks(networkdata.Networks, meta)) return nil } -func flattenNetworks(list []*models.NetworkReference) []map[string]interface{} { +func flattenNetworks(list []*models.NetworkReference, meta interface{}) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ @@ -115,6 +129,15 @@ func flattenNetworks(list []*models.NetworkReference) 
[]map[string]interface{} { Attr_Type: *i.Type, Attr_VLanID: *i.VlanID, } + + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(i.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi network (%s) user_tags: %s", *i.NetworkID, err) + } + l[Attr_UserTags] = tags + } result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_networks_test.go b/ibm/service/power/data_source_ibm_pi_networks_test.go index 3235db2a64..2d53f2e73b 100644 --- a/ibm/service/power/data_source_ibm_pi_networks_test.go +++ b/ibm/service/power/data_source_ibm_pi_networks_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPINetworksDataSource_basic(t *testing.T) { + networksResData := "data.ibm_pi_networks.testacc_ds_networks" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPINetworksDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPINetworksDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_networks.testacc_ds_networks", "id"), + resource.TestCheckResourceAttrSet(networksResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_public_network.go b/ibm/service/power/data_source_ibm_pi_public_network.go index ef8195c964..04af881a3c 100644 --- a/ibm/service/power/data_source_ibm_pi_public_network.go +++ b/ibm/service/power/data_source_ibm_pi_public_network.go @@ -26,6 +26,11 @@ func DataSourceIBMPIPublicNetwork() *schema.Resource { }, // Attributes + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_Name: { Computed: true, Description: "The name of the network.", @@ -63,6 +68,9 @@ func dataSourceIBMPIPublicNetworkRead(ctx context.Context, d *schema.ResourceDat } d.SetId(*networkdata.Networks[0].NetworkID) + if networkdata.Networks[0].Crn != "" { + d.Set(Attr_CRN, 
networkdata.Networks[0].Crn) + } if networkdata.Networks[0].Name != nil { d.Set(Attr_Name, networkdata.Networks[0].Name) } diff --git a/ibm/service/power/data_source_ibm_pi_public_network_test.go b/ibm/service/power/data_source_ibm_pi_public_network_test.go index 3ac39c13a6..3062b232de 100644 --- a/ibm/service/power/data_source_ibm_pi_public_network_test.go +++ b/ibm/service/power/data_source_ibm_pi_public_network_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIPublicNetworkDataSource_basic(t *testing.T) { + publicNetworkResData := "data.ibm_pi_public_network.testacc_ds_public_network" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIPublicNetworkDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIPublicNetworkDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_public_network.testacc_ds_public_network", "id"), + resource.TestCheckResourceAttrSet(publicNetworkResData, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_pvm_snapshot.go b/ibm/service/power/data_source_ibm_pi_pvm_snapshot.go index a3e6021a6c..c3e1ea5292 100644 --- a/ibm/service/power/data_source_ibm_pi_pvm_snapshot.go +++ b/ibm/service/power/data_source_ibm_pi_pvm_snapshot.go @@ -10,6 +10,7 @@ import ( "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -50,6 +51,11 @@ func DataSourceIBMPIPVMSnapshot() *schema.Resource { Description: "Date of snapshot creation.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, 
Attr_Description: { Computed: true, Description: "The description of the snapshot.", @@ -80,6 +86,13 @@ func DataSourceIBMPIPVMSnapshot() *schema.Resource { Description: "The status of the Power Virtual Machine instance snapshot.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VolumeSnapshots: { Computed: true, Description: "A map of volume snapshots included in the Power Virtual Machine instance snapshot.", @@ -109,12 +122,12 @@ func dataSourceIBMPISnapshotRead(ctx context.Context, d *schema.ResourceData, me var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set(Attr_PVMSnapshots, flattenPVMSnapshotInstances(snapshotData.Snapshots)) + d.Set(Attr_PVMSnapshots, flattenPVMSnapshotInstances(snapshotData.Snapshots, meta)) return nil } -func flattenPVMSnapshotInstances(list []*models.Snapshot) []map[string]interface{} { +func flattenPVMSnapshotInstances(list []*models.Snapshot, meta interface{}) []map[string]interface{} { log.Printf("Calling the flattenPVMSnapshotInstances call with list %d", len(list)) result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { @@ -129,6 +142,14 @@ func flattenPVMSnapshotInstances(list []*models.Snapshot) []map[string]interface Attr_Status: i.Status, Attr_VolumeSnapshots: i.VolumeSnapshots, } + if i.Crn != "" { + l[Attr_CRN] = i.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(i.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi pvm snapshot (%s) user_tags: %s", *i.SnapshotID, err) + } + l[Attr_UserTags] = tags + } result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_pvm_snapshot_test.go b/ibm/service/power/data_source_ibm_pi_pvm_snapshot_test.go index e5c502c1cb..0394b660a7 100644 --- a/ibm/service/power/data_source_ibm_pi_pvm_snapshot_test.go 
+++ b/ibm/service/power/data_source_ibm_pi_pvm_snapshot_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPISnapshotDataSource_basic(t *testing.T) { + snapshotRes := "data.ibm_pi_pvm_snapshots.testacc_pi_snapshots" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPISnapshotDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPISnapshotDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_pvm_snapshots.testacc_pi_snapshots", "id"), + resource.TestCheckResourceAttrSet(snapshotRes, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_sap_profile.go b/ibm/service/power/data_source_ibm_pi_sap_profile.go index 1f53d1ad72..a1e04e803f 100644 --- a/ibm/service/power/data_source_ibm_pi_sap_profile.go +++ b/ibm/service/power/data_source_ibm_pi_sap_profile.go @@ -43,16 +43,42 @@ func DataSourceIBMPISAPProfile() *schema.Resource { Description: "Amount of cores.", Type: schema.TypeInt, }, + Attr_FullSystemProfile: { + Computed: true, + Description: "Requires full system for deployment.", + Type: schema.TypeBool, + }, Attr_Memory: { Computed: true, Description: "Amount of memory (in GB).", Type: schema.TypeInt, }, + Attr_SAPS: { + Computed: true, + Description: "SAP Application Performance Standard", + Type: schema.TypeInt, + }, + Attr_SupportedSystems: { + Computed: true, + Description: "List of supported systems.", + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, Attr_Type: { Computed: true, Description: "Type of profile.", Type: schema.TypeString, }, + Attr_WorkloadType: { + Computed: true, + Description: "Workload Type.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, }, } } @@ -76,8 +102,12 @@ func dataSourceIBMPISAPProfileRead(ctx context.Context, d *schema.ResourceData, d.SetId(*sapProfile.ProfileID) d.Set(Attr_Certified, 
*sapProfile.Certified) d.Set(Attr_Cores, *sapProfile.Cores) + d.Set(Attr_FullSystemProfile, sapProfile.FullSystemProfile) d.Set(Attr_Memory, *sapProfile.Memory) + d.Set(Attr_SAPS, sapProfile.Saps) + d.Set(Attr_SupportedSystems, sapProfile.SupportedSystems) d.Set(Attr_Type, *sapProfile.Type) + d.Set(Attr_WorkloadType, sapProfile.WorkloadTypes) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_sap_profiles.go b/ibm/service/power/data_source_ibm_pi_sap_profiles.go index a3c3c5f5a6..7517694019 100644 --- a/ibm/service/power/data_source_ibm_pi_sap_profiles.go +++ b/ibm/service/power/data_source_ibm_pi_sap_profiles.go @@ -43,6 +43,11 @@ func DataSourceIBMPISAPProfiles() *schema.Resource { Description: "Amount of cores.", Type: schema.TypeInt, }, + Attr_FullSystemProfile: { + Computed: true, + Description: "Requires full system for deployment.", + Type: schema.TypeBool, + }, Attr_Memory: { Computed: true, Description: "Amount of memory (in GB).", @@ -53,11 +58,32 @@ func DataSourceIBMPISAPProfiles() *schema.Resource { Description: "SAP Profile ID.", Type: schema.TypeString, }, + Attr_SAPS: { + Computed: true, + Description: "SAP Application Performance Standard", + Type: schema.TypeInt, + }, + Attr_SupportedSystems: { + Computed: true, + Description: "List of supported systems.", + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, Attr_Type: { Computed: true, Description: "Type of profile.", Type: schema.TypeString, }, + Attr_WorkloadType: { + Computed: true, + Description: "Workload Type.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Type: schema.TypeList, + }, }, }, Type: schema.TypeList, @@ -84,11 +110,15 @@ func dataSourceIBMPISAPProfilesRead(ctx context.Context, d *schema.ResourceData, result := make([]map[string]interface{}, 0, len(sapProfiles.Profiles)) for _, sapProfile := range sapProfiles.Profiles { profile := map[string]interface{}{ - Attr_Certified: *sapProfile.Certified, - Attr_Cores:
*sapProfile.Cores, - Attr_Memory: *sapProfile.Memory, - Attr_ProfileID: *sapProfile.ProfileID, - Attr_Type: *sapProfile.Type, + Attr_Certified: *sapProfile.Certified, + Attr_Cores: *sapProfile.Cores, + Attr_FullSystemProfile: sapProfile.FullSystemProfile, + Attr_Memory: *sapProfile.Memory, + Attr_ProfileID: *sapProfile.ProfileID, + Attr_SAPS: sapProfile.Saps, + Attr_SupportedSystems: sapProfile.SupportedSystems, + Attr_Type: *sapProfile.Type, + Attr_WorkloadType: sapProfile.WorkloadTypes, } result = append(result, profile) } diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go index b6c04c4f58..dc22966c84 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -42,6 +44,11 @@ func DataSourceIBMPISharedProcessorPool() *schema.Resource { Description: "The available cores in the shared processor pool.", Type: schema.TypeFloat, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_HostID: { Computed: true, Description: "The host ID where the shared processor pool resides.", @@ -124,6 +131,13 @@ func DataSourceIBMPISharedProcessorPool() *schema.Resource { Description: "The status details of the shared processor pool.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type:
schema.TypeSet, + }, }, } } @@ -146,6 +160,14 @@ func dataSourceIBMPISharedProcessorPoolRead(ctx context.Context, d *schema.Resou d.SetId(*response.SharedProcessorPool.ID) d.Set(Attr_AllocatedCores, response.SharedProcessorPool.AllocatedCores) d.Set(Attr_AvailableCores, response.SharedProcessorPool.AvailableCores) + if response.SharedProcessorPool.Crn != "" { + d.Set(Attr_CRN, response.SharedProcessorPool.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(response.SharedProcessorPool.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi shared_processor_pool (%s) user_tags: %s", *response.SharedProcessorPool.ID, err) + } + d.Set(Attr_UserTags, tags) + } d.Set(Attr_HostID, response.SharedProcessorPool.HostID) d.Set(Attr_Name, response.SharedProcessorPool.Name) d.Set(Attr_ReservedCores, response.SharedProcessorPool.ReservedCores) diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go index 74ccf37d22..021b095d30 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -42,6 +44,11 @@ func DataSourceIBMPISharedProcessorPools() *schema.Resource { Description: "The available cores in the shared processor pool.", Type: schema.TypeInt, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_HostID: { Computed: true, Description: "The host ID where
the shared processor pool resides.", @@ -72,6 +79,13 @@ func DataSourceIBMPISharedProcessorPools() *schema.Resource { Description: "The status details of the shared processor pool.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, }, }, Type: schema.TypeList, @@ -106,6 +120,14 @@ func dataSourceIBMPISharedProcessorPoolsRead(ctx context.Context, d *schema.Reso Attr_Status: pool.Status, Attr_StatusDetail: pool.StatusDetail, } + if pool.Crn != "" { + key[Attr_CRN] = pool.Crn + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(pool.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi shared_processor_pool (%s) user_tags: %s", *pool.ID, err) + } + key[Attr_UserTags] = tags + } result = append(result, key) } diff --git a/ibm/service/power/data_source_ibm_pi_storage_tiers.go b/ibm/service/power/data_source_ibm_pi_storage_tiers.go new file mode 100644 index 0000000000..c6a1bcfe27 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_storage_tiers.go @@ -0,0 +1,100 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/go-uuid" +) + +func DataSourceIBMPIStorageTiers() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPIStorageTiersRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attributes + Attr_RegionStorageTiers: { + Computed: true, + Description: "An array of storage tiers supported in a region.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Description: { + Computed: true, + Description: "Description of the storage tier label.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "Name of the storage tier.", + Type: schema.TypeString, + }, + Attr_State: { + Computed: true, + Description: "State of the storage tier (active or inactive).", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + }, + } +} + +func dataSourceIBMPIStorageTiersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + + client := instance.NewIBMPIStorageTierClient(ctx, sess, cloudInstanceID) + rst, err := client.GetAll() + if err != nil { + return diag.FromErr(err) + } + var genID, _ = uuid.GenerateUUID() + d.SetId(genID) +
regionStorageTiers := []map[string]interface{}{} + if len(rst) > 0 { + for _, storageTier := range rst { + regionStorageTier := storageTierToMap(storageTier) + regionStorageTiers = append(regionStorageTiers, regionStorageTier) + } + } + d.Set(Attr_RegionStorageTiers, regionStorageTiers) + + return nil +} + +func storageTierToMap(storageTier *models.StorageTier) map[string]interface{} { + storageTierMap := make(map[string]interface{}) + if storageTier.Description != "" { + storageTierMap[Attr_Description] = storageTier.Description + } + if storageTier.Name != "" { + storageTierMap[Attr_Name] = storageTier.Name + } + if storageTier.State != nil { + storageTierMap[Attr_State] = storageTier.State + } + return storageTierMap +} diff --git a/ibm/service/power/data_source_ibm_pi_storage_tiers_test.go b/ibm/service/power/data_source_ibm_pi_storage_tiers_test.go new file mode 100644 index 0000000000..143d6f95b3 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_storage_tiers_test.go @@ -0,0 +1,35 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMPIStorageTiersDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIStorageTiersDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_storage_tiers.storage_tiers", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIStorageTiersDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_storage_tiers" "storage_tiers" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_volume.go b/ibm/service/power/data_source_ibm_pi_volume.go index 6e1bf43c8d..1efe5607e4 100644 --- a/ibm/service/power/data_source_ibm_pi_volume.go +++ b/ibm/service/power/data_source_ibm_pi_volume.go @@ -5,9 +5,11 @@ package power import ( "context" + "log" "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -47,16 +49,31 @@ func DataSourceIBMPIVolume() *schema.Resource { Description: "Indicates if the volume is boot capable.", Type: schema.TypeBool, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_ConsistencyGroupName: { Computed: true, Description: "Consistency group name if volume is a part of volume group.", Type: schema.TypeString, }, + Attr_CreationDate: { + Computed: true, + Description: 
"Date volume was created.", + Type: schema.TypeString, + }, Attr_DiskType: { Computed: true, Description: "The disk type that is used for the volume.", Type: schema.TypeString, }, + Attr_FreezeTime: { + Computed: true, + Description: "The freeze time of remote copy.", + Type: schema.TypeString, + }, Attr_GroupID: { Computed: true, Description: "The volume group id in which the volume belongs.", @@ -67,6 +84,11 @@ func DataSourceIBMPIVolume() *schema.Resource { Description: "Amount of iops assigned to the volume", Type: schema.TypeString, }, + Attr_LastUpdateDate: { + Computed: true, + Description: "The last updated date of the volume.", + Type: schema.TypeString, + }, Attr_MasterVolumeName: { Computed: true, Description: "The master volume name.", @@ -87,6 +109,12 @@ func DataSourceIBMPIVolume() *schema.Resource { Description: "Indicates if the volume should be replication enabled or not.", Type: schema.TypeBool, }, + Attr_ReplicationSites: { + Computed: true, + Description: "List of replication sites for volume replication.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_ReplicationStatus: { Computed: true, Description: "The replication status of the volume.", @@ -112,6 +140,13 @@ func DataSourceIBMPIVolume() *schema.Resource { Description: "The state of the volume.", Type: schema.TypeString, }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, Attr_VolumePool: { Computed: true, Description: "Volume pool, name of storage pool where the volume is located.", @@ -144,14 +179,30 @@ func dataSourceIBMPIVolumeRead(ctx context.Context, d *schema.ResourceData, meta d.Set(Attr_AuxiliaryVolumeName, volumedata.AuxVolumeName) d.Set(Attr_Bootable, volumedata.Bootable) d.Set(Attr_ConsistencyGroupName, volumedata.ConsistencyGroupName) + d.Set(Attr_CreationDate, 
volumedata.CreationDate.String()) + if volumedata.Crn != "" { + d.Set(Attr_CRN, volumedata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(volumedata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi volume (%s) user_tags: %s", *volumedata.VolumeID, err) + } + d.Set(Attr_UserTags, tags) + } d.Set(Attr_DiskType, volumedata.DiskType) + if volumedata.FreezeTime != nil { + d.Set(Attr_FreezeTime, volumedata.FreezeTime.String()) + } d.Set(Attr_GroupID, volumedata.GroupID) d.Set(Attr_IOThrottleRate, volumedata.IoThrottleRate) + d.Set(Attr_LastUpdateDate, volumedata.LastUpdateDate.String()) d.Set(Attr_MasterVolumeName, volumedata.MasterVolumeName) d.Set(Attr_MirroringState, volumedata.MirroringState) d.Set(Attr_PrimaryRole, volumedata.PrimaryRole) d.Set(Attr_ReplicationEnabled, volumedata.ReplicationEnabled) d.Set(Attr_ReplicationType, volumedata.ReplicationType) + if len(volumedata.ReplicationSites) > 0 { + d.Set(Attr_ReplicationSites, volumedata.ReplicationSites) + } d.Set(Attr_ReplicationStatus, volumedata.ReplicationStatus) d.Set(Attr_State, volumedata.State) d.Set(Attr_Shareable, volumedata.Shareable) diff --git a/ibm/service/power/data_source_ibm_pi_volume_group.go b/ibm/service/power/data_source_ibm_pi_volume_group.go index daddb0709a..8ee773b904 100644 --- a/ibm/service/power/data_source_ibm_pi_volume_group.go +++ b/ibm/service/power/data_source_ibm_pi_volume_group.go @@ -33,11 +33,22 @@ func DataSourceIBMPIVolumeGroup() *schema.Resource { }, // Attributes + Attr_Auxiliary: { + Computed: true, + Description: "Indicates if the volume is auxiliary or not.", + Type: schema.TypeBool, + }, Attr_ConsistencyGroupName: { Computed: true, Description: "The name of consistency group at storage controller level.", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "Indicates the replication sites of the volume group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_ReplicationStatus: {
Computed: true, Description: "The replication status of volume group.", @@ -73,6 +84,11 @@ func DataSourceIBMPIVolumeGroup() *schema.Resource { }, Type: schema.TypeSet, }, + Attr_StoragePool: { + Computed: true, + Description: "Indicates the storage pool of the volume group", + Type: schema.TypeString, + }, Attr_VolumeGroupName: { Computed: true, Description: "The name of the volume group.", @@ -96,12 +112,17 @@ func dataSourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, } d.SetId(*vgData.ID) + d.Set(Attr_Auxiliary, vgData.Auxiliary) d.Set(Attr_ConsistencyGroupName, vgData.ConsistencyGroupName) d.Set(Attr_ReplicationStatus, vgData.ReplicationStatus) + if len(vgData.ReplicationSites) > 0 { + d.Set(Attr_ReplicationSites, vgData.ReplicationSites) + } d.Set(Attr_Status, vgData.Status) if vgData.StatusDescription != nil { d.Set(Attr_StatusDescriptionErrors, flattenVolumeGroupStatusDescription(vgData.StatusDescription.Errors)) } + d.Set(Attr_StoragePool, vgData.StoragePool) d.Set(Attr_VolumeGroupName, vgData.Name) return nil diff --git a/ibm/service/power/data_source_ibm_pi_volume_group_details.go b/ibm/service/power/data_source_ibm_pi_volume_group_details.go index 518ef8b540..4448499baa 100644 --- a/ibm/service/power/data_source_ibm_pi_volume_group_details.go +++ b/ibm/service/power/data_source_ibm_pi_volume_group_details.go @@ -32,6 +32,11 @@ func DataSourceIBMPIVolumeGroupDetails() *schema.Resource { }, // Attributes + Attr_Auxiliary: { + Computed: true, + Description: "Indicates if the volume is auxiliary or not.", + Type: schema.TypeBool, + }, Attr_ConsistencyGroupName: { Computed: true, Description: "The name of consistency group at storage controller level.", @@ -42,6 +47,12 @@ func DataSourceIBMPIVolumeGroupDetails() *schema.Resource { Description: "The replication status of volume group.", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "Indicates the replication sites of the volume group.", + Elem: 
&schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_Status: { Computed: true, Description: "The status of the volume group.", @@ -72,6 +83,11 @@ func DataSourceIBMPIVolumeGroupDetails() *schema.Resource { }, Type: schema.TypeSet, }, + Attr_StoragePool: { + Computed: true, + Description: "Indicates the storage pool of the volume group", + Type: schema.TypeString, + }, Attr_VolumeIDs: { Computed: true, Description: "List of volume IDs, member of volume group.", @@ -101,12 +117,17 @@ func dataSourceIBMPIVolumeGroupDetailsRead(ctx context.Context, d *schema.Resour } d.SetId(*vgData.ID) + d.Set(Attr_Auxiliary, vgData.Auxiliary) d.Set(Attr_ConsistencyGroupName, vgData.ConsistencyGroupName) d.Set(Attr_ReplicationStatus, vgData.ReplicationStatus) + if len(vgData.ReplicationSites) > 0 { + d.Set(Attr_ReplicationSites, vgData.ReplicationSites) + } d.Set(Attr_Status, vgData.Status) if vgData.StatusDescription != nil { d.Set(Attr_StatusDescriptionErrors, flattenVolumeGroupStatusDescription(vgData.StatusDescription.Errors)) } + d.Set(Attr_StoragePool, vgData.StoragePool) d.Set(Attr_VolumeIDs, vgData.VolumeIDs) d.Set(Attr_VolumeGroupName, vgData.Name) diff --git a/ibm/service/power/data_source_ibm_pi_volume_groups.go b/ibm/service/power/data_source_ibm_pi_volume_groups.go index a704297f9e..b3c79262fa 100644 --- a/ibm/service/power/data_source_ibm_pi_volume_groups.go +++ b/ibm/service/power/data_source_ibm_pi_volume_groups.go @@ -35,6 +35,11 @@ func DataSourceIBMPIVolumeGroups() *schema.Resource { Description: "List of all volume groups.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + Attr_Auxiliary: { + Computed: true, + Description: "Indicates if the volume is auxiliary or not.", + Type: schema.TypeBool, + }, Attr_ConsistencyGroupName: { Computed: true, Description: "The name of consistency group at storage controller level.", @@ -50,6 +55,12 @@ func DataSourceIBMPIVolumeGroups() *schema.Resource { Description: "The replication status of 
volume group.", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "Indicates the replication sites of the volume group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_Status: { Computed: true, Description: "The status of the volume group.", @@ -80,6 +91,11 @@ func DataSourceIBMPIVolumeGroups() *schema.Resource { }, Type: schema.TypeSet, }, + Attr_StoragePool: { + Computed: true, + Description: "Indicates the storage pool of the volume group", + Type: schema.TypeString, + }, Attr_VolumeGroupName: { Computed: true, Description: "The name of the volume group.", @@ -118,13 +134,19 @@ func flattenVolumeGroups(list []*models.VolumeGroup) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ + Attr_Auxiliary: i.Auxiliary, Attr_ConsistencyGroupName: i.ConsistencyGroupName, Attr_ID: *i.ID, Attr_ReplicationStatus: i.ReplicationStatus, Attr_Status: i.Status, Attr_StatusDescriptionErrors: flattenVolumeGroupStatusDescription(i.StatusDescription.Errors), + Attr_StoragePool: i.StoragePool, Attr_VolumeGroupName: i.Name, } + if len(i.ReplicationSites) > 0 { + l[Attr_ReplicationSites] = i.ReplicationSites + } + result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_volume_groups_details.go b/ibm/service/power/data_source_ibm_pi_volume_groups_details.go index e11c5583d4..6098962552 100644 --- a/ibm/service/power/data_source_ibm_pi_volume_groups_details.go +++ b/ibm/service/power/data_source_ibm_pi_volume_groups_details.go @@ -35,6 +35,11 @@ func DataSourceIBMPIVolumeGroupsDetails() *schema.Resource { Description: "List of all volume group.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + Attr_Auxiliary: { + Computed: true, + Description: "Indicates if the volume is auxiliary or not.", + Type: schema.TypeBool, + }, Attr_ConsistencyGroupName: { Computed: true, Description: "The 
name of consistency group at storage controller level.", @@ -50,11 +55,22 @@ func DataSourceIBMPIVolumeGroupsDetails() *schema.Resource { Description: "The replication status of volume group.", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "Indicates the replication sites of the volume group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_Status: { Computed: true, Description: "The status of the volume group.", Type: schema.TypeString, }, + Attr_StoragePool: { + Computed: true, + Description: "Indicates the storage pool of the volume group", + Type: schema.TypeString, + }, Attr_StatusDescriptionErrors: { Computed: true, Description: "The status details of the volume group.", @@ -124,14 +140,20 @@ func flattenVolumeGroupsDetails(list []*models.VolumeGroupDetails) []map[string] result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ + Attr_Auxiliary: i.Auxiliary, Attr_ConsistencyGroupName: i.ConsistencyGroupName, Attr_ID: *i.ID, Attr_ReplicationStatus: i.ReplicationStatus, Attr_Status: i.Status, + Attr_StoragePool: i.StoragePool, "status_description_errors": flattenVolumeGroupStatusDescription(i.StatusDescription.Errors), Attr_VolumeGroupName: i.Name, Attr_VolumeIDs: i.VolumeIDs, } + if len(i.ReplicationSites) > 0 { + l[Attr_ReplicationSites] = i.ReplicationSites + } + result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_volume_snapshot.go b/ibm/service/power/data_source_ibm_pi_volume_snapshot.go new file mode 100644 index 0000000000..4813133561 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_snapshot.go @@ -0,0 +1,98 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPIVolumeSnapshot() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPIVolumeSnapshotRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_VolumeSnapshotID: { + Description: "The volume snapshot id.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + + // Attributes + Attr_CreationDate: { + Computed: true, + Description: "The date and time when the volume snapshot was created.", + Type: schema.TypeString, + }, + Attr_CRN: { + Computed: true, + Description: "The CRN of the volume snapshot.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "The volume snapshot name.", + Type: schema.TypeString, + }, + Attr_Size: { + Computed: true, + Description: "The size of the volume snapshot, in gibibytes (GiB).", + Type: schema.TypeFloat, + }, + Attr_Status: { + Computed: true, + Description: "The status for the volume snapshot.", + Type: schema.TypeString, + }, + Attr_UpdatedDate: { + Computed: true, + Description: "The date and time when the volume snapshot was last updated.", + Type: schema.TypeString, + }, + Attr_VolumeID: { + Computed: true, + Description: "The volume UUID associated with the snapshot.", + Type: schema.TypeString, + }, + }, + } +} + +func dataSourceIBMPIVolumeSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + snapshotID := d.Get(Arg_VolumeSnapshotID).(string) + + client := instance.NewIBMPISnapshotClient(ctx, sess, cloudInstanceID) + snapshot, err := client.V1VolumeSnapshotsGet(snapshotID) + if err != nil { + return diag.FromErr(err) + } + d.SetId(*snapshot.ID) + d.Set(Attr_CreationDate, snapshot.CreationDate.String()) + d.Set(Attr_CRN, snapshot.Crn) + d.Set(Attr_Name, *snapshot.Name) + d.Set(Attr_Size, *snapshot.Size) + d.Set(Attr_Status, snapshot.Status) + d.Set(Attr_UpdatedDate, snapshot.UpdatedDate.String()) + d.Set(Attr_VolumeID, *snapshot.VolumeID) + return nil +} diff --git a/ibm/service/power/data_source_ibm_pi_volume_snapshot_test.go b/ibm/service/power/data_source_ibm_pi_volume_snapshot_test.go new file mode 100644 index 0000000000..2b974cb803 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_snapshot_test.go @@ -0,0 +1,37 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccIBMPIVolumeSnapshotDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeSnapshotDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_volume_snapshot.snapshot_instance", "id"), + resource.TestCheckResourceAttrSet("data.ibm_pi_volume_snapshot.snapshot_instance", "name"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeSnapshotDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_volume_snapshot" "snapshot_instance" { + pi_cloud_instance_id = "%s" + pi_volume_snapshot_id = "%s" + } + `, acc.Pi_cloud_instance_id, acc.Pi_snapshot_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_volume_snapshots.go b/ibm/service/power/data_source_ibm_pi_volume_snapshots.go new file mode 100644 index 0000000000..b648b0be24 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_snapshots.go @@ -0,0 +1,120 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceIBMPIVolumeSnapshots() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPIVolumeSnapshotsRead, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + + // Attributes + Attr_VolumesSnapshots: { + Computed: true, + Description: "The list of volume snapshots.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CreationDate: { + Computed: true, + Description: "The date and time when the volume snapshot was created.", + Type: schema.TypeString, + }, + Attr_CRN: { + Computed: true, + Description: "The CRN of the volume snapshot.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The snapshot UUID.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "The volume snapshot name.", + Type: schema.TypeString, + }, + Attr_Size: { + Computed: true, + Description: "The size of the volume snapshot, in gibibytes (GiB).", + Type: schema.TypeFloat, + }, + Attr_Status: { + Computed: true, + Description: "The status for the volume snapshot.", + Type: schema.TypeString, + }, + Attr_UpdatedDate: { + Computed: true, + Description: "The date and time when the volume snapshot was last updated.", + Type: schema.TypeString, + }, + Attr_VolumeID: { + Computed: true, + Description: "The volume UUID 
associated with the snapshot.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeSet, + }, + }, + } +} + +func dataSourceIBMPIVolumeSnapshotsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + client := instance.NewIBMPISnapshotClient(ctx, sess, cloudInstanceID) + snapshots, err := client.V1VolumeSnapshotsGetall() + if err != nil { + return diag.FromErr(err) + } + d.Set(Attr_VolumesSnapshots, flattenSnapshotsV1(snapshots.VolumeSnapshots)) + var clientgenU, _ = uuid.GenerateUUID() + d.SetId(clientgenU) + + return nil +} + +func flattenSnapshotsV1(snapshotList []*models.SnapshotV1) []map[string]interface{} { + snapshots := make([]map[string]interface{}, 0, len(snapshotList)) + for _, snap := range snapshotList { + snapshot := map[string]interface{}{ + Attr_CreationDate: snap.CreationDate.String(), + Attr_CRN: snap.Crn, + Attr_ID: *snap.ID, + Attr_Name: *snap.Name, + Attr_Size: *snap.Size, + Attr_Status: *snap.Status, + Attr_UpdatedDate: snap.UpdatedDate.String(), + Attr_VolumeID: *snap.VolumeID, + } + snapshots = append(snapshots, snapshot) + } + return snapshots +} diff --git a/ibm/service/power/data_source_ibm_pi_volume_snapshots_test.go b/ibm/service/power/data_source_ibm_pi_volume_snapshots_test.go new file mode 100644 index 0000000000..356e412652 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_snapshots_test.go @@ -0,0 +1,35 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccIBMPIVolumeSnapshotsDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeSnapshotsDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_volume_snapshots.snapshots", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeSnapshotsDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_pi_volume_snapshots" "snapshots" { + pi_cloud_instance_id = "%s" + } + `, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_volume_test.go b/ibm/service/power/data_source_ibm_pi_volume_test.go index a3b8498b7a..f40fb2dca8 100644 --- a/ibm/service/power/data_source_ibm_pi_volume_test.go +++ b/ibm/service/power/data_source_ibm_pi_volume_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIBMPIVolumeDataSource_basic(t *testing.T) { + volumeRes := "data.ibm_pi_volume.testacc_ds_volume" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -20,7 +21,7 @@ func TestAccIBMPIVolumeDataSource_basic(t *testing.T) { { Config: testAccCheckIBMPIVolumeDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_pi_volume.testacc_ds_volume", "id"), + resource.TestCheckResourceAttrSet(volumeRes, "id"), ), }, }, diff --git a/ibm/service/power/data_source_ibm_pi_workspace.go b/ibm/service/power/data_source_ibm_pi_workspace.go index e71df4a6a1..b16abe6425 100644 --- a/ibm/service/power/data_source_ibm_pi_workspace.go +++ b/ibm/service/power/data_source_ibm_pi_workspace.go @@ -49,6 +49,20 
@@ func DatasourceIBMPIWorkspace() *schema.Resource { Description: "The Workspace crn.", Type: schema.TypeString, }, + Attr_NetworkSecurityGroups: { + Computed: true, + Description: "Network security groups configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_State: { + Computed: true, + Description: "The state of a Network Security Groups configuration.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, Attr_PowerEdgeRouter: { Computed: true, Elem: &schema.Resource{ @@ -132,6 +146,13 @@ func dataSourceIBMPIWorkspaceRead(ctx context.Context, d *schema.ResourceData, m detailsData[Attr_PowerEdgeRouter] = []map[string]interface{}{wsPowerEdge} wsDetails = append(wsDetails, detailsData) } + if wsData.Details.NetworkSecurityGroups != nil { + wsNSG := map[string]interface{}{ + Attr_State: *wsData.Details.NetworkSecurityGroups.State, + } + detailsData[Attr_NetworkSecurityGroups] = []map[string]interface{}{wsNSG} + wsDetails = append(wsDetails, detailsData) + } d.Set(Attr_WorkspaceDetails, wsDetails) wsLocation := map[string]interface{}{ diff --git a/ibm/service/power/data_source_ibm_pi_workspaces.go b/ibm/service/power/data_source_ibm_pi_workspaces.go index 58851a7bec..257510342c 100644 --- a/ibm/service/power/data_source_ibm_pi_workspaces.go +++ b/ibm/service/power/data_source_ibm_pi_workspaces.go @@ -54,6 +54,20 @@ func DatasourceIBMPIWorkspaces() *schema.Resource { Description: "The Workspace crn.", Type: schema.TypeString, }, + Attr_NetworkSecurityGroups: { + Computed: true, + Description: "Network security groups configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_State: { + Computed: true, + Description: "The state of a Network Security Groups configuration.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, Attr_PowerEdgeRouter: { Computed: true, Elem: &schema.Resource{ @@ -142,6 +156,13 @@ func dataSourceIBMPIWorkspacesRead(ctx context.Context, d 
*schema.ResourceData, detailsData[Attr_PowerEdgeRouter] = []map[string]interface{}{wsPowerEdge} wsDetails = append(wsDetails, detailsData) } + if ws.Details.NetworkSecurityGroups != nil { + wsNSG := map[string]interface{}{ + Attr_State: *ws.Details.NetworkSecurityGroups.State, + } + detailsData[Attr_NetworkSecurityGroups] = []map[string]interface{}{wsNSG} + wsDetails = append(wsDetails, detailsData) + } workspace := map[string]interface{}{ Attr_WorkspaceCapabilities: ws.Capabilities, diff --git a/ibm/service/power/ibm_pi_constants.go b/ibm/service/power/ibm_pi_constants.go index a081df931f..aac2683c62 100644 --- a/ibm/service/power/ibm_pi_constants.go +++ b/ibm/service/power/ibm_pi_constants.go @@ -10,6 +10,7 @@ const ( Arg_AffinityVolume = "pi_affinity_volume" Arg_AntiAffinityInstances = "pi_anti_affinity_instances" Arg_AntiAffinityVolumes = "pi_anti_affinity_volumes" + Arg_BootVolumeReplicationEnabled = "pi_boot_volume_replication_enabled" Arg_Cidr = "pi_cidr" Arg_CloudConnectionID = "pi_cloud_connection_id" Arg_CloudConnectionName = "pi_cloud_connection_name" @@ -20,6 +21,7 @@ const ( Arg_DeploymentTarget = "pi_deployment_target" Arg_DeploymentType = "pi_deployment_type" Arg_Description = "pi_description" + Arg_DestinationPorts = "pi_destination_ports" Arg_DhcpID = "pi_dhcp_id" Arg_DhcpName = "pi_dhcp_name" Arg_DhcpSnatEnabled = "pi_dhcp_snat_enabled" @@ -37,6 +39,7 @@ const ( Arg_ImageName = "pi_image_name" Arg_InstanceID = "pi_instance_id" Arg_InstanceName = "pi_instance_name" + Arg_IPAddress = "pi_ip_address" Arg_Key = "pi_ssh_key" Arg_KeyName = "pi_key_name" Arg_KeyPairName = "pi_key_pair_name" @@ -45,7 +48,14 @@ const ( Arg_Memory = "pi_memory" Arg_Name = "pi_name" Arg_Network = "pi_network" + Arg_NetworkAddressGroupID = "pi_network_address_group_id" + Arg_NetworkAddressGroupMemberID = "pi_network_address_group_member_id" + Arg_NetworkID = "pi_network_id" + Arg_NetworkInterfaceID = "pi_network_interface_id" Arg_NetworkName = "pi_network_name" + 
Arg_NetworkSecurityGroupID = "pi_network_security_group_id" + Arg_NetworkSecurityGroupMemberID = "pi_network_security_group_member_id" + Arg_NetworkSecurityGroupRuleID = "pi_network_security_group_rule_id" Arg_PinPolicy = "pi_pin_policy" Arg_PlacementGroupID = "pi_placement_group_id" Arg_PlacementGroupName = "pi_placement_group_name" @@ -53,14 +63,17 @@ const ( Arg_Plan = "pi_plan" Arg_Processors = "pi_processors" Arg_ProcType = "pi_proc_type" + Arg_Protocol = "pi_protocol" Arg_PVMInstanceActionType = "pi_action" Arg_PVMInstanceHealthStatus = "pi_health_status" Arg_PVMInstanceId = "pi_instance_id" + Arg_Remote = "pi_remote" Arg_Remove = "pi_remove" Arg_Replicants = "pi_replicants" Arg_ReplicationEnabled = "pi_replication_enabled" Arg_ReplicationPolicy = "pi_replication_policy" Arg_ReplicationScheme = "pi_replication_scheme" + Arg_ReplicationSites = "pi_replication_sites" Arg_ResourceGroupID = "pi_resource_group_id" Arg_SAP = "sap" Arg_SAPDeploymentType = "pi_sap_deployment_type" @@ -74,6 +87,7 @@ const ( Arg_SharedProcessorPoolReservedCores = "pi_shared_processor_pool_reserved_cores" Arg_SnapshotID = "pi_snapshot_id" Arg_SnapShotName = "pi_snap_shot_name" + Arg_SourcePorts = "pi_source_ports" Arg_SPPPlacementGroupID = "pi_spp_placement_group_id" Arg_SPPPlacementGroupName = "pi_spp_placement_group_name" Arg_SPPPlacementGroupPolicy = "pi_spp_placement_group_policy" @@ -83,8 +97,11 @@ const ( Arg_StoragePoolAffinity = "pi_storage_pool_affinity" Arg_StorageType = "pi_storage_type" Arg_SysType = "pi_sys_type" + Arg_Target = "pi_target" Arg_TargetStorageTier = "pi_target_storage_tier" + Arg_Type = "pi_type" Arg_UserData = "pi_user_data" + Arg_UserTags = "pi_user_tags" Arg_VirtualCoresAssigned = "pi_virtual_cores_assigned" Arg_VirtualOpticalDevice = "pi_virtual_optical_device" Arg_VolumeCloneName = "pi_volume_clone_name" @@ -98,6 +115,7 @@ const ( Arg_VolumePool = "pi_volume_pool" Arg_VolumeShareable = "pi_volume_shareable" Arg_VolumeSize = "pi_volume_size" + 
Arg_VolumeSnapshotID = "pi_volume_snapshot_id" Arg_VolumeType = "pi_volume_type" Arg_VTL = "vtl" @@ -112,6 +130,7 @@ const ( Attr_Addresses = "addresses" Attr_AllocatedCores = "allocated_cores" Attr_Architecture = "architecture" + Attr_AsynchronousReplication = "asynchronous_replication" Attr_Auxiliary = "auxiliary" Attr_AuxiliaryChangedVolumeName = "auxiliary_changed_volume_name" Attr_AuxiliaryVolumeName = "auxiliary_volume_name" @@ -123,6 +142,7 @@ const ( Attr_Bootable = "bootable" Attr_BootVolumeID = "boot_volume_id" Attr_Capabilities = "capabilities" + Attr_CapabilityDetails = "capability_details" Attr_Capacity = "capacity" Attr_Certified = "certified" Attr_CIDR = "cidr" @@ -156,13 +176,17 @@ const ( Attr_Datacenters = "datacenters" Attr_DatacenterStatus = "pi_datacenter_status" Attr_DatacenterType = "pi_datacenter_type" + Attr_Dedicated = "dedicated" Attr_Default = "default" Attr_DeleteOnTermination = "delete_on_termination" Attr_DeploymentType = "deployment_type" Attr_Description = "description" + Attr_DestinationPort = "destination_port" Attr_Details = "details" Attr_DhcpID = "dhcp_id" Attr_DhcpManaged = "dhcp_managed" + Attr_Direction = "direction" + Attr_DisasterRecovery = "disaster_recovery" Attr_DisasterRecoveryLocations = "disaster_recovery_locations" Attr_DiskFormat = "disk_format" Attr_DiskType = "disk_type" @@ -174,11 +198,13 @@ const ( Attr_FailureMessage = "failure_message" Attr_FailureReason = "failure_reason" Attr_Fault = "fault" + Attr_Flag = "flag" Attr_FlashCopyMappings = "flash_copy_mappings" Attr_FlashCopyName = "flash_copy_name" Attr_FreezeTime = "freeze_time" Attr_FullSystemProfile = "full_system_profile" Attr_Gateway = "gateway" + Attr_General = "general" Attr_GlobalRouting = "global_routing" Attr_GreDestinationAddress = "gre_destination_address" Attr_GreSourceAddress = "gre_source_address" @@ -197,18 +223,21 @@ const ( Attr_IBMiPHA = "ibmi_pha" Attr_IBMiRDS = "ibmi_rds" Attr_IBMiRDSUsers = "ibmi_rds_users" + Attr_ICMPType = "icmp_type" 
Attr_ID = "id" Attr_ImageID = "image_id" Attr_ImageInfo = "image_info" Attr_Images = "images" Attr_ImageType = "image_type" Attr_InputVolumes = "input_volumes" + Attr_Instance = "instance" Attr_InstanceID = "instance_id" Attr_InstanceIP = "instance_ip" Attr_InstanceMac = "instance_mac" Attr_Instances = "instances" Attr_InstanceSnapshots = "instance_snapshots" Attr_InstanceVolumes = "instance_volumes" + Attr_Interfaces = "interfaces" Attr_IOThrottleRate = "io_throttle_rate" Attr_IP = "ip" Attr_IPAddress = "ip_address" @@ -235,6 +264,7 @@ const ( Attr_MaxAllocationSize = "max_allocation_size" Attr_MaxAvailable = "max_available" Attr_MaxCoresAvailable = "max_cores_available" + Attr_Maximum = "maximum" Attr_MaximumStorageAllocation = "max_storage_allocation" Attr_MaxMem = "maxmem" Attr_MaxMemory = "max_memory" @@ -248,6 +278,7 @@ const ( Attr_Metered = "metered" Attr_MigrationStatus = "migration_status" Attr_Min = "min" + Attr_Minimum = "minimum" Attr_MinMem = "minmem" Attr_MinMemory = "min_memory" Attr_MinProc = "minproc" @@ -256,10 +287,17 @@ const ( Attr_MirroringState = "mirroring_state" Attr_MTU = "mtu" Attr_Name = "name" + Attr_NetworkAddressGroupID = "network_address_group_id" + Attr_NetworkAddressGroupMemberID = "network_address_group_member_id" + Attr_NetworkAddressGroups = "network_address_groups" Attr_NetworkID = "network_id" + Attr_NetworkInterfaceID = "network_interface_id" Attr_NetworkName = "network_name" Attr_NetworkPorts = "network_ports" Attr_Networks = "networks" + Attr_NetworkSecurityGroupID = "network_security_group_id" + Attr_NetworkSecurityGroupMemberID = "network_security_group_member_id" + Attr_NetworkSecurityGroups = "network_security_groups" Attr_NumberOfVolumes = "number_of_volumes" Attr_Onboardings = "onboardings" Attr_OperatingSystem = "operating_system" @@ -282,15 +320,21 @@ const ( Attr_ProfileID = "profile_id" Attr_Profiles = "profiles" Attr_Progress = "progress" + Attr_Protocol = "protocol" Attr_PublicIP = "public_ip" 
Attr_PVMInstanceID = "pvm_instance_id" Attr_PVMInstances = "pvm_instances" Attr_PVMSnapshots = "pvm_snapshots" Attr_Region = "region" + Attr_RegionStorageTiers = "region_storage_tiers" + Attr_Remote = "remote" Attr_RemoteCopyID = "remote_copy_id" Attr_RemoteCopyRelationshipNames = "remote_copy_relationship_names" Attr_RemoteCopyRelationships = "remote_copy_relationships" + Attr_RemotePool = "remote_pool" Attr_ReplicationEnabled = "replication_enabled" + Attr_ReplicationPoolMap = "replication_pool_map" + Attr_ReplicationServices = "replication_services" Attr_ReplicationSites = "replication_sites" Attr_ReplicationStatus = "replication_status" Attr_ReplicationType = "replication_type" @@ -299,6 +343,7 @@ const ( Attr_ReservedMemory = "reserved_memory" Attr_ResultsOnboardedVolumes = "results_onboarded_volumes" Attr_ResultsVolumeOnboardingFailures = "results_volume_onboarding_failures" + Attr_Rules = "rules" Attr_SAPS = "saps" Attr_Secondaries = "secondaries" Attr_ServerName = "server_name" @@ -325,6 +370,8 @@ const ( Attr_SharedProcessorPoolStatusDetail = "status_detail" Attr_Size = "size" Attr_SnapshotID = "snapshot_id" + Attr_SourceChecksum = "source_checksum" + Attr_SourcePort = "source_port" Attr_SourceVolumeID = "source_volume_id" Attr_SourceVolumeName = "source_volume_name" Attr_Speed = "speed" @@ -339,6 +386,7 @@ const ( Attr_Status = "status" Attr_StatusDescriptionErrors = "status_description_errors" Attr_StatusDetail = "status_detail" + Attr_StorageConnection = "storage_connection" Attr_StoragePool = "storage_pool" Attr_StoragePoolAffinity = "storage_pool_affinity" Attr_StoragePoolsCapacity = "storage_pools_capacity" @@ -346,13 +394,17 @@ const ( Attr_StorageTypesCapacity = "storage_types_capacity" Attr_SupportedSystems = "supported_systems" Attr_Synchronized = "synchronized" + Attr_SynchronousReplication = "synchronous_replication" Attr_SystemPoolName = "system_pool_name" Attr_SystemPools = "system_pools" Attr_Systems = "systems" Attr_SysType = "sys_type" 
Attr_Systype = "systype" + Attr_Target = "target" + Attr_TargetLocations = "target_locations" Attr_TargetVolumeName = "target_volume_name" Attr_TaskID = "task_id" + Attr_TCPFlags = "tcp_flags" Attr_TenantID = "tenant_id" Attr_TenantName = "tenant_name" Attr_TotalCapacity = "total_capacity" @@ -365,6 +417,7 @@ const ( Attr_TotalStandardStorageConsumed = "total_standard_storage_consumed" Attr_Type = "type" Attr_Uncapped = "uncapped" + Attr_UpdatedDate = "updated_date" Attr_URL = "url" Attr_UsedCore = "used_core" Attr_UsedIPCount = "used_ip_count" @@ -385,6 +438,7 @@ const ( Attr_VolumePool = "volume_pool" Attr_Volumes = "volumes" Attr_VolumeSnapshots = "volume_snapshots" + Attr_VolumesSnapshots = "volume_snapshots" Attr_VolumeStatus = "volume_status" Attr_VPCCRNs = "vpc_crns" Attr_VPCEnabled = "vpc_enabled" @@ -418,6 +472,8 @@ const ( // Allowed Values Affinity = "affinity" + All = "all" + Allow = "allow" AntiAffinity = "anti-affinity" Attach = "attach" BYOL = "byol" @@ -425,15 +481,28 @@ const ( Critical = "CRITICAL" CUSTOM_VIRTUAL_CORES = "custom-virtualcores" Dedicated = "dedicated" + DefaultNAG = "default-network-address-group" + Deny = "deny" DeploymentTypeEpic = "EPIC" DeploymentTypeVMNoStorage = "VMNoStorage" + DestinationUnreach = "destination-unreach" DHCPVlan = "dhcp-vlan" + Disable = "disable" + Echo = "echo" + EchoReply = "echo-reply" + Enable = "enable" Hana = "Hana" Hard = "hard" Host = "host" HostGroup = "hostGroup" + ICMP = "icmp" + IPV4_Address = "ipv4-address" + NAG = "network-address-group" + MaxVolumeSupport = "maxVolumeSupport" Netweaver = "Netweaver" + Network_Interface = "network-interface" None = "none" + NSG = "network-security-group" OK = "OK" PER = "power-edge-router" Prefix = "prefix" @@ -443,7 +512,11 @@ const ( SAP = "SAP" Shared = "shared" Soft = "soft" + SourceQuench = "source-quench" Suffix = "suffix" + TCP = "tcp" + TimeExceeded = "time-exceeded" + UDP = "udp" UserTagType = "user" Vlan = "vlan" vSCSI = "vSCSI" @@ -485,12 +558,13 @@ 
const ( State_PendingReclamation = "pending_reclamation" State_Provisioning = "provisioning" State_Removed = "removed" + State_Removing = "removing" State_Resize = "resize" State_RESIZE = "RESIZE" State_Retry = "retry" State_Shutoff = "shutoff" - State_Stopping = "stopping" State_SHUTOFF = "SHUTOFF" + State_Stopping = "stopping" State_Up = "up" State_Updating = "updating" State_VerifyResize = "verify_resize" diff --git a/ibm/service/power/resource_ibm_pi_capture.go b/ibm/service/power/resource_ibm_pi_capture.go index ed0b73d15e..107a432da9 100644 --- a/ibm/service/power/resource_ibm_pi_capture.go +++ b/ibm/service/power/resource_ibm_pi_capture.go @@ -29,11 +29,13 @@ func ResourceIBMPICapture() *schema.Resource { CreateContext: resourceIBMPICaptureCreate, ReadContext: resourceIBMPICaptureRead, DeleteContext: resourceIBMPICaptureDelete, + UpdateContext: resourceIBMPICaptureUpdate, Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(75 * time.Minute), Delete: schema.DefaultTimeout(50 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -104,7 +106,19 @@ func ResourceIBMPICapture() *schema.Resource { ForceNew: true, Description: "Cloud Storage Image Path (bucket-name [/folder/../..])", }, + Arg_UserTags: { + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, // Computed Attribute + Attr_CRN: { + Computed: true, + Description: "The CRN of the resource.", + Type: schema.TypeString, + }, "image_id": { Type: schema.TypeString, Computed: true, @@ -161,6 +175,10 @@ func resourceIBMPICaptureCreate(ctx context.Context, d *schema.ResourceData, met } } + if v, ok := d.GetOk(Arg_UserTags); ok { + captureBody.UserTags = flex.FlattenSet(v.(*schema.Set)) + } + captureResponse, err := client.CaptureInstanceToImageCatalogV2(name, captureBody) if err != 
nil { @@ -173,6 +191,22 @@ func resourceIBMPICaptureCreate(ctx context.Context, d *schema.ResourceData, met if err != nil { return diag.FromErr(err) } + + if _, ok := d.GetOk(Arg_UserTags); ok && capturedestination != cloudStorageDestination { + imageClient := st.NewIBMPIImageClient(ctx, sess, cloudInstanceID) + imagedata, err := imageClient.Get(capturename) + if err != nil { + log.Printf("Error on get of ibm pi capture (%s) while applying pi_user_tags: %s", capturename, err) + } + if imagedata.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err = flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(imagedata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi capture (%s) pi_user_tags during creation: %s", *imagedata.ImageID, err) + } + } + } + return resourceIBMPICaptureRead(ctx, d, meta) } @@ -197,12 +231,20 @@ func resourceIBMPICaptureRead(ctx context.Context, d *schema.ResourceData, meta case *p_cloud_images.PcloudCloudinstancesImagesGetNotFound: log.Printf("[DEBUG] image does not exist %v", err) d.SetId("") - return nil + return diag.Errorf("image does not exist %v", err) } log.Printf("[DEBUG] get image failed %v", err) return diag.FromErr(err) } imageid := *imagedata.ImageID + if imagedata.Crn != "" { + d.Set(Attr_CRN, imagedata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(imagedata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of ibm pi capture (%s) pi_user_tags: %s", *imagedata.ImageID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set("image_id", imageid) } d.Set(helpers.PICloudInstanceId, cloudInstanceID) @@ -239,3 +281,24 @@ func resourceIBMPICaptureDelete(ctx context.Context, d *schema.ResourceData, met d.SetId("") return nil } + +func resourceIBMPICaptureUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + captureID := parts[1] + capturedestination 
:= parts[2] + + if capturedestination != cloudStorageDestination && d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi capture (%s) pi_user_tags: %s", captureID, err) + } + } + } + + return resourceIBMPICaptureRead(ctx, d, meta) +} diff --git a/ibm/service/power/resource_ibm_pi_capture_test.go b/ibm/service/power/resource_ibm_pi_capture_test.go index 5e69dc801b..e66401efa6 100644 --- a/ibm/service/power/resource_ibm_pi_capture_test.go +++ b/ibm/service/power/resource_ibm_pi_capture_test.go @@ -53,7 +53,43 @@ func TestAccIBMPICaptureWithVolume(t *testing.T) { resource.TestCheckResourceAttr(captureRes, "pi_capture_name", name), resource.TestCheckResourceAttrSet(captureRes, "image_id"), ), - ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccIBMPICaptureUserTags(t *testing.T) { + captureRes := "ibm_pi_capture.capture_instance" + name := fmt.Sprintf("tf-pi-capture-%d", acctest.RandIntRange(10, 100)) + userTagsString := `["env:dev", "test_tag"]` + userTagsStringUpdated := `["env:dev", "test_tag","test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPICaptureDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPICaptureUserTagsConfig(name, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPICaptureExists(captureRes), + resource.TestCheckResourceAttr(captureRes, "pi_capture_name", name), + resource.TestCheckResourceAttrSet(captureRes, "image_id"), + resource.TestCheckResourceAttr(captureRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(captureRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(captureRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: 
testAccCheckIBMPICaptureUserTagsConfig(name, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPICaptureExists(captureRes), + resource.TestCheckResourceAttr(captureRes, "pi_capture_name", name), + resource.TestCheckResourceAttrSet(captureRes, "image_id"), + resource.TestCheckResourceAttr(captureRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(captureRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(captureRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(captureRes, "pi_user_tags.*", "test_tag2"), + ), }, }, }) @@ -177,6 +213,18 @@ func testAccCheckIBMPICaptureConfigBasic(name string) string { `, acc.Pi_cloud_instance_id, name, acc.Pi_instance_name) } +func testAccCheckIBMPICaptureUserTagsConfig(name string, userTagsString string) string { + return fmt.Sprintf(` + resource "ibm_pi_capture" "capture_instance" { + pi_cloud_instance_id="%[1]s" + pi_capture_name = "%s" + pi_instance_name = "%s" + pi_capture_destination = "image-catalog" + pi_user_tags = %s + } + `, acc.Pi_cloud_instance_id, name, acc.Pi_instance_name, userTagsString) +} + func testAccCheckIBMPICaptureCloudStorageConfig(name string) string { return fmt.Sprintf(` resource "ibm_pi_capture" "capture_instance" { diff --git a/ibm/service/power/resource_ibm_pi_image.go b/ibm/service/power/resource_ibm_pi_image.go index e42f54ff3a..1814bbd06a 100644 --- a/ibm/service/power/resource_ibm_pi_image.go +++ b/ibm/service/power/resource_ibm_pi_image.go @@ -29,11 +29,13 @@ func ResourceIBMPIImage() *schema.Resource { CreateContext: resourceIBMPIImageCreate, ReadContext: resourceIBMPIImageRead, DeleteContext: resourceIBMPIImageDelete, + UpdateContext: resourceIBMPIImageUpdate, Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), Delete: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), }, Schema: 
map[string]*schema.Schema{ @@ -188,8 +190,20 @@ func ResourceIBMPIImage() *schema.Resource { }, }, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, // Computed Attribute + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, "image_id": { Type: schema.TypeString, Computed: true, @@ -219,6 +233,9 @@ func resourceIBMPIImageCreate(ctx context.Context, d *schema.ResourceData, meta ImageID: imageid, Source: &source, } + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } imageResponse, err := client.Create(body) if err != nil { return diag.FromErr(err) @@ -232,6 +249,16 @@ func resourceIBMPIImageCreate(ctx context.Context, d *schema.ResourceData, meta log.Printf("[DEBUG] err %s", err) return diag.FromErr(err) } + + if _, ok := d.GetOk(Arg_UserTags); ok { + if imageResponse.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(imageResponse.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi image (%s) pi_user_tags during creation: %s", *IBMPIImageID, err) + } + } + } } // COS image import @@ -240,6 +267,7 @@ func resourceIBMPIImageCreate(ctx context.Context, d *schema.ResourceData, meta bucketImageFileName := d.Get(helpers.PIImageBucketFileName).(string) bucketRegion := d.Get(helpers.PIImageBucketRegion).(string) bucketAccess := d.Get(helpers.PIImageBucketAccess).(string) + body := &models.CreateCosImageImportJob{ ImageName: &imageName, BucketName: &bucketName, @@ -297,6 +325,9 @@ func resourceIBMPIImageCreate(ctx context.Context, d *schema.ResourceData, meta } body.ImportDetails = &importDetailsModel } + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } imageResponse, 
err := client.CreateCosImage(body) if err != nil { return diag.FromErr(err) @@ -313,6 +344,16 @@ func resourceIBMPIImageCreate(ctx context.Context, d *schema.ResourceData, meta if err != nil { return diag.FromErr(err) } + + if _, ok := d.GetOk(Arg_UserTags); ok { + if image.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(image.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi image (%s) pi_user_tags during creation: %s", *image.ImageID, err) + } + } + } d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, *image.ImageID)) } @@ -345,12 +386,39 @@ func resourceIBMPIImageRead(ctx context.Context, d *schema.ResourceData, meta in } imageid := *imagedata.ImageID + if imagedata.Crn != "" { + d.Set(Attr_CRN, imagedata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(imagedata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of image (%s) pi_user_tags: %s", *imagedata.ImageID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set("image_id", imageid) d.Set(helpers.PICloudInstanceId, cloudInstanceID) return nil } +func resourceIBMPIImageUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + _, imageID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi image (%s) pi_user_tags: %s", imageID, err) + } + } + } + + return resourceIBMPIImageRead(ctx, d, meta) +} + func resourceIBMPIImageDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { diff --git a/ibm/service/power/resource_ibm_pi_image_test.go 
b/ibm/service/power/resource_ibm_pi_image_test.go index c50bda620f..5c4d8c4450 100644 --- a/ibm/service/power/resource_ibm_pi_image_test.go +++ b/ibm/service/power/resource_ibm_pi_image_test.go @@ -20,7 +20,7 @@ import ( ) func TestAccIBMPIImagebasic(t *testing.T) { - + imageRes := "ibm_pi_image.power_image" name := fmt.Sprintf("tf-pi-image-%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -30,9 +30,8 @@ func TestAccIBMPIImagebasic(t *testing.T) { { Config: testAccCheckIBMPIImageConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckIBMPIImageExists("ibm_pi_image.power_image"), - resource.TestCheckResourceAttr( - "ibm_pi_image.power_image", "pi_image_name", name), + testAccCheckIBMPIImageExists(imageRes), + resource.TestCheckResourceAttr(imageRes, "pi_image_name", name), ), }, }, @@ -137,6 +136,54 @@ func testAccCheckIBMPIImageCOSPublicConfig(name string) string { `, name, acc.Pi_cloud_instance_id, acc.Pi_image_bucket_name, acc.Pi_image_bucket_file_name) } +func TestAccIBMPIImageUserTags(t *testing.T) { + imageRes := "ibm_pi_image.power_image" + name := fmt.Sprintf("tf-pi-image-%d", acctest.RandIntRange(10, 100)) + userTagsString := `["env:dev","test_tag"]` + userTagsStringUpdated := `["env:dev","test_tag","ibm"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIImageUserTagsConfig(name, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIImageExists(imageRes), + resource.TestCheckResourceAttr(imageRes, "pi_image_name", name), + resource.TestCheckResourceAttrSet(imageRes, "image_id"), + resource.TestCheckResourceAttr(imageRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(imageRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(imageRes, 
"pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPIImageUserTagsConfig(name, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIImageExists(imageRes), + resource.TestCheckResourceAttr(imageRes, "pi_image_name", name), + resource.TestCheckResourceAttrSet(imageRes, "image_id"), + resource.TestCheckResourceAttr(imageRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(imageRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(imageRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(imageRes, "pi_user_tags.*", "ibm"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIImageUserTagsConfig(name string, userTagsString string) string { + return fmt.Sprintf(` + resource "ibm_pi_image" "power_image" { + pi_cloud_instance_id = "%[2]s" + pi_image_id = "%[3]s" + pi_image_name = "%[1]s" + pi_user_tags = %[4]s + } + `, name, acc.Pi_cloud_instance_id, acc.Pi_image, userTagsString) +} + func TestAccIBMPIImageBYOLImport(t *testing.T) { imageRes := "ibm_pi_image.cos_image" name := fmt.Sprintf("tf-pi-image-byoi-%d", acctest.RandIntRange(10, 100)) @@ -164,7 +211,7 @@ func testAccCheckIBMPIImageBYOLConfig(name string) string { pi_image_bucket_access = "private" pi_image_bucket_file_name = "%[4]s" pi_image_bucket_name = "%[3]s" - pi_image_bucket_region = "us-east" + pi_image_bucket_region = "%[7]s" pi_image_name = "%[1]s" pi_image_secret_key = "%[6]s" pi_image_storage_type = "tier3" @@ -174,5 +221,5 @@ func testAccCheckIBMPIImageBYOLConfig(name string) string { vendor = "SAP" } } - `, name, acc.Pi_cloud_instance_id, acc.Pi_image_bucket_name, acc.Pi_image_bucket_file_name, acc.Pi_image_bucket_access_key, acc.Pi_image_bucket_secret_key) + `, name, acc.Pi_cloud_instance_id, acc.Pi_image_bucket_name, acc.Pi_image_bucket_file_name, acc.Pi_image_bucket_access_key, acc.Pi_image_bucket_secret_key, acc.Pi_image_bucket_region) } diff --git a/ibm/service/power/resource_ibm_pi_instance.go 
b/ibm/service/power/resource_ibm_pi_instance.go index 111ab08889..c73c01bd14 100644 --- a/ibm/service/power/resource_ibm_pi_instance.go +++ b/ibm/service/power/resource_ibm_pi_instance.go @@ -71,6 +71,12 @@ func ResourceIBMPIInstance() *schema.Resource { Optional: true, Type: schema.TypeList, }, + Arg_BootVolumeReplicationEnabled: { + Description: "Indicates if the boot volume should be replication enabled or not.", + ForceNew: true, + Optional: true, + Type: schema.TypeBool, + }, Arg_CloudInstanceID: { Description: "This is the Power Instance id that is assigned to the account", ForceNew: true, @@ -244,6 +250,14 @@ func ResourceIBMPIInstance() *schema.Resource { Type: schema.TypeString, ValidateFunc: validate.ValidateAllowedStringValues([]string{Prefix, Suffix}), }, + Arg_ReplicationSites: { + Description: "Indicates the replication sites of the boot volume.", + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, Arg_SAPProfileID: { ConflictsWith: []string{Arg_Processors, Arg_Memory, Arg_ProcType}, Description: "SAP Profile ID for the amount of cores and memory", @@ -285,7 +299,7 @@ func ResourceIBMPIInstance() *schema.Resource { Description: "Storage Connectivity Group for server deployment", Optional: true, Type: schema.TypeString, - ValidateFunc: validate.ValidateAllowedStringValues([]string{vSCSI}), + ValidateFunc: validate.ValidateAllowedStringValues([]string{vSCSI, MaxVolumeSupport}), }, Arg_SysType: { Computed: true, @@ -300,6 +314,13 @@ func ResourceIBMPIInstance() *schema.Resource { Optional: true, Type: schema.TypeString, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, Arg_VirtualCoresAssigned: { Computed: true, Description: "Virtual Cores Assigned to the PVMInstance", @@ -322,6 +343,11 @@ func ResourceIBMPIInstance() 
*schema.Resource { }, // Attributes + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_HealthStatus: { Computed: true, Description: "PI Instance health status", @@ -472,6 +498,20 @@ func resourceIBMPIInstanceCreate(ctx context.Context, d *schema.ResourceData, me } } } + + // If user tags are set, make sure tags are set correctly before moving on + if _, ok := d.GetOk(Arg_UserTags); ok { + oldList, newList := d.GetChange(Arg_UserTags) + for _, s := range *pvmList { + if s.Crn != "" { + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(s.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi instance (%s) pi_user_tags during creation: %s", *s.PvmInstanceID, err) + } + } + } + } + // If virtual optical device provided then update cloud initialization if vod, ok := d.GetOk(Arg_VirtualOpticalDevice); ok { for _, s := range *pvmList { @@ -510,6 +550,14 @@ func resourceIBMPIInstanceRead(ctx context.Context, d *schema.ResourceData, meta return diag.FromErr(err) } + if powervmdata.Crn != "" { + d.Set(Attr_CRN, powervmdata.Crn) + tags, err := flex.GetTagsUsingCRN(meta, string(powervmdata.Crn)) + if err != nil { + log.Printf("Error on get of ibm pi instance (%s) pi_user_tags: %s", *powervmdata.PvmInstanceID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set(Arg_Memory, powervmdata.Memory) d.Set(Arg_Processors, powervmdata.Processors) if powervmdata.Status != nil { @@ -882,6 +930,16 @@ func resourceIBMPIInstanceUpdate(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } } + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi instance (%s) pi_user_tags: %s", instanceID, err) + } + } + } + return resourceIBMPIInstanceRead(ctx, d, meta) } @@ -1378,6 
+1436,20 @@ func createSAPInstance(d *schema.ResourceData, sapClient *instance.IBMPISAPInsta if st, ok := d.GetOk(Arg_StorageType); ok { body.StorageType = st.(string) } + var bootVolumeReplicationEnabled bool + if bootVolumeReplicationBoolean, ok := d.GetOk(Arg_BootVolumeReplicationEnabled); ok { + bootVolumeReplicationEnabled = bootVolumeReplicationBoolean.(bool) + body.BootVolumeReplicationEnabled = &bootVolumeReplicationEnabled + } + var replicationSites []string + if sites, ok := d.GetOk(Arg_ReplicationSites); ok { + if !bootVolumeReplicationEnabled { + return nil, fmt.Errorf("must set %s to true in order to specify replication sites", Arg_BootVolumeReplicationEnabled) + } else { + replicationSites = flex.FlattenSet(sites.(*schema.Set)) + body.ReplicationSites = replicationSites + } + } if sp, ok := d.GetOk(Arg_StoragePool); ok { body.StoragePool = sp.(string) } @@ -1416,6 +1488,9 @@ func createSAPInstance(d *schema.ResourceData, sapClient *instance.IBMPISAPInsta if deploymentTarget, ok := d.GetOk(Arg_DeploymentTarget); ok { body.DeploymentTarget = expandDeploymentTarget(deploymentTarget.(*schema.Set).List()) } + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } pvmList, err := sapClient.Create(body) if err != nil { return nil, fmt.Errorf("failed to provision: %v", err) @@ -1610,6 +1685,24 @@ func createPVMInstance(d *schema.ResourceData, client *instance.IBMPIInstanceCli if deploymentTarget, ok := d.GetOk(Arg_DeploymentTarget); ok { body.DeploymentTarget = expandDeploymentTarget(deploymentTarget.(*schema.Set).List()) } + var bootVolumeReplicationEnabled bool + if bootVolumeReplicationBoolean, ok := d.GetOk(Arg_BootVolumeReplicationEnabled); ok { + bootVolumeReplicationEnabled = bootVolumeReplicationBoolean.(bool) + body.BootVolumeReplicationEnabled = &bootVolumeReplicationEnabled + } + var replicationSites []string + if sites, ok := d.GetOk(Arg_ReplicationSites); ok { + if !bootVolumeReplicationEnabled { + 
return nil, fmt.Errorf("must set %s to true in order to specify replication sites", Arg_BootVolumeReplicationEnabled) + } else { + replicationSites = flex.FlattenSet(sites.(*schema.Set)) + body.ReplicationSites = replicationSites + } + } + + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } pvmList, err := client.Create(body) if err != nil { diff --git a/ibm/service/power/resource_ibm_pi_instance_test.go b/ibm/service/power/resource_ibm_pi_instance_test.go index 9a0edc79f3..7c2974f1d2 100644 --- a/ibm/service/power/resource_ibm_pi_instance_test.go +++ b/ibm/service/power/resource_ibm_pi_instance_test.go @@ -96,7 +96,6 @@ func testAccCheckIBMPIInstanceDeploymentTypeConfig(name, instanceHealthStatus, e pi_network { network_id = data.ibm_pi_network.power_networks.id } - } `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus, epic, systype, acc.PiStorageType) } @@ -133,7 +132,6 @@ func testAccCheckIBMPIInstanceIBMiLicense(name, instanceHealthStatus string, IBM pi_network { network_id = data.ibm_pi_network.power_networks.id } - }`, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus, IBMiCSS, IBMiRDSUsers) } @@ -256,6 +254,75 @@ func testAccCheckIBMPIInstanceDeplomentTargetConfig(name string) string { `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name) } +func testAccCheckIBMPIInstanceUserTagsConfig(name, instanceHealthStatus string, userTagsString string) string { + return fmt.Sprintf(` + resource "ibm_pi_key" "key" { + pi_cloud_instance_id = "%[1]s" + pi_key_name = "%[2]s" + pi_ssh_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR" + } + data "ibm_pi_image" "power_image" { + pi_cloud_instance_id = "%[1]s" + pi_image_name = "%[3]s" + } + data "ibm_pi_network" "power_networks" { + pi_cloud_instance_id = "%[1]s" + pi_network_name = "%[4]s" + } + resource "ibm_pi_volume" "power_volume" { + pi_cloud_instance_id = "%[1]s" + pi_volume_name = "%[2]s" + pi_volume_pool = data.ibm_pi_image.power_image.storage_pool + pi_volume_shareable = true + pi_volume_size = 20 + pi_volume_type = "%[6]s" + } + resource "ibm_pi_instance" "power_instance" { + pi_cloud_instance_id = "%[1]s" + pi_health_status = "%[5]s" + pi_image_id = data.ibm_pi_image.power_image.id + pi_instance_name = "%[2]s" + pi_key_pair_name = ibm_pi_key.key.name + pi_memory = "2" + pi_proc_type = "shared" + pi_processors = "0.25" + pi_storage_pool = data.ibm_pi_image.power_image.storage_pool + pi_storage_type = "%[6]s" + pi_sys_type = "s922" + pi_volume_ids = [ibm_pi_volume.power_volume.volume_id] + pi_network { + network_id = data.ibm_pi_network.power_networks.id + } + pi_user_tags = %[7]s + } + `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus, acc.PiStorageType, userTagsString) +} + +func testAccCheckIBMPIInstanceStorageConnectionConfig(name, instanceHealthStatus string) string { + return fmt.Sprintf(` + resource "ibm_pi_volume" "power_volume" { + pi_cloud_instance_id = "%[1]s" + pi_volume_size = 1 + pi_volume_name = "%[2]s" + pi_volume_type = "tier3" + } + resource "ibm_pi_instance" "power_instance" { + pi_cloud_instance_id = "%[1]s" + pi_memory = "2" + pi_processors = "1" + pi_instance_name = "%[2]s" + pi_proc_type = "shared" + 
pi_image_id = "%[3]s" + pi_sys_type = "s922" + pi_network { + network_id = "%[4]s" + } + pi_storage_connection = "%[5]s" + pi_health_status = "%[6]s" + pi_volume_ids = [ibm_pi_volume.power_volume.volume_id] + } + `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, acc.Pi_storage_connection, instanceHealthStatus) +} func testAccCheckIBMPIInstanceDestroy(s *terraform.State) error { sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() if err != nil { @@ -336,6 +403,25 @@ func TestAccIBMPIInstanceBasic(t *testing.T) { }, }) } +func TestAccIBMPIInstanceStorageConnection(t *testing.T) { + instanceRes := "ibm_pi_instance.power_instance" + name := fmt.Sprintf("tf-pi-instance-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIInstanceStorageConnectionConfig(name, power.OK), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceExists(instanceRes), + resource.TestCheckResourceAttr(instanceRes, "pi_instance_name", name), + resource.TestCheckResourceAttr(instanceRes, "pi_storage_connection", acc.Pi_storage_connection), + ), + }, + }, + }) +} func TestAccIBMPIInstanceDeploymentTarget(t *testing.T) { instanceRes := "ibm_pi_instance.power_instance" name := fmt.Sprintf("tf-pi-instance-%d", acctest.RandIntRange(10, 100)) @@ -526,7 +612,7 @@ func testAccIBMPISAPInstanceConfig(name, sapProfile string) string { pi_storage_type = "tier1" pi_network { network_id = resource.ibm_pi_network.power_network.network_id - } + } } `, acc.Pi_cloud_instance_id, name, sapProfile, acc.Pi_sap_image) } @@ -703,7 +789,7 @@ func testAccCheckIBMPIStoppedInstanceConfigUpdate(name, instanceHealthStatus, pr pi_volume_pool = data.ibm_pi_image.power_image.storage_pool pi_volume_shareable = true pi_volume_size = 20 - } + } resource 
"ibm_pi_instance" "power_instance" { pi_cloud_instance_id = "%[1]s" pi_health_status = "%[5]s" @@ -785,3 +871,96 @@ func TestAccIBMPIInstanceDeploymentTypeNoStorage(t *testing.T) { }, }) } + +func TestAccIBMPIInstanceDeploymentGRS(t *testing.T) { + instanceRes := "ibm_pi_instance.power_instance" + bootVolumeData := "data.ibm_pi_volume.power_boot_volume_data" + name := fmt.Sprintf("tf-pi-instance-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIBMPIInstanceGRSConfig(name, power.OK, "2", "0.25"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceExists(instanceRes), + resource.TestCheckResourceAttr(instanceRes, "pi_instance_name", name), + resource.TestCheckResourceAttr(bootVolumeData, "replication_enabled", "true"), + ), + }, + }, + }) +} + +func testAccIBMPIInstanceGRSConfig(name string, instanceHealthStatus string, memory string, proc string) string { + return fmt.Sprintf(` + data "ibm_pi_image" "power_image" { + pi_image_name = "%[3]s" + pi_cloud_instance_id = "%[1]s" + } + data "ibm_pi_network" "power_networks" { + pi_cloud_instance_id = "%[1]s" + pi_network_name = "%[4]s" + } + data "ibm_pi_volume" "power_boot_volume_data" { + pi_cloud_instance_id = "%[1]s" + pi_volume_name = data.ibm_pi_instance_volumes.power_instance_volumes_data.instance_volumes[0].name + } + data "ibm_pi_instance_volumes" "power_instance_volumes_data" { + pi_cloud_instance_id = "%[1]s" + pi_instance_name = ibm_pi_instance.power_instance.pi_instance_name + } + resource "ibm_pi_instance" "power_instance" { + pi_boot_volume_replication_enabled = true + pi_memory = "%[7]s" + pi_processors = "%[6]s" + pi_instance_name = "%[2]s" + pi_proc_type = "shared" + pi_image_id = data.ibm_pi_image.power_image.id + pi_sys_type = "e980" + pi_cloud_instance_id = "%[1]s" + pi_storage_pool 
= data.ibm_pi_image.power_image.storage_pool + pi_pin_policy = "none" + pi_health_status = "%[5]s" + pi_network { + network_id = data.ibm_pi_network.power_networks.id + } + } + `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus, proc, memory) +} + +func TestAccIBMPIInstanceUserTags(t *testing.T) { + instanceRes := "ibm_pi_instance.power_instance" + name := fmt.Sprintf("tf-pi-instance-%d", acctest.RandIntRange(10, 100)) + userTagsString := `["env:dev", "test_tag"]` + userTagsStringUpdated := `["env:dev", "test_tag", "test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIInstanceUserTagsConfig(name, power.OK, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceExists(instanceRes), + resource.TestCheckResourceAttr(instanceRes, "pi_instance_name", name), + resource.TestCheckResourceAttr(instanceRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(instanceRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(instanceRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPIInstanceUserTagsConfig(name, power.OK, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceExists(instanceRes), + resource.TestCheckResourceAttr(instanceRes, "pi_instance_name", name), + resource.TestCheckResourceAttr(instanceRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(instanceRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(instanceRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(instanceRes, "pi_user_tags.*", "test_tag2"), + ), + }, + }, + }) +} diff --git a/ibm/service/power/resource_ibm_pi_network.go b/ibm/service/power/resource_ibm_pi_network.go index 5842599a2b..3670aa3d10 100644 --- 
a/ibm/service/power/resource_ibm_pi_network.go +++ b/ibm/service/power/resource_ibm_pi_network.go @@ -122,8 +122,20 @@ func ResourceIBMPINetwork() *schema.Resource { }, }, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, //Computed Attributes + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, "network_id": { Type: schema.TypeString, Computed: true, @@ -158,7 +170,9 @@ func resourceIBMPINetworkCreate(ctx context.Context, d *schema.ResourceData, met body.DNSServers = networkdns } } - + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } if v, ok := d.GetOk(helpers.PINetworkJumbo); ok { body.Jumbo = v.(bool) } @@ -223,6 +237,16 @@ func resourceIBMPINetworkCreate(ctx context.Context, d *schema.ResourceData, met return diag.FromErr(err) } + if _, ok := d.GetOk(Arg_UserTags); ok { + if networkResponse.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(networkResponse.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi snapshot (%s) pi_user_tags during creation: %s", networkID, err) + } + } + } + return resourceIBMPINetworkRead(ctx, d, meta) } @@ -242,7 +266,14 @@ func resourceIBMPINetworkRead(ctx context.Context, d *schema.ResourceData, meta if err != nil { return diag.FromErr(err) } - + if networkdata.Crn != "" { + d.Set(Attr_CRN, networkdata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(networkdata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi network (%s) pi_user_tags: %s", *networkdata.NetworkID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set("network_id", networkdata.NetworkID) d.Set(helpers.PINetworkCidr, networkdata.Cidr) d.Set(helpers.PINetworkDNS, 
networkdata.DNSServers) @@ -310,6 +341,16 @@ func resourceIBMPINetworkUpdate(ctx context.Context, d *schema.ResourceData, met } } + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi network (%s) pi_user_tags: %s", networkID, err) + } + } + } + return resourceIBMPINetworkRead(ctx, d, meta) } diff --git a/ibm/service/power/resource_ibm_pi_network_address_group.go b/ibm/service/power/resource_ibm_pi_network_address_group.go new file mode 100644 index 0000000000..b20b0cf26e --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_address_group.go @@ -0,0 +1,257 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceIBMPINetworkAddressGroup() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkAddressGroupCreate, + ReadContext: resourceIBMPINetworkAddressGroupRead, + UpdateContext: resourceIBMPINetworkAddressGroupUpdate, + DeleteContext: resourceIBMPINetworkAddressGroupDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + CustomizeDiff: customdiff.Sequence( 
+ func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + return flex.ResourceTagsCustomizeDiff(diff) + }, + ), + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_Name: { + Description: "The name of the Network Address Group.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_UserTags: { + Description: "The user tags associated with this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The Network Address Group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IP addresses in CIDR notation (for example 192.168.66.2/32) in the Network Address Group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CIDR: { + Computed: true, + Description: "The IP addresses in CIDR notation for example 192.168.1.5/32.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The id of the Network Address Group member IP addresses.", + Type: schema.TypeString, + }, + }, + }, + Optional: true, + Type: schema.TypeList, + }, + Attr_NetworkAddressGroupID: { + Computed: true, + Description: "The unique identifier of the network address group.", + Type: schema.TypeString, + }, + }, + } +} + +func resourceIBMPINetworkAddressGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + name := d.Get(Arg_Name).(string) + nagC := 
instance.NewIBMPINetworkAddressGroupClient(ctx, sess, cloudInstanceID) + var body = &models.NetworkAddressGroupCreate{ + Name: &name, + } + + if v, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.ExpandStringList(v.([]interface{})) + } + + networkAddressGroup, err := nagC.Create(body) + if err != nil { + return diag.FromErr(err) + } + if _, ok := d.GetOk(Arg_UserTags); ok { + if networkAddressGroup.Crn != nil { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(*networkAddressGroup.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi network address group (%s) pi_user_tags during creation: %s", *networkAddressGroup.ID, err) + } + } + } + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, *networkAddressGroup.ID)) + + return resourceIBMPINetworkAddressGroupRead(ctx, d, meta) +} + +func resourceIBMPINetworkAddressGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID, nagID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, cloudInstanceID) + networkAddressGroup, err := nagC.Get(nagID) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), NotFound) { + d.SetId("") + return nil + } + return diag.FromErr(err) + } + d.Set(Arg_Name, networkAddressGroup.Name) + if networkAddressGroup.Crn != nil { + d.Set(Attr_CRN, networkAddressGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkAddressGroup.Crn)) + if err != nil { + log.Printf("Error on get of network address group (%s) pi_user_tags: %s", nagID, err) + } + d.Set(Arg_UserTags, userTags) + } + + d.Set(Attr_NetworkAddressGroupID, networkAddressGroup.ID) + members := []map[string]interface{}{} + if len(networkAddressGroup.Members) > 0 { + for _, mbr := 
range networkAddressGroup.Members { + member := memberToMap(mbr) + members = append(members, member) + } + d.Set(Attr_Members, members) + } else { + d.Set(Attr_Members, nil) + } + + return nil +} + +func resourceIBMPINetworkAddressGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + hasChange := false + body := &models.NetworkAddressGroupUpdate{} + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi network address group (%s) pi_user_tags: %s", parts[1], err) + } + } + } + if d.HasChange(Arg_Name) { + body.Name = d.Get(Arg_Name).(string) + hasChange = true + } + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, parts[0]) + if hasChange { + _, err := nagC.Update(parts[1], body) + if err != nil { + return diag.FromErr(err) + } + } + + return resourceIBMPINetworkAddressGroupRead(ctx, d, meta) +} + +func resourceIBMPINetworkAddressGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, parts[0]) + err = nagC.Delete(parts[1]) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkAddressGroupDeleted(ctx, nagC, parts[1], d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + + return nil +} + +func isWaitForIBMPINetworkAddressGroupDeleted(ctx 
context.Context, client *instance.IBMPINetworkAddressGroupClient, nagID string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Deleting}, + Target: []string{State_NotFound}, + Refresh: isIBMPINetworkAddressGroupDeleteRefreshFunc(client, nagID), + Delay: 10 * time.Second, + MinTimeout: 30 * time.Second, + Timeout: timeout, + } + + return stateConf.WaitForStateContext(ctx) +} +func isIBMPINetworkAddressGroupDeleteRefreshFunc(client *instance.IBMPINetworkAddressGroupClient, nagID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + nag, err := client.Get(nagID) + if err != nil { + return nag, State_NotFound, nil + } + return nag, State_Deleting, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_address_group_member.go b/ibm/service/power/resource_ibm_pi_network_address_group_member.go new file mode 100644 index 0000000000..2965d9a925 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_address_group_member.go @@ -0,0 +1,271 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "slices" + "strings" + "time" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceIBMPINetworkAddressGroupMember() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkAddressGroupMemberCreate, + ReadContext: resourceIBMPINetworkAddressGroupMemberRead, + DeleteContext: resourceIBMPINetworkAddressGroupMemberDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + // Arguments + Arg_Cidr: { + Description: "The member to add in CIDR format.", + ExactlyOneOf: []string{Arg_Cidr, Arg_NetworkAddressGroupMemberID}, + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkAddressGroupID: { + Description: "Network Address Group ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkAddressGroupMemberID: { + Description: "The network address group member id to remove.", + ExactlyOneOf: []string{Arg_Cidr, Arg_NetworkAddressGroupMemberID}, + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + // Attributes + Attr_CRN: 
{ + Computed: true, + Description: "The network address group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IP addresses in CIDR notation (for example 192.168.66.2/32) in the Network Address Group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_CIDR: { + Computed: true, + Description: "The IP addresses in CIDR notation for example 192.168.1.5/32.", + Type: schema.TypeString, + }, + Attr_ID: { + Computed: true, + Description: "The id of the Network Address Group member IP addresses.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the Network Address Group.", + Type: schema.TypeString, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} +func resourceIBMPINetworkAddressGroupMemberCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nagID := d.Get(Arg_NetworkAddressGroupID).(string) + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, cloudInstanceID) + var body = &models.NetworkAddressGroupAddMember{} + if v, ok := d.GetOk(Arg_Cidr); ok { + cidr := v.(string) + body.Cidr = &cidr + NetworkAddressGroupMember, err := nagC.AddMember(nagID, body) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkAddressGroupMemberAdd(ctx, nagC, nagID, *NetworkAddressGroupMember.ID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", cloudInstanceID, nagID, *NetworkAddressGroupMember.ID)) + } + if v, ok := d.GetOk(Arg_NetworkAddressGroupMemberID); ok { + 
memberID := v.(string) + err := nagC.DeleteMember(nagID, memberID) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkAddressGroupMemberRemove(ctx, nagC, nagID, memberID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, nagID)) + } + + return resourceIBMPINetworkAddressGroupMemberRead(ctx, d, meta) +} +func resourceIBMPINetworkAddressGroupMemberRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, parts[0]) + networkAddressGroup, err := nagC.Get(parts[1]) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), NotFound) { + d.SetId("") + return nil + } + return diag.FromErr(err) + } + + if networkAddressGroup.Crn != nil { + d.Set(Attr_CRN, networkAddressGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkAddressGroup.Crn)) + if err != nil { + log.Printf("Error on get of pi network address group (%s) user_tags: %s", parts[1], err) + } + d.Set(Attr_UserTags, userTags) + } + if len(networkAddressGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range networkAddressGroup.Members { + member := memberToMap(mbr) + members = append(members, member) + } + d.Set(Attr_Members, members) + } else { + d.Set(Attr_Members, nil) + } + d.Set(Attr_Name, networkAddressGroup.Name) + + return nil +} +func resourceIBMPINetworkAddressGroupMemberDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if 
len(parts) > 2 { + nagC := instance.NewIBMPINetworkAddressGroupClient(ctx, sess, parts[0]) + err = nagC.DeleteMember(parts[1], parts[2]) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkAddressGroupMemberRemove(ctx, nagC, parts[1], parts[2], d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + } + + d.SetId("") + + return nil +} +func isWaitForIBMPINetworkAddressGroupMemberAdd(ctx context.Context, client *instance.IBMPINetworkAddressGroupClient, id, memberID string, timeout time.Duration) (interface{}, error) { + + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Pending}, + Target: []string{State_Available}, + Refresh: isIBMPINetworkAddressGroupMemberAddRefreshFunc(client, id, memberID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 30 * time.Second, + } + + return stateConf.WaitForStateContext(ctx) +} +func isIBMPINetworkAddressGroupMemberAddRefreshFunc(client *instance.IBMPINetworkAddressGroupClient, id, memberID string) retry.StateRefreshFunc { + + return func() (interface{}, string, error) { + networkAddressGroup, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if len(networkAddressGroup.Members) > 0 { + var mbrIDs []string + for _, mbr := range networkAddressGroup.Members { + mbrIDs = append(mbrIDs, *mbr.ID) + } + if slices.Contains(mbrIDs, memberID) { + return networkAddressGroup, State_Available, nil + } + } + return networkAddressGroup, State_Pending, nil + } +} +func isWaitForIBMPINetworkAddressGroupMemberRemove(ctx context.Context, client *instance.IBMPINetworkAddressGroupClient, id, memberID string, timeout time.Duration) (interface{}, error) { + + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Pending}, + Target: []string{State_Removed}, + Refresh: isIBMPINetworkAddressGroupMemberRemoveRefreshFunc(client, id, memberID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 30 * time.Second, + } + + return 
stateConf.WaitForStateContext(ctx) +} +func isIBMPINetworkAddressGroupMemberRemoveRefreshFunc(client *instance.IBMPINetworkAddressGroupClient, id, memberID string) retry.StateRefreshFunc { + + return func() (interface{}, string, error) { + networkAddressGroup, err := client.Get(id) + if err != nil { + return nil, "", err + } + var mbrIDs []string + for _, mbr := range networkAddressGroup.Members { + mbrIDs = append(mbrIDs, *mbr.ID) + } + if !slices.Contains(mbrIDs, memberID) { + return networkAddressGroup, State_Removed, nil + } + return networkAddressGroup, State_Pending, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_address_group_member_test.go b/ibm/service/power/resource_ibm_pi_network_address_group_member_test.go new file mode 100644 index 0000000000..f2f443369c --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_address_group_member_test.go @@ -0,0 +1,77 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccIBMPINetworkAddressGroupMemberBasic(t *testing.T) { + name := fmt.Sprintf("tf-nag-name-%d", acctest.RandIntRange(10, 100)) + cidr := "192.168.1.5/32" + nagMemberRes := "ibm_pi_network_address_group_member.network_address_group_member" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkAddressGroupMemberConfigBasic(name, 
cidr), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkAddressGroupMemberExists(nagMemberRes), + resource.TestCheckResourceAttrSet(nagMemberRes, power.Arg_NetworkAddressGroupID), + resource.TestCheckResourceAttr(nagMemberRes, power.Arg_Cidr, cidr), + resource.TestCheckResourceAttrSet(nagMemberRes, power.Attr_Name), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkAddressGroupMemberConfigBasic(name, cidr string) string { + return testAccCheckIBMPINetworkAddressGroupConfigBasic(name) + fmt.Sprintf(` + resource "ibm_pi_network_address_group_member" "network_address_group_member" { + pi_cloud_instance_id = "%[1]s" + pi_cidr = "%[2]s" + pi_network_address_group_id = ibm_pi_network_address_group.network_address_group.network_address_group_id + }`, acc.Pi_cloud_instance_id, cidr) +} + +func testAccCheckIBMPINetworkAddressGroupMemberExists(n string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMPINetworkAddressGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nsgClient.Get(nsgID) + if err != nil { + return err + } + return nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_address_group_test.go b/ibm/service/power/resource_ibm_pi_network_address_group_test.go new file mode 100644 index 0000000000..098d7ca1ac --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_address_group_test.go @@ -0,0 +1,116 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkAddressGroupBasic(t *testing.T) { + name := fmt.Sprintf("tf-nag-name-%d", acctest.RandIntRange(10, 100)) + nameUpdate := fmt.Sprintf("tf-nag-name-update-%d", acctest.RandIntRange(10, 100)) + nagRes := "ibm_pi_network_address_group.network_address_group" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPINetworkAddressGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkAddressGroupConfigBasic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkAddressGroupExists(nagRes), + resource.TestCheckResourceAttrSet(nagRes, power.Attr_ID), + resource.TestCheckResourceAttrSet(nagRes, power.Attr_NetworkAddressGroupID), + resource.TestCheckResourceAttr(nagRes, power.Arg_Name, name), + ), + }, + { + Config: testAccCheckIBMPINetworkAddressGroupConfigBasic(nameUpdate), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(nagRes, power.Arg_Name, nameUpdate), + resource.TestCheckResourceAttrSet(nagRes, power.Attr_ID), + resource.TestCheckResourceAttrSet(nagRes, power.Attr_NetworkAddressGroupID), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkAddressGroupConfigBasic(name string) string { + return fmt.Sprintf(` + resource "ibm_pi_network_address_group" "network_address_group" { + 
pi_cloud_instance_id = "%[1]s" + pi_name = "%[2]s" + } + `, acc.Pi_cloud_instance_id, name) +} + +func testAccCheckIBMPINetworkAddressGroupExists(n string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nagID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nagC := instance.NewIBMPINetworkAddressGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nagC.Get(nagID) + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckIBMPINetworkAddressGroupDestroy(s *terraform.State) error { + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_pi_network_address_group" { + continue + } + + cloudInstanceID, nagID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nagC := instance.NewIBMPINetworkAddressGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nagC.Get(nagID) + if err == nil { + return fmt.Errorf("network address group still exists: %s", rs.Primary.ID) + } + if !strings.Contains(strings.ToLower(err.Error()), power.NotFound) { + return err + } + } + return nil +} diff --git a/ibm/service/power/resource_ibm_pi_network_interface.go b/ibm/service/power/resource_ibm_pi_network_interface.go new file mode 100644 index 0000000000..f1deb2956a --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_interface.go @@ -0,0 +1,357 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" +) + +func ResourceIBMPINetworkInterface() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkInterfaceCreate, + ReadContext: resourceIBMPINetworkInterfaceRead, + UpdateContext: resourceIBMPINetworkInterfaceUpdate, + DeleteContext: resourceIBMPINetworkInterfaceDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_InstanceID: { + Description: "If supplied populated it attaches to the InstanceID, if empty detaches from InstanceID.", + Optional: true, + Type: schema.TypeString, + }, + Arg_IPAddress: { + Description: "The requested IP address of this Network Interface.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + Arg_Name: { + Description: "Name of the Network Interface.", + Optional: true, + Type: schema.TypeString, + }, + Arg_NetworkID: { + Description: "Network ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: 
validation.NoZeroValues, + }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The Network Interface's crn.", + Type: schema.TypeString, + }, + Attr_Instance: { + Computed: true, + Optional: true, + Description: "The attached instance to this Network Interface.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Href: { + Computed: true, + Description: "Link to instance resource.", + Type: schema.TypeString, + }, + Attr_InstanceID: { + Computed: true, + Description: "The attached instance ID.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_IPAddress: { + Computed: true, + Description: "The ip address of this Network Interface.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of the Network Interface.", + Type: schema.TypeString, + }, + Attr_Name: { + Computed: true, + Description: "Name of the Network Interface (not unique or indexable).", + Type: schema.TypeString, + }, + Attr_NetworkInterfaceID: { + Computed: true, + Description: "The unique identifier of the network interface.", + Type: schema.TypeString, + }, + Attr_NetworkSecurityGroupID: { + Computed: true, + Description: "ID of the Network Security Group the network interface will be added to.", + Type: schema.TypeString, + }, + Attr_Status: { + Computed: true, + Description: "The status of the network interface.", + Type: schema.TypeString, + }, + }, + } +} + +func resourceIBMPINetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + networkID := d.Get(Arg_NetworkID).(string) 
+ networkC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) + var body = &models.NetworkInterfaceCreate{} + if v, ok := d.GetOk(Arg_IPAddress); ok { + body.IPAddress = v.(string) + } + if v, ok := d.GetOk(Arg_Name); ok { + body.Name = v.(string) + } + if v, ok := d.GetOk(Arg_UserTags); ok { + userTags := flex.FlattenSet(v.(*schema.Set)) + body.UserTags = userTags + } + networkInterface, err := networkC.CreateNetworkInterface(networkID, body) + if err != nil { + return diag.FromErr(err) + } + networkInterfaceID := *networkInterface.ID + _, err = isWaitForIBMPINetworkInterfaceAvailable(ctx, networkC, networkID, networkInterfaceID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + crn := networkInterface.Crn + if _, ok := d.GetOk(Arg_UserTags); ok { + if crn != nil { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, *crn, "", UserTagType) + if err != nil { + log.Printf("Error on update of network interface (%s) pi_user_tags: %s", networkInterfaceID, err) + } + } + } + if v, ok := d.GetOk(Arg_InstanceID); ok { + instanceID := v.(string) + body := &models.NetworkInterfaceUpdate{ + InstanceID: &instanceID, + } + _, err = networkC.UpdateNetworkInterface(networkID, networkInterfaceID, body) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkPortUpdateAvailable(ctx, networkC, networkID, networkInterfaceID, instanceID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + } + d.SetId(fmt.Sprintf("%s/%s/%s", cloudInstanceID, networkID, networkInterfaceID)) + + return resourceIBMPINetworkInterfaceRead(ctx, d, meta) +} + +func resourceIBMPINetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return 
diag.FromErr(err) + } + networkC := instance.NewIBMPINetworkClient(ctx, sess, parts[0]) + networkInterface, err := networkC.GetNetworkInterface(parts[1], parts[2]) + if err != nil { + return diag.FromErr(err) + } + + d.Set(Attr_IPAddress, networkInterface.IPAddress) + d.Set(Attr_MacAddress, networkInterface.MacAddress) + d.Set(Attr_Name, networkInterface.Name) + d.Set(Attr_NetworkInterfaceID, networkInterface.ID) + d.Set(Attr_NetworkSecurityGroupID, networkInterface.NetworkSecurityGroupID) + if networkInterface.Instance != nil { + pvmInstance := []map[string]interface{}{} + instanceMap := pvmInstanceToMap(networkInterface.Instance) + pvmInstance = append(pvmInstance, instanceMap) + d.Set(Attr_Instance, pvmInstance) + } else { + d.Set(Attr_Instance, nil) + } + d.Set(Attr_Status, networkInterface.Status) + if networkInterface.Crn != nil { + d.Set(Attr_CRN, networkInterface.Crn) + tags, err := flex.GetTagsUsingCRN(meta, string(*networkInterface.Crn)) + if err != nil { + log.Printf("Error on get of network interface (%s) pi_user_tags: %s", *networkInterface.ID, err) + } + d.Set(Arg_UserTags, tags) + } + + return nil +} + +func resourceIBMPINetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + networkC := instance.NewIBMPINetworkClient(ctx, sess, parts[0]) + body := &models.NetworkInterfaceUpdate{} + + hasChange := false + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of network interface (%s) pi_user_tags: %s", parts[2], err) + } + } + } + if d.HasChange(Arg_Name) { + name := d.Get(Arg_Name).(string) + body.Name = 
&name + hasChange = true + } + if d.HasChange(Arg_InstanceID) { + instanceID := d.Get(Arg_InstanceID).(string) + body.InstanceID = &instanceID + hasChange = true + } + + if hasChange { + _, err = networkC.UpdateNetworkInterface(parts[1], parts[2], body) + if err != nil { + return diag.FromErr(err) + } + if d.HasChange(Arg_InstanceID) { + _, err = isWaitForIBMPINetworkPortUpdateAvailable(ctx, networkC, parts[1], parts[2], d.Get(Arg_InstanceID).(string), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return diag.FromErr(err) + } + } + } + + return resourceIBMPINetworkInterfaceRead(ctx, d, meta) +} + +func resourceIBMPINetworkInterfaceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + networkC := instance.NewIBMPINetworkClient(ctx, sess, parts[0]) + err = networkC.DeleteNetworkInterface(parts[1], parts[2]) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + + return nil +} + +func isWaitForIBMPINetworkInterfaceAvailable(ctx context.Context, client *instance.IBMPINetworkClient, networkID string, networkInterfaceID string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Build}, + Target: []string{State_Down}, + Refresh: isIBMPINetworkInterfaceRefreshFunc(client, networkID, networkInterfaceID), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkInterfaceRefreshFunc(client *instance.IBMPINetworkClient, networkID, networkInterfaceID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + networkInterface, err := client.GetNetworkInterface(networkID, networkInterfaceID) + if err != nil { + return nil, "", err + } + if 
strings.ToLower(*networkInterface.Status) == State_Down { + return networkInterface, State_Down, nil + } + return networkInterface, State_Build, nil + } +} + +func isWaitForIBMPINetworkPortUpdateAvailable(ctx context.Context, client *instance.IBMPINetworkClient, networkID, networkInterfaceID, instanceid string, timeout time.Duration) (interface{}, error) { + + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Build}, + Target: []string{State_Active}, + Refresh: isIBMPINetworkInterfaceUpdateRefreshFunc(client, networkID, networkInterfaceID, instanceid), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkInterfaceUpdateRefreshFunc(client *instance.IBMPINetworkClient, networkID, networkInterfaceID, instanceid string) retry.StateRefreshFunc { + + return func() (interface{}, string, error) { + networkInterface, err := client.GetNetworkInterface(networkID, networkInterfaceID) + if err != nil { + return nil, "", err + } + if strings.ToLower(*networkInterface.Status) == State_Active && networkInterface.Instance != nil && networkInterface.Instance.InstanceID == instanceid { + return networkInterface, State_Active, nil + } + return networkInterface, State_Build, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_interface_test.go b/ibm/service/power/resource_ibm_pi_network_interface_test.go new file mode 100644 index 0000000000..cc3e937e14 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_interface_test.go @@ -0,0 +1,148 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkInterfaceBasic(t *testing.T) { + name := fmt.Sprintf("tf-pi-name-%d", acctest.RandIntRange(10, 100)) + netInterRes := "ibm_pi_network_interface.network_interface" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPINetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkInterfaceConfigBasic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkInterfaceExists(netInterRes), + resource.TestCheckResourceAttr(netInterRes, power.Arg_NetworkID, acc.Pi_network_id), + resource.TestCheckResourceAttrSet(netInterRes, power.Arg_NetworkID), + resource.TestCheckResourceAttr(netInterRes, power.Attr_Name, name), + ), + }, + }, + }) +} + +func TestAccIBMPINetworkInterfaceAllArgs(t *testing.T) { + + name := fmt.Sprintf("tf-pi-name-%d", acctest.RandIntRange(10, 100)) + nameUpdate := fmt.Sprintf("tf-pi-name-update-%d", acctest.RandIntRange(10, 100)) + userTags := `["tf-ni-tag-1", "tf-ni-tag-2"]` + userTagsUpdated := `["tf-ni-tag-1","tf-ni-tag-2", "tf-ni-tag-3"]` + netInterRes := "ibm_pi_network_interface.network_interface" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: 
testAccCheckIBMPINetworkInterfaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkInterfaceConfig(name, userTags), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkInterfaceExists(netInterRes), + resource.TestCheckResourceAttr(netInterRes, power.Attr_Name, name), + resource.TestCheckResourceAttrSet(netInterRes, power.Arg_NetworkID), + resource.TestCheckResourceAttrSet(netInterRes, power.Attr_IPAddress), + ), + }, + { + Config: testAccCheckIBMPINetworkInterfaceConfig(nameUpdate, userTagsUpdated), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(netInterRes, power.Attr_Name, nameUpdate), + resource.TestCheckResourceAttrSet(netInterRes, power.Arg_NetworkID), + resource.TestCheckResourceAttrSet(netInterRes, power.Attr_IPAddress), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkInterfaceConfigBasic(name string) string { + return fmt.Sprintf(` + resource "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "%[1]s" + pi_name = "%[2]s" + pi_network_id = "%[3]s" + }`, acc.Pi_cloud_instance_id, name, acc.Pi_network_id) +} + +func testAccCheckIBMPINetworkInterfaceConfig(name, userTags string) string { + return fmt.Sprintf(` + + resource "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "%[1]s" + pi_network_id = "%[2]s" + pi_name = "%[3]s" + pi_user_tags = %[4]s + }`, acc.Pi_cloud_instance_id, acc.Pi_network_id, name, userTags) +} + +func testAccCheckIBMPINetworkInterfaceExists(n string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + parts, err := flex.IdParts(rs.Primary.ID) + if err != nil { + return err + } + networkClient := 
instance.NewIBMPINetworkClient(context.Background(), sess, parts[0]) + + _, err = networkClient.GetNetworkInterface(parts[1], parts[2]) + if err != nil { + return err + } + return nil + + } +} + +func testAccCheckIBMPINetworkInterfaceDestroy(s *terraform.State) error { + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_pi_network_interface" { + continue + } + parts, err := flex.IdParts(rs.Primary.ID) + if err != nil { + return err + } + networkClient := instance.NewIBMPINetworkClient(context.Background(), sess, parts[0]) + _, err = networkClient.GetNetworkInterface(parts[1], parts[2]) + if err == nil { + return fmt.Errorf("pi_network_interface still exists: %s", rs.Primary.ID) + } + } + + return nil +} diff --git a/ibm/service/power/resource_ibm_pi_network_port_attach.go b/ibm/service/power/resource_ibm_pi_network_port_attach.go index e0e342e15a..99bb5adb05 100644 --- a/ibm/service/power/resource_ibm_pi_network_port_attach.go +++ b/ibm/service/power/resource_ibm_pi_network_port_attach.go @@ -63,6 +63,14 @@ func ResourceIBMPINetworkPortAttach() *schema.Resource { ForceNew: true, Computed: true, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, //Computed Attributes "macaddress": { @@ -101,7 +109,9 @@ func resourceIBMPINetworkPortAttachCreate(ctx context.Context, d *schema.Resourc ipaddress := v.(string) nwportBody.IPAddress = ipaddress } - + if tags, ok := d.GetOk(Arg_UserTags); ok { + nwportBody.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } nwportattachBody := &models.NetworkPortUpdate{ Description: &description, PvmInstanceID: &instanceID, diff --git a/ibm/service/power/resource_ibm_pi_network_security_group.go 
b/ibm/service/power/resource_ibm_pi_network_security_group.go new file mode 100644 index 0000000000..b92c8a8e45 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group.go @@ -0,0 +1,384 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +func ResourceIBMPINetworkSecurityGroup() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkSecurityGroupCreate, + ReadContext: resourceIBMPINetworkSecurityGroupRead, + UpdateContext: resourceIBMPINetworkSecurityGroupUpdate, + DeleteContext: resourceIBMPINetworkSecurityGroupDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + CustomizeDiff: customdiff.Sequence( + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + return flex.ResourceTagsCustomizeDiff(diff) + }, + ), + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_Name: { + Description: "The name of the network security group.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + 
Arg_UserTags: { + Description: "The user tags associated with this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The network security group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IPv4 addresses and, or network interfaces in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the member in a network security group.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of a network interface included if the type is network-interface.", + Type: schema.TypeString, + }, + Attr_Target: { + Computed: true, + Description: "If ipv4-address type, then IPv4 address or if network-interface type, then network interface ID.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of member.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_NetworkSecurityGroupID: { + Computed: true, + Description: "The unique identifier of the network security group.", + Type: schema.TypeString, + }, + Attr_Rules: { + Computed: true, + Description: "The list of rules in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Action: { + Computed: true, + Description: "The action to take if the rule matches network traffic.", + Type: schema.TypeString, + }, + Attr_DestinationPort: { + Computed: true, + Description: "The list of destination port.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. 
If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the rule in a network security group.", + Type: schema.TypeString, + }, + Attr_Protocol: { + Computed: true, + Description: "The list of protocol.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Computed: true, + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Type: schema.TypeString, + }, + Attr_TCPFlags: { + Computed: true, + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Computed: true, + Description: "TCP flag.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Type: { + Computed: true, + Description: "The protocol of the network traffic.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Remote: { + Computed: true, + Description: "List of remote.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the remote Network Address Group or network security group the rules apply to. 
Not required for default-network-address-group.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of remote group the rules apply to.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SourcePort: { + Computed: true, + Description: "List of source port.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + }, + } +} + +func resourceIBMPINetworkSecurityGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + name := d.Get(Arg_Name).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + + body := &models.NetworkSecurityGroupCreate{ + Name: &name, + } + if v, ok := d.GetOk(Arg_UserTags); ok { + userTags := flex.FlattenSet(v.(*schema.Set)) + body.UserTags = userTags + } + + networkSecurityGroup, err := nsgClient.Create(body) + if err != nil { + return diag.FromErr(err) + } + if _, ok := d.GetOk(Arg_UserTags); ok { + if networkSecurityGroup.Crn != nil { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(*networkSecurityGroup.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi network security group (%s) pi_user_tags during creation: %s", 
*networkSecurityGroup.ID, err) + } + } + } + nsgID := *networkSecurityGroup.ID + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, nsgID)) + + return resourceIBMPINetworkSecurityGroupRead(ctx, d, meta) +} + +func resourceIBMPINetworkSecurityGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + cloudInstanceID, nsgID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + networkSecurityGroup, err := nsgClient.Get(nsgID) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), NotFound) { + d.SetId("") + return nil + } + return diag.FromErr(err) + } + d.Set(Arg_Name, networkSecurityGroup.Name) + crn := networkSecurityGroup.Crn + if crn != nil { + d.Set(Attr_CRN, networkSecurityGroup.Crn) + userTags, err := flex.GetGlobalTagsUsingCRN(meta, string(*networkSecurityGroup.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of network security group (%s) pi_user_tags: %s", nsgID, err) + } + d.Set(Arg_UserTags, userTags) + } + + if len(networkSecurityGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range networkSecurityGroup.Members { + mbrMap := networkSecurityGroupMemberToMap(mbr) + members = append(members, mbrMap) + } + d.Set(Attr_Members, members) + } else { + d.Set(Attr_Members, []string{}) + } + + d.Set(Attr_NetworkSecurityGroupID, networkSecurityGroup.ID) + + if len(networkSecurityGroup.Rules) > 0 { + rules := []map[string]interface{}{} + for _, rule := range networkSecurityGroup.Rules { + ruleMap := networkSecurityGroupRuleToMap(rule) + rules = append(rules, ruleMap) + } + d.Set(Attr_Rules, rules) + } else { + d.Set(Attr_Rules, []string{}) + } + + return nil +} + +func resourceIBMPINetworkSecurityGroupUpdate(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + cloudInstanceID, nsgID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi network security group (%s) pi_user_tags: %s", nsgID, err) + } + } + } + if d.HasChange(Arg_Name) { + body := &models.NetworkSecurityGroupUpdate{ + Name: d.Get(Arg_Name).(string), + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + _, err = nsgClient.Update(nsgID, body) + if err != nil { + return diag.FromErr(err) + } + } + return resourceIBMPINetworkSecurityGroupRead(ctx, d, meta) +} + +func resourceIBMPINetworkSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + cloudInstanceID, nsgID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + err = nsgClient.Delete(nsgID) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkSecurityGroupDeleted(ctx, nsgClient, nsgID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + + return nil +} +func isWaitForIBMPINetworkSecurityGroupDeleted(ctx context.Context, client *instance.IBMPINetworkSecurityGroupClient, nsgID string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Deleting}, + Target: []string{State_NotFound}, + Refresh: isIBMPINetworkSecurityGroupDeleteRefreshFunc(client, nsgID), + Delay: 
10 * time.Second, + MinTimeout: 10 * time.Second, + Timeout: timeout, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkSecurityGroupDeleteRefreshFunc(client *instance.IBMPINetworkSecurityGroupClient, nsgID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + nsg, err := client.Get(nsgID) + if err != nil { + return nsg, State_NotFound, nil + } + return nsg, State_Deleting, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_action.go b/ibm/service/power/resource_ibm_pi_network_security_group_action.go new file mode 100644 index 0000000000..9fc22b13b1 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_action.go @@ -0,0 +1,198 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" +) + +func ResourceIBMPINetworkSecurityGroupAction() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkSecurityGroupActionCreate, + ReadContext: resourceIBMPINetworkSecurityGroupActionRead, + UpdateContext: resourceIBMPINetworkSecurityGroupActionUpdate, + DeleteContext: resourceIBMPINetworkSecurityGroupActionDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_Action: { + 
Description: "Name of the action to take; can be enable to enable NSGs in a workspace or disable to disable NSGs in a workspace.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{Disable, Enable}), + }, + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + // Attribute + Attr_State: { + Computed: true, + Description: "The workspace network security group's state.", + Type: schema.TypeString, + }, + }, + } +} + +func resourceIBMPINetworkSecurityGroupActionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + action := d.Get(Arg_Action).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + wsclient := instance.NewIBMPIWorkspacesClient(ctx, sess, cloudInstanceID) + _, err = isWaitForWorkspaceActive(ctx, wsclient, cloudInstanceID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + body := &models.NetworkSecurityGroupsAction{Action: &action} + err = nsgClient.Action(body) + if err != nil { + return diag.FromErr(err) + } + + _, err = isWaitForNSGStatus(ctx, wsclient, cloudInstanceID, action, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + d.SetId(cloudInstanceID) + return resourceIBMPINetworkSecurityGroupActionRead(ctx, d, meta) +} + +func resourceIBMPINetworkSecurityGroupActionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + wsclient := instance.NewIBMPIWorkspacesClient(ctx, sess, d.Id()) + ws, err := 
wsclient.Get(d.Id()) + if err != nil { + return diag.FromErr(err) + } + d.Set(Attr_State, ws.Details.NetworkSecurityGroups.State) + + return nil +} +func resourceIBMPINetworkSecurityGroupActionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + if d.HasChange(Arg_Action) { + action := d.Get(Arg_Action).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + + body := &models.NetworkSecurityGroupsAction{Action: &action} + err = nsgClient.Action(body) + if err != nil { + return diag.FromErr(err) + } + wsclient := instance.NewIBMPIWorkspacesClient(ctx, sess, cloudInstanceID) + _, err = isWaitForNSGStatus(ctx, wsclient, cloudInstanceID, action, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return diag.FromErr(err) + } + } + + return resourceIBMPINetworkSecurityGroupActionRead(ctx, d, meta) +} +func resourceIBMPINetworkSecurityGroupActionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + d.SetId("") + return nil +} +func isWaitForWorkspaceActive(ctx context.Context, client *instance.IBMPIWorkspacesClient, id string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Provisioning}, + Target: []string{State_Active}, + Refresh: isWorkspaceRefreshFunc(client, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForStateContext(ctx) +} +func isWorkspaceRefreshFunc(client *instance.IBMPIWorkspacesClient, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + ws, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if *(ws.Status) == State_Active { + return ws, State_Active, nil + } + if 
*(ws.Details.NetworkSecurityGroups.State) == State_Provisioning { + return ws, State_Provisioning, nil + } + if *(ws.Details.NetworkSecurityGroups.State) == State_Failed { + return ws, *ws.Details.NetworkSecurityGroups.State, fmt.Errorf("[ERROR] workspace network security group configuration state is:%s", *ws.Status) + } + + return ws, State_Configuring, nil + } +} +func isWaitForNSGStatus(ctx context.Context, client *instance.IBMPIWorkspacesClient, id, action string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Configuring, State_Removing}, + Target: []string{State_Active, State_Inactive}, + Refresh: isPERWorkspaceNSGRefreshFunc(client, id, action), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isPERWorkspaceNSGRefreshFunc(client *instance.IBMPIWorkspacesClient, id, action string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + ws, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if *(ws.Details.NetworkSecurityGroups.State) == State_Active && action == Enable { + return ws, State_Active, nil + } + if *(ws.Details.NetworkSecurityGroups.State) == State_Inactive && action == Disable { + return ws, State_Inactive, nil + } + if *(ws.Details.NetworkSecurityGroups.State) == State_Removing { + return ws, State_Removing, nil + } + if *(ws.Details.NetworkSecurityGroups.State) == State_Error { + return ws, *ws.Details.NetworkSecurityGroups.State, fmt.Errorf("[ERROR] workspace network security group configuration failed to %s", action) + } + + return ws, State_Configuring, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_action_test.go b/ibm/service/power/resource_ibm_pi_network_security_group_action_test.go new file mode 100644 index 0000000000..d7cb8683b2 --- /dev/null +++ 
b/ibm/service/power/resource_ibm_pi_network_security_group_action_test.go @@ -0,0 +1,44 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkSecurityGroupActionBasic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupActionConfigBasic(power.Enable), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_pi_network_security_group_action.network_security_group_action", "pi_action", power.Enable), + ), + }, + { + Config: testAccCheckIBMPINetworkSecurityGroupActionConfigBasic(power.Disable), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_pi_network_security_group_action.network_security_group_action", "pi_action", power.Disable), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkSecurityGroupActionConfigBasic(action string) string { + return fmt.Sprintf(` + resource "ibm_pi_network_security_group_action" "network_security_group_action" { + pi_action = "%[1]s" + pi_cloud_id = "%[2]s" + }`, action, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_member.go b/ibm/service/power/resource_ibm_pi_network_security_group_member.go new file mode 100644 index 0000000000..50fd0e7614 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_member.go @@ -0,0 +1,386 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "slices" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" +) + +func ResourceIBMPINetworkSecurityGroupMember() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkSecurityGroupMemberCreate, + ReadContext: resourceIBMPINetworkSecurityGroupMemberRead, + DeleteContext: resourceIBMPINetworkSecurityGroupMemberDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_NetworkSecurityGroupID: { + Description: "network security group ID.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + }, + Arg_NetworkSecurityGroupMemberID: { + ConflictsWith: []string{Arg_Target, Arg_Type}, + Description: "network security group member ID.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + Arg_Target: { + ConflictsWith: []string{Arg_NetworkSecurityGroupMemberID}, + Description: "The target member to add. 
An IPv4 address if ipv4-address type or a network interface ID if network-interface type.", + ForceNew: true, + Optional: true, + RequiredWith: []string{Arg_Type}, + Type: schema.TypeString, + }, + Arg_Type: { + ConflictsWith: []string{Arg_NetworkSecurityGroupMemberID}, + Description: "The type of member.", + ForceNew: true, + Optional: true, + RequiredWith: []string{Arg_Target}, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{IPV4_Address, Network_Interface}), + }, + // Attributes + Attr_CRN: { + Computed: true, + Description: "The network security group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IPv4 addresses and, or network interfaces in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the member in a network security group.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of a network interface included if the type is network-interface.", + Type: schema.TypeString, + }, + Attr_Target: { + Computed: true, + Description: "If ipv4-address type, then IPv4 address or if network-interface type, then network interface ID.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of member.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the network security group.", + Type: schema.TypeString, + }, + Attr_NetworkSecurityGroupMemberID: { + Computed: true, + Description: "The ID of the network security group.", + Type: schema.TypeString, + }, + Attr_Rules: { + Computed: true, + Description: "The list of rules in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Action: { + Computed: true, + Description: "The action to take if the rule matches 
network traffic.", + Type: schema.TypeString, + }, + Attr_DestinationPort: { + Computed: true, + Description: "The list of destination port.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the rule in a network security group.", + Type: schema.TypeString, + }, + Attr_Protocol: { + Computed: true, + Description: "The list of protocol.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Computed: true, + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Type: schema.TypeString, + }, + Attr_TCPFlags: { + Computed: true, + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Computed: true, + Description: "TCP flag.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Type: { + Computed: true, + Description: "The protocol of the network traffic.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Remote: { + Computed: true, + Description: "List of remote.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the remote Network Address Group or network security group the rules apply to. 
Not required for default-network-address-group.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of remote group the rules apply to.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SourcePort: { + Computed: true, + Description: "List of source port", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Optional: true, + Type: schema.TypeList, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} + +func resourceIBMPINetworkSecurityGroupMemberCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nsgID := d.Get(Arg_NetworkSecurityGroupID).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + if mbrID, ok := d.GetOk(Arg_NetworkSecurityGroupMemberID); ok { + err = nsgClient.DeleteMember(nsgID, mbrID.(string)) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkSecurityGroupMemberDeleted(ctx, nsgClient, nsgID, mbrID.(string), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, nsgID)) + } else { + 
target := d.Get(Arg_Target).(string) + mbrType := d.Get(Arg_Type).(string) + body := &models.NetworkSecurityGroupAddMember{ + Target: &target, + Type: &mbrType, + } + member, err := nsgClient.AddMember(nsgID, body) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", cloudInstanceID, nsgID, *member.ID)) + } + + return resourceIBMPINetworkSecurityGroupMemberRead(ctx, d, meta) +} + +func resourceIBMPINetworkSecurityGroupMemberRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, parts[0]) + networkSecurityGroup, err := nsgClient.Get(parts[1]) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), NotFound) { + d.SetId("") + return nil + } + return diag.FromErr(err) + } + + if networkSecurityGroup.Crn != nil { + d.Set(Attr_CRN, networkSecurityGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkSecurityGroup.Crn)) + if err != nil { + log.Printf("Error on get of network security group (%s) user_tags: %s", parts[1], err) + } + d.Set(Arg_UserTags, userTags) + } + if len(networkSecurityGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range networkSecurityGroup.Members { + mbrMap := networkSecurityGroupMemberToMap(mbr) + members = append(members, mbrMap) + } + d.Set(Attr_Members, members) + } else { + d.Set(Attr_Members, nil) + } + d.Set(Attr_Name, networkSecurityGroup.Name) + d.Set(Attr_NetworkSecurityGroupMemberID, networkSecurityGroup.ID) + + if len(networkSecurityGroup.Rules) > 0 { + rules := []map[string]interface{}{} + for _, rule := range networkSecurityGroup.Rules { + ruleMap := networkSecurityGroupRuleToMap(rule) + rules = append(rules, ruleMap) + } + d.Set(Attr_Rules, rules) + } 
else { + d.Set(Attr_Rules, nil) + } + + return nil +} + +func resourceIBMPINetworkSecurityGroupMemberDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + parts, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + if len(parts) > 2 { + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, parts[0]) + err = nsgClient.DeleteMember(parts[1], parts[2]) + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkSecurityGroupMemberDeleted(ctx, nsgClient, parts[1], parts[2], d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + } + + d.SetId("") + + return nil +} +func isWaitForIBMPINetworkSecurityGroupMemberDeleted(ctx context.Context, client *instance.IBMPINetworkSecurityGroupClient, nsgID, nsgMemberID string, timeout time.Duration) (interface{}, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Deleting}, + Target: []string{State_NotFound}, + Refresh: isIBMPINetworkSecurityGroupMemberDeleteRefreshFunc(client, nsgID, nsgMemberID), + Delay: 10 * time.Second, + MinTimeout: Timeout_Active, + Timeout: timeout, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkSecurityGroupMemberDeleteRefreshFunc(client *instance.IBMPINetworkSecurityGroupClient, nsgID, nsgMemberID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + nsg, err := client.Get(nsgID) + if err != nil { + return nsg, "", err + } + var mbrIDs []string + for _, mbr := range nsg.Members { + mbrIDs = append(mbrIDs, *mbr.ID) + } + if !slices.Contains(mbrIDs, nsgMemberID) { + return nsg, State_NotFound, nil + } + return nsg, State_Deleting, nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_member_test.go b/ibm/service/power/resource_ibm_pi_network_security_group_member_test.go new 
file mode 100644 index 0000000000..e716bc6ff2 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_member_test.go @@ -0,0 +1,93 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkSecurityGroupMemberBasic(t *testing.T) { + + typeVar := "network-interface" + name := fmt.Sprintf("tf-nsg-name-%d", acctest.RandIntRange(10, 100)) + network_name := fmt.Sprintf("tf-nsg-network-%d", acctest.RandIntRange(10, 100)) + network_interface_name := fmt.Sprintf("tf-nsg-network-int-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupMemberConfigBasic(name, typeVar, network_name, network_interface_name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkSecurityGroupMemberExists("ibm_pi_network_security_group_member.network_security_group_member"), + resource.TestCheckResourceAttrSet("ibm_pi_network_security_group_member.network_security_group_member", power.Arg_NetworkSecurityGroupID), + resource.TestCheckResourceAttr("ibm_pi_network_security_group_member.network_security_group_member", "pi_type", typeVar), + resource.TestCheckResourceAttrSet("ibm_pi_network_security_group_member.network_security_group_member", power.Attr_Name), + ), + }, + }, + }) +} + +func 
testAccCheckIBMPINetworkSecurityGroupMemberConfigBasic(name, typeVar, network_name, network_interface_name string) string { + return testAccCheckIBMPINetworkSecurityGroupConfigBasic(name) + fmt.Sprintf(` + resource "ibm_pi_network" "network" { + pi_cloud_instance_id = "%[1]s" + pi_network_name = "%[3]s" + pi_network_type = "vlan" + pi_cidr = "192.168.17.0/24" + } + resource "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "%[1]s" + pi_name = "%[4]s" + pi_network_id = ibm_pi_network.network.network_id + depends_on = [ibm_pi_network.network] + } + resource "ibm_pi_network_security_group_member" "network_security_group_member" { + pi_cloud_instance_id = "%[1]s" + pi_network_security_group_id = ibm_pi_network_security_group.network_security_group.network_security_group_id + pi_target = ibm_pi_network_interface.network_interface.network_interface_id + pi_type = "%[2]s" + }`, acc.Pi_cloud_instance_id, typeVar, network_name, network_interface_name) +} + +func testAccCheckIBMPINetworkSecurityGroupMemberExists(n string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nsgClient.Get(nsgID) + if err != nil { + return err + } + return nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_rule.go b/ibm/service/power/resource_ibm_pi_network_security_group_rule.go new file mode 100644 index 0000000000..2d8461bbe4 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_rule.go @@ -0,0 +1,595 @@ +// 
Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceIBMPINetworkSecurityGroupRule() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPINetworkSecurityGroupRuleCreate, + ReadContext: resourceIBMPINetworkSecurityGroupRuleRead, + DeleteContext: resourceIBMPINetworkSecurityGroupRuleDelete, + Importer: &schema.ResourceImporter{}, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + // Arguments + Arg_Action: { + ConflictsWith: []string{Arg_NetworkSecurityGroupRuleID}, + Description: "The action to take if the rule matches network traffic.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{Allow, Deny}), + }, + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_DestinationPorts: { + ConflictsWith: []string{Arg_NetworkSecurityGroupRuleID}, + Description: "Destination port ranges.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Default: 65535, + Description: "The end of the port 
range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Optional: true, + Type: schema.TypeInt, + }, + Attr_Minimum: { + Default: 1, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Optional: true, + Type: schema.TypeInt, + }, + }, + }, + ForceNew: true, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + Arg_NetworkSecurityGroupID: { + Description: "The unique identifier of the network security group.", + ForceNew: true, + Required: true, + Type: schema.TypeString, + }, + Arg_NetworkSecurityGroupRuleID: { + ConflictsWith: []string{Arg_Action, Arg_DestinationPorts, Arg_Protocol, Arg_Remote, Arg_SourcePorts}, + Description: "The network security group rule id to remove.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + Arg_Protocol: { + ConflictsWith: []string{Arg_NetworkSecurityGroupRuleID}, + Description: "The protocol of the network traffic.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{All, DestinationUnreach, Echo, EchoReply, SourceQuench, TimeExceeded}), + }, + Attr_TCPFlags: { + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Description: "TCP flag.", + Required: true, + Type: schema.TypeString, + }, + }, + }, + Optional: true, + Type: schema.TypeSet, + }, + Attr_Type: { + Description: "The protocol of the network traffic.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{All, ICMP, TCP, UDP}), + }, + }, + }, + 
ForceNew: true, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + Arg_Remote: { + ConflictsWith: []string{Arg_NetworkSecurityGroupRuleID}, + Description: "The protocol of the network traffic.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Description: "The ID of the remote network address group or network security group the rules apply to. Not required for default-network-address-group.", + Optional: true, + Type: schema.TypeString, + }, + Attr_Type: { + Description: "The type of remote group (MAC addresses, IP addresses, CIDRs, external CIDRs) that are the originators of rule's network traffic to match.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validate.ValidateAllowedStringValues([]string{DefaultNAG, NAG, NSG}), + }, + }, + }, + ForceNew: true, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + Arg_SourcePorts: { + ConflictsWith: []string{Arg_NetworkSecurityGroupRuleID}, + Description: "Source port ranges.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Default: 65535, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Optional: true, + Type: schema.TypeInt, + }, + Attr_Minimum: { + Default: 1, + Description: "The start of the port range, if applicable. 
If the value is not present then the default value of 1 will be the minimum port number.", + Optional: true, + Type: schema.TypeInt, + }, + }, + }, + ForceNew: true, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + + // Attributes + Attr_CRN: { + Computed: true, + Description: "The network security group's crn.", + Type: schema.TypeString, + }, + Attr_Members: { + Computed: true, + Description: "The list of IPv4 addresses and, or network interfaces in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the member in a network security group.", + Type: schema.TypeString, + }, + Attr_MacAddress: { + Computed: true, + Description: "The mac address of a network interface included if the type is network-interface.", + Type: schema.TypeString, + }, + Attr_Target: { + Computed: true, + Description: "If ipv4-address type, then IPv4 address or if network-interface type, then network interface ID.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of member.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the network security group.", + Type: schema.TypeString, + }, + Attr_NetworkSecurityGroupID: { + Computed: true, + Description: "The unique identifier of the network security group.", + Type: schema.TypeString, + }, + Attr_Rules: { + Computed: true, + Description: "The list of rules in the network security group.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Action: { + Computed: true, + Description: "The action to take if the rule matches network traffic.", + Type: schema.TypeString, + }, + Attr_DestinationPort: { + Computed: true, + Description: "Destination port ranges.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port 
range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeInt, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeInt, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_ID: { + Computed: true, + Description: "The ID of the rule in a network security group.", + Type: schema.TypeString, + }, + Attr_Protocol: { + Computed: true, + Description: "The list of protocol.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ICMPType: { + Computed: true, + Description: "If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched.", + Type: schema.TypeString, + }, + Attr_TCPFlags: { + Computed: true, + Description: "If tcp type, the list of TCP flags and if not present then all flags are matched.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Flag: { + Computed: true, + Description: "TCP flag.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Type: { + Computed: true, + Description: "The protocol of the network traffic.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_Remote: { + Computed: true, + Description: "List of remote.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_ID: { + Computed: true, + Description: "The ID of the remote network address group or network security group the rules apply to. 
Not required for default-network-address-group.", + Type: schema.TypeString, + }, + Attr_Type: { + Computed: true, + Description: "The type of remote group the rules apply to.", + Type: schema.TypeString, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_SourcePort: { + Computed: true, + Description: "Source port ranges.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + Attr_Maximum: { + Computed: true, + Description: "The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.", + Type: schema.TypeFloat, + }, + Attr_Minimum: { + Computed: true, + Description: "The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.", + Type: schema.TypeFloat, + }, + }, + }, + Type: schema.TypeList, + }, + }, + }, + Type: schema.TypeList, + }, + Attr_UserTags: { + Computed: true, + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Type: schema.TypeSet, + }, + }, + } +} + +func resourceIBMPINetworkSecurityGroupRuleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + nsgID := d.Get(Arg_NetworkSecurityGroupID).(string) + + if v, ok := d.GetOk(Arg_NetworkSecurityGroupRuleID); ok { + ruleID := v.(string) + err := nsgClient.DeleteRule(nsgID, ruleID) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), NotFound) { + d.SetId("") + return nil + } + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkSecurityGroupRuleRemove(ctx, nsgClient, nsgID, ruleID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return 
diag.FromErr(err) + } + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, nsgID)) + } else { + action := d.Get(Arg_Action).(string) + + networkSecurityGroupAddRule := models.NetworkSecurityGroupAddRule{ + Action: &action, + } + + // Add protocol + protocol := d.Get(Arg_Protocol + ".0").(map[string]interface{}) + networkSecurityGroupAddRule.Protocol = networkSecurityGroupRuleMapToProtocol(protocol) + + // Add remote + remote := d.Get(Arg_Remote + ".0").(map[string]interface{}) + networkSecurityGroupAddRule.Remote = networkSecurityGroupRuleMapToRemote(remote) + + // Optional fields + destinationPort := d.Get(Arg_DestinationPorts + ".0").(map[string]interface{}) + networkSecurityGroupAddRule.DestinationPorts = networkSecurityGroupRuleMapToPort(destinationPort) + + sourcePort := d.Get(Arg_SourcePorts + ".0").(map[string]interface{}) + networkSecurityGroupAddRule.SourcePorts = networkSecurityGroupRuleMapToPort(sourcePort) + + networkSecurityGroup, err := nsgClient.AddRule(nsgID, &networkSecurityGroupAddRule) + ruleID := *networkSecurityGroup.ID + if err != nil { + return diag.FromErr(err) + } + _, err = isWaitForIBMPINetworkSecurityGroupRuleAdd(ctx, nsgClient, nsgID, ruleID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", cloudInstanceID, nsgID, ruleID)) + } + + return resourceIBMPINetworkSecurityGroupRuleRead(ctx, d, meta) +} + +func resourceIBMPINetworkSecurityGroupRuleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + cloudInstanceID, nsgID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + networkSecurityGroup, err := nsgClient.Get(nsgID) + if err != nil { + return diag.FromErr(err) + } + d.Set(Attr_Name, networkSecurityGroup.Name) + + if 
networkSecurityGroup.Crn != nil { + d.Set(Attr_CRN, networkSecurityGroup.Crn) + userTags, err := flex.GetTagsUsingCRN(meta, string(*networkSecurityGroup.Crn)) + if err != nil { + log.Printf("Error on get of network security group (%s) user_tags: %s", nsgID, err) + } + d.Set(Arg_UserTags, userTags) + } + + if len(networkSecurityGroup.Members) > 0 { + members := []map[string]interface{}{} + for _, mbr := range networkSecurityGroup.Members { + mbrMap := networkSecurityGroupMemberToMap(mbr) + members = append(members, mbrMap) + } + d.Set(Attr_Members, members) + } else { + d.Set(Attr_Members, []string{}) + } + + d.Set(Attr_NetworkSecurityGroupID, networkSecurityGroup.ID) + if len(networkSecurityGroup.Rules) > 0 { + rules := []map[string]interface{}{} + for _, rule := range networkSecurityGroup.Rules { + ruleMap := networkSecurityGroupRuleToMap(rule) + rules = append(rules, ruleMap) + } + d.Set(Attr_Rules, rules) + } else { + d.Set(Attr_Rules, []string{}) + } + return nil +} + +func resourceIBMPINetworkSecurityGroupRuleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + ids, err := flex.IdParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if len(ids) == 3 { + cloudInstanceID := ids[0] + nsgID := ids[1] + ruleID := ids[2] + + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(ctx, sess, cloudInstanceID) + + err = nsgClient.DeleteRule(nsgID, ruleID) + if err != nil { + return diag.FromErr(err) + } + + _, err = isWaitForIBMPINetworkSecurityGroupRuleRemove(ctx, nsgClient, nsgID, ruleID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + } + d.SetId("") + return nil +} + +func isWaitForIBMPINetworkSecurityGroupRuleAdd(ctx context.Context, client *instance.IBMPINetworkSecurityGroupClient, id, ruleID string, timeout time.Duration) (interface{}, error) { + + stateConf := 
&retry.StateChangeConf{ + Pending: []string{State_Pending}, + Target: []string{State_Available}, + Refresh: isIBMPINetworkSecurityGroupRuleAddRefreshFunc(client, id, ruleID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: time.Minute, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkSecurityGroupRuleAddRefreshFunc(client *instance.IBMPINetworkSecurityGroupClient, id, ruleID string) retry.StateRefreshFunc { + + return func() (interface{}, string, error) { + networkSecurityGroup, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if networkSecurityGroup.Rules != nil { + for _, rule := range networkSecurityGroup.Rules { + if *rule.ID == ruleID { + return networkSecurityGroup, State_Available, nil + } + + } + } + return networkSecurityGroup, State_Pending, nil + } +} + +func isWaitForIBMPINetworkSecurityGroupRuleRemove(ctx context.Context, client *instance.IBMPINetworkSecurityGroupClient, id, ruleID string, timeout time.Duration) (interface{}, error) { + + stateConf := &retry.StateChangeConf{ + Pending: []string{State_Pending}, + Target: []string{State_Removed}, + Refresh: isIBMPINetworkSecurityGroupRuleRemoveRefreshFunc(client, id, ruleID), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: time.Minute, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPINetworkSecurityGroupRuleRemoveRefreshFunc(client *instance.IBMPINetworkSecurityGroupClient, id, ruleID string) retry.StateRefreshFunc { + + return func() (interface{}, string, error) { + networkSecurityGroup, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if networkSecurityGroup.Rules != nil { + foundRule := false + for _, rule := range networkSecurityGroup.Rules { + if *rule.ID == ruleID { + foundRule = true + return networkSecurityGroup, State_Pending, nil + } + } + if !foundRule { + return networkSecurityGroup, State_Removed, nil + } + } + return networkSecurityGroup, State_Pending, nil + } +} + +func 
networkSecurityGroupRuleMapToPort(portMap map[string]interface{}) *models.NetworkSecurityGroupRulePort { + networkSecurityGroupRulePort := models.NetworkSecurityGroupRulePort{} + if portMap[Attr_Maximum] != nil { + networkSecurityGroupRulePort.Maximum = int64(portMap[Attr_Maximum].(int)) + } + if portMap[Attr_Minimum] != nil { + networkSecurityGroupRulePort.Minimum = int64(portMap[Attr_Minimum].(int)) + } + return &networkSecurityGroupRulePort +} + +func networkSecurityGroupRuleMapToRemote(remoteMap map[string]interface{}) *models.NetworkSecurityGroupRuleRemote { + networkSecurityGroupRuleRemote := models.NetworkSecurityGroupRuleRemote{} + if remoteMap[Attr_ID].(string) != "" { + networkSecurityGroupRuleRemote.ID = remoteMap[Attr_ID].(string) + } + networkSecurityGroupRuleRemote.Type = remoteMap[Attr_Type].(string) + return &networkSecurityGroupRuleRemote +} + +func networkSecurityGroupRuleMapToProtocol(protocolMap map[string]interface{}) *models.NetworkSecurityGroupRuleProtocol { + networkSecurityGroupRuleProtocol := models.NetworkSecurityGroupRuleProtocol{} + networkSecurityGroupRuleProtocol.Type = protocolMap[Attr_Type].(string) + + if networkSecurityGroupRuleProtocol.Type == ICMP { + icmpType := protocolMap[Attr_ICMPType].(string) + networkSecurityGroupRuleProtocol.IcmpType = &icmpType + } else if networkSecurityGroupRuleProtocol.Type == TCP { + tcpMaps := protocolMap[Attr_TCPFlags].(*schema.Set) + networkSecurityGroupRuleProtocolTCPFlagArray := []*models.NetworkSecurityGroupRuleProtocolTCPFlag{} + for _, tcpMap := range tcpMaps.List() { + flag := tcpMap.(map[string]interface{}) + networkSecurityGroupRuleProtocolTCPFlag := models.NetworkSecurityGroupRuleProtocolTCPFlag{} + networkSecurityGroupRuleProtocolTCPFlag.Flag = flag[Attr_Flag].(string) + networkSecurityGroupRuleProtocolTCPFlagArray = append(networkSecurityGroupRuleProtocolTCPFlagArray, &networkSecurityGroupRuleProtocolTCPFlag) + } + networkSecurityGroupRuleProtocol.TCPFlags = 
networkSecurityGroupRuleProtocolTCPFlagArray + } + + return &networkSecurityGroupRuleProtocol +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_rule_test.go b/ibm/service/power/resource_ibm_pi_network_security_group_rule_test.go new file mode 100644 index 0000000000..f6234b2abf --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_rule_test.go @@ -0,0 +1,191 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkSecurityGroupRuleBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupRuleConfigAddRule(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkSecurityGroupRuleExists("ibm_pi_network_security_group_rule.network_security_group_rule"), + resource.TestCheckResourceAttrSet("ibm_pi_network_security_group_rule.network_security_group_rule", power.Arg_NetworkSecurityGroupID), + ), + }, + }, + }) +} + +func TestAccIBMPINetworkSecurityGroupRuleTCP(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupRuleConfigAddRuleTCP(), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckIBMPINetworkSecurityGroupRuleExists("ibm_pi_network_security_group_rule.network_security_group_rule"), + resource.TestCheckResourceAttrSet("ibm_pi_network_security_group_rule.network_security_group_rule", power.Arg_NetworkSecurityGroupID), + ), + }, + }, + }) +} + +func TestAccIBMPINetworkSecurityGroupRuleRemove(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupRuleConfigRemoveRule(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkSecurityGroupRuleRemoved("ibm_pi_network_security_group_rule.network_security_group_rule", acc.Pi_network_security_group_rule_id), + resource.TestCheckResourceAttrSet("ibm_pi_network_security_group_rule.network_security_group_rule", power.Arg_NetworkSecurityGroupID), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkSecurityGroupRuleConfigAddRule() string { + return fmt.Sprintf(` + resource "ibm_pi_network_security_group_rule" "network_security_group_rule" { + pi_cloud_instance_id = "%[1]s" + pi_network_security_group_id = "%[2]s" + pi_action = "allow" + pi_protocol { + type = "all" + } + pi_remote { + id = "%[3]s" + type = "%[4]s" + } + }`, acc.Pi_cloud_instance_id, acc.Pi_network_security_group_id, acc.Pi_remote_id, acc.Pi_remote_type) +} + +func testAccCheckIBMPINetworkSecurityGroupRuleConfigAddRuleTCP() string { + return fmt.Sprintf(` + resource "ibm_pi_network_security_group_rule" "network_security_group_rule" { + pi_cloud_instance_id = "%[1]s" + pi_network_security_group_id = "%[2]s" + pi_action = "allow" + pi_destination_ports { + minimum = 1200 + maximum = 37466 + } + pi_source_ports { + minimum = 1000 + maximum = 19500 + } + pi_protocol { + tcp_flags { + flag = "ack" + } + tcp_flags { + flag = "syn" + } + tcp_flags { + flag = "psh" + } + type = "tcp" + } + pi_remote { + id = "%[3]s" + type = "%[4]s" + } + }`, 
acc.Pi_cloud_instance_id, acc.Pi_network_security_group_id, acc.Pi_remote_id, acc.Pi_remote_type) +} + +func testAccCheckIBMPINetworkSecurityGroupRuleConfigRemoveRule() string { + return fmt.Sprintf(` + resource "ibm_pi_network_security_group_rule" "network_security_group_rule" { + pi_cloud_instance_id = "%[1]s" + pi_network_security_group_id = "%[2]s" + pi_network_security_group_rule_id = "%[3]s" + }`, acc.Pi_cloud_instance_id, acc.Pi_network_security_group_id, acc.Pi_network_security_group_rule_id) +} + +func testAccCheckIBMPINetworkSecurityGroupRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nsgClient.Get(nsgID) + if err != nil { + return err + } + return nil + } +} + +func testAccCheckIBMPINetworkSecurityGroupRuleRemoved(n string, ruleID string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(context.Background(), sess, cloudInstanceID) + networkSecurityGroup, err := nsgClient.Get(nsgID) + if err != nil { + return err + } + foundRule := false + if networkSecurityGroup.Rules != nil { + for _, 
rule := range networkSecurityGroup.Rules { + if *rule.ID == ruleID { + foundRule = true + break + } + } + } + if foundRule { + return fmt.Errorf("NSG rule still exists") + } + return nil + } +} diff --git a/ibm/service/power/resource_ibm_pi_network_security_group_test.go b/ibm/service/power/resource_ibm_pi_network_security_group_test.go new file mode 100644 index 0000000000..233c8dcdf4 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_network_security_group_test.go @@ -0,0 +1,106 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" +) + +func TestAccIBMPINetworkSecurityGroupBasic(t *testing.T) { + name := fmt.Sprintf("tf-nsg-name-%d", acctest.RandIntRange(10, 100)) + nameUpdate := fmt.Sprintf("tf-nsg-name-update-%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPINetworkSecurityGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkSecurityGroupConfigBasic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMPINetworkSecurityGroupExists("ibm_pi_network_security_group.network_security_group"), + resource.TestCheckResourceAttr("ibm_pi_network_security_group.network_security_group", power.Arg_Name, name), + ), + }, + { + Config: testAccCheckIBMPINetworkSecurityGroupConfigBasic(nameUpdate), + Check: 
resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_pi_network_security_group.network_security_group", power.Arg_Name, nameUpdate), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkSecurityGroupConfigBasic(name string) string { + return fmt.Sprintf(` + resource "ibm_pi_network_security_group" "network_security_group" { + pi_cloud_instance_id = "%[1]s" + pi_name = "%[2]s" + pi_user_tags = ["tag:test"] + }`, acc.Pi_cloud_instance_id, name) +} + +func testAccCheckIBMPINetworkSecurityGroupExists(n string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nsgClient.Get(nsgID) + if err != nil { + return err + } + return nil + } +} + +func testAccCheckIBMPINetworkSecurityGroupDestroy(s *terraform.State) error { + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_pi_network_security_group" { + continue + } + + cloudInstanceID, nsgID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + nsgClient := instance.NewIBMIPINetworkSecurityGroupClient(context.Background(), sess, cloudInstanceID) + _, err = nsgClient.Get(nsgID) + if err == nil { + return fmt.Errorf("network_security_group still exists: %s", rs.Primary.ID) + } + } + + return nil +} diff --git a/ibm/service/power/resource_ibm_pi_network_test.go b/ibm/service/power/resource_ibm_pi_network_test.go index cc55a7af24..b62dac6513 100644 --- 
a/ibm/service/power/resource_ibm_pi_network_test.go +++ b/ibm/service/power/resource_ibm_pi_network_test.go @@ -21,6 +21,7 @@ import ( func TestAccIBMPINetworkbasic(t *testing.T) { name := fmt.Sprintf("tf-pi-network-%d", acctest.RandIntRange(10, 100)) + networkRes := "ibm_pi_network.power_networks" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -29,25 +30,22 @@ func TestAccIBMPINetworkbasic(t *testing.T) { { Config: testAccCheckIBMPINetworkConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckIBMPINetworkExists("ibm_pi_network.power_networks"), - resource.TestCheckResourceAttr( - "ibm_pi_network.power_networks", "pi_network_name", name), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "id"), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_gateway"), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_ipaddress_range.#"), + testAccCheckIBMPINetworkExists(networkRes), + resource.TestCheckResourceAttr(networkRes, "pi_network_name", name), + resource.TestCheckResourceAttrSet(networkRes, "id"), + resource.TestCheckResourceAttrSet(networkRes, "pi_gateway"), + resource.TestCheckResourceAttrSet(networkRes, "pi_ipaddress_range.#"), ), }, { Config: testAccCheckIBMPINetworkConfigUpdateDNS(name), Check: resource.ComposeTestCheckFunc( - testAccCheckIBMPINetworkExists("ibm_pi_network.power_networks"), - resource.TestCheckResourceAttr( - "ibm_pi_network.power_networks", "pi_network_name", name), - resource.TestCheckResourceAttr( - "ibm_pi_network.power_networks", "pi_dns.#", "1"), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "id"), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_gateway"), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_ipaddress_range.#"), + testAccCheckIBMPINetworkExists(networkRes), + resource.TestCheckResourceAttr(networkRes, 
"pi_network_name", name), + resource.TestCheckResourceAttr(networkRes, "pi_dns.#", "1"), + resource.TestCheckResourceAttrSet(networkRes, "id"), + resource.TestCheckResourceAttrSet(networkRes, "pi_gateway"), + resource.TestCheckResourceAttrSet(networkRes, "pi_ipaddress_range.#"), ), }, }, @@ -102,7 +100,6 @@ func TestAccIBMPINetworkGatewaybasicSatellite(t *testing.T) { testAccCheckIBMPINetworkExists("ibm_pi_network.power_networks"), resource.TestCheckResourceAttr( "ibm_pi_network.power_networks", "pi_network_name", name), - resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_gateway"), resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "id"), resource.TestCheckResourceAttrSet("ibm_pi_network.power_networks", "pi_ipaddress_range.#"), ), @@ -113,8 +110,6 @@ func TestAccIBMPINetworkGatewaybasicSatellite(t *testing.T) { testAccCheckIBMPINetworkExists("ibm_pi_network.power_networks"), resource.TestCheckResourceAttr( "ibm_pi_network.power_networks", "pi_network_name", name), - resource.TestCheckResourceAttr( - "ibm_pi_network.power_networks", "pi_gateway", "192.168.17.2"), resource.TestCheckResourceAttr( "ibm_pi_network.power_networks", "pi_ipaddress_range.0.pi_ending_ip_address", "192.168.17.254"), resource.TestCheckResourceAttr( @@ -157,6 +152,43 @@ func TestAccIBMPINetworkDHCPbasic(t *testing.T) { }) } +func TestAccIBMPINetworkUserTags(t *testing.T) { + name := fmt.Sprintf("tf-pi-network-%d", acctest.RandIntRange(10, 100)) + networkRes := "ibm_pi_network.power_networks" + userTagsString := `["env:dev","test_tag"]` + userTagsStringUpdated := `["env:dev","test_tag","test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPINetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkUserTagsConfig(name, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPINetworkExists(networkRes), + 
resource.TestCheckResourceAttr(networkRes, "pi_network_name", name), + resource.TestCheckResourceAttrSet(networkRes, "id"), + resource.TestCheckResourceAttr(networkRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(networkRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(networkRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPINetworkUserTagsConfig(name, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPINetworkExists(networkRes), + resource.TestCheckResourceAttr(networkRes, "pi_network_name", name), + resource.TestCheckResourceAttrSet(networkRes, "id"), + resource.TestCheckResourceAttr(networkRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(networkRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(networkRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(networkRes, "pi_user_tags.*", "test_tag2"), + ), + }, + }, + }) +} + func testAccCheckIBMPINetworkDestroy(s *terraform.State) error { sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() if err != nil { @@ -263,7 +295,6 @@ func testAccCheckIBMPINetworkConfigGatewayUpdateDNS(name string) string { pi_network_name = "%s" pi_network_type = "vlan" pi_dns = ["127.0.0.1"] - pi_gateway = "192.168.17.2" pi_cidr = "192.168.17.0/24" pi_ipaddress_range { pi_ending_ip_address = "192.168.17.254" @@ -296,3 +327,14 @@ func testAccCheckIBMPINetworkConfigGatewayDHCPUpdateDNS(name string) string { } `, acc.Pi_cloud_instance_id, name) } + +func testAccCheckIBMPINetworkUserTagsConfig(name string, userTagsString string) string { + return fmt.Sprintf(` + resource "ibm_pi_network" "power_networks" { + pi_cloud_instance_id = "%s" + pi_network_name = "%s" + pi_network_type = "pub-vlan" + pi_user_tags = %s + } + `, acc.Pi_cloud_instance_id, name, userTagsString) +} diff --git a/ibm/service/power/resource_ibm_pi_shared_processor_pool.go 
b/ibm/service/power/resource_ibm_pi_shared_processor_pool.go index 5bc4aa2d33..a636931b90 100644 --- a/ibm/service/power/resource_ibm_pi_shared_processor_pool.go +++ b/ibm/service/power/resource_ibm_pi_shared_processor_pool.go @@ -72,8 +72,20 @@ func ResourceIBMPISharedProcessorPool() *schema.Resource { Optional: true, Description: "Placement group the shared processor pool is created in", }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, // Attributes + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_SharedProcessorPoolID: { Type: schema.TypeString, Computed: true, @@ -193,6 +205,9 @@ func resourceIBMPISharedProcessorPoolCreate(ctx context.Context, d *schema.Resou if pg, ok := d.GetOk(Arg_SharedProcessorPoolPlacementGroupID); ok { body.PlacementGroupID = pg.(string) } + if tags, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(tags.(*schema.Set)) + } spp, err := client.Create(body) if err != nil || spp == nil { @@ -206,6 +221,16 @@ func resourceIBMPISharedProcessorPoolCreate(ctx context.Context, d *schema.Resou return diag.FromErr(err) } + if _, ok := d.GetOk(Arg_UserTags); ok { + if spp.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(spp.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi shared processor pool (%s) pi_user_tags during creation: %s", *spp.ID, err) + } + } + } + return resourceIBMPISharedProcessorPoolRead(ctx, d, meta) } @@ -265,6 +290,14 @@ func resourceIBMPISharedProcessorPoolRead(ctx context.Context, d *schema.Resourc } d.Set(Arg_CloudInstanceID, cloudInstanceID) + if response.SharedProcessorPool.Crn != "" { + d.Set(Attr_CRN, response.SharedProcessorPool.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, 
string(response.SharedProcessorPool.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi shared processor pool (%s) pi_user_tags: %s", *response.SharedProcessorPool.ID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set(Arg_SharedProcessorPoolHostGroup, response.SharedProcessorPool.HostGroup) if response.SharedProcessorPool.Name != nil { @@ -395,6 +428,16 @@ func resourceIBMPISharedProcessorPoolUpdate(ctx context.Context, d *schema.Resou } } + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi shared processor pool (%s) pi_user_tags: %s", sppID, err) + } + } + } + return resourceIBMPISharedProcessorPoolRead(ctx, d, meta) } diff --git a/ibm/service/power/resource_ibm_pi_shared_processor_pool_test.go b/ibm/service/power/resource_ibm_pi_shared_processor_pool_test.go new file mode 100644 index 0000000000..1e7e572b63 --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_shared_processor_pool_test.go @@ -0,0 +1,149 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccIBMPISPPbasic(t *testing.T) { + name := fmt.Sprintf("tf_pi_spp_%d", acctest.RandIntRange(10, 100)) + sppRes := "ibm_pi_shared_processor_pool.power_shared_processor_pool" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPISPPDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPISPPConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPISPPExists(sppRes), + resource.TestCheckResourceAttr(sppRes, "pi_shared_processor_pool_name", name), + ), + }, + }, + }) +} + +func TestAccIBMPISPPUserTags(t *testing.T) { + name := fmt.Sprintf("tf_pi_spp_%d", acctest.RandIntRange(10, 100)) + sppRes := "ibm_pi_shared_processor_pool.power_shared_processor_pool" + userTagsString := `["env:dev","test_tag"]` + userTagsStringUpdated := `["env:dev","test_tag","test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPISPPDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPISPPUserTagsConfig(name, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPISPPExists(sppRes), + resource.TestCheckResourceAttr(sppRes, "pi_shared_processor_pool_name", name), + resource.TestCheckResourceAttr(sppRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(sppRes, "pi_user_tags.*", "env:dev"), + 
resource.TestCheckTypeSetElemAttr(sppRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPISPPUserTagsConfig(name, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPISPPExists(sppRes), + resource.TestCheckResourceAttr(sppRes, "pi_shared_processor_pool_name", name), + resource.TestCheckResourceAttr(sppRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(sppRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(sppRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(sppRes, "pi_user_tags.*", "test_tag2"), + ), + }, + }, + }) +} + +func testAccCheckIBMPISPPDestroy(s *terraform.State) error { + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_pi_shared_processor_pool" { + continue + } + cloudInstanceID, sppID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + sppC := instance.NewIBMPISharedProcessorPoolClient(context.Background(), sess, cloudInstanceID) + spp, err := sppC.Get(sppID) + if err == nil { + return fmt.Errorf("PI SPP still exists: %s", *spp.SharedProcessorPool.ID) + } + } + + return nil +} + +func testAccCheckIBMPISPPExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + cloudInstanceID, sppID, err := splitID(rs.Primary.ID) + if err != nil { + return err + } + client := instance.NewIBMPISharedProcessorPoolClient(context.Background(), sess, cloudInstanceID) + + _, err = client.Get(sppID) + if err != nil { + return err + } + return nil + } +} + +func testAccCheckIBMPISPPConfig(name string) string { 
+ return fmt.Sprintf(` + resource "ibm_pi_shared_processor_pool" "power_shared_processor_pool" { + pi_cloud_instance_id = "%[2]s" + pi_shared_processor_pool_host_group = "s922" + pi_shared_processor_pool_name = "%[1]s" + pi_shared_processor_pool_reserved_cores = "1" + }`, name, acc.Pi_cloud_instance_id) +} + +func testAccCheckIBMPISPPUserTagsConfig(name string, userTagsString string) string { + return fmt.Sprintf(` + resource "ibm_pi_shared_processor_pool" "power_shared_processor_pool" { + pi_cloud_instance_id = "%[2]s" + pi_shared_processor_pool_host_group = "s922" + pi_shared_processor_pool_name = "%[1]s" + pi_shared_processor_pool_reserved_cores = "1" + pi_user_tags = %[3]s + + }`, name, acc.Pi_cloud_instance_id, userTagsString) +} diff --git a/ibm/service/power/resource_ibm_pi_snapshot.go b/ibm/service/power/resource_ibm_pi_snapshot.go index 37b8d232a6..7db802f492 100644 --- a/ibm/service/power/resource_ibm_pi_snapshot.go +++ b/ibm/service/power/resource_ibm_pi_snapshot.go @@ -58,6 +58,13 @@ func ResourceIBMPISnapshot() *schema.Resource { Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, Arg_VolumeIDs: { Description: "A list of volume IDs of the instance that will be part of the snapshot. 
If none are provided, then all the volumes of the instance will be part of the snapshot.", DiffSuppressFunc: flex.ApplyOnce, @@ -73,6 +80,11 @@ func ResourceIBMPISnapshot() *schema.Resource { Description: "Creation date of the snapshot.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_LastUpdateDate: { Computed: true, Description: "The last updated date of the snapshot.", @@ -123,6 +135,10 @@ func resourceIBMPISnapshotCreate(ctx context.Context, d *schema.ResourceData, me log.Printf("no volumeids provided. Will snapshot the entire instance") } + if v, ok := d.GetOk(Arg_UserTags); ok { + snapshotBody.UserTags = flex.FlattenSet(v.(*schema.Set)) + } + snapshotResponse, err := client.CreatePvmSnapShot(instanceid, snapshotBody) if err != nil { log.Printf("[DEBUG] err %s", err) @@ -137,6 +153,16 @@ func resourceIBMPISnapshotCreate(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } + if _, ok := d.GetOk(Arg_UserTags); ok { + if snapshotResponse.Crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(snapshotResponse.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi snapshot (%s) pi_user_tags during creation: %s", *snapshotResponse.SnapshotID, err) + } + } + } + return resourceIBMPISnapshotRead(ctx, d, meta) } @@ -160,6 +186,14 @@ func resourceIBMPISnapshotRead(ctx context.Context, d *schema.ResourceData, meta d.Set(Arg_SnapShotName, snapshotdata.Name) d.Set(Attr_CreationDate, snapshotdata.CreationDate.String()) + if snapshotdata.Crn != "" { + d.Set(Attr_CRN, snapshotdata.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(snapshotdata.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of pi snapshot (%s) pi_user_tags: %s", *snapshotdata.SnapshotID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set(Attr_LastUpdateDate, 
snapshotdata.LastUpdateDate.String()) d.Set(Attr_SnapshotID, *snapshotdata.SnapshotID) d.Set(Attr_Status, snapshotdata.Status) @@ -198,6 +232,16 @@ func resourceIBMPISnapshotUpdate(ctx context.Context, d *schema.ResourceData, me } } + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi snapshot (%s) pi_user_tags: %s", snapshotID, err) + } + } + } + return resourceIBMPISnapshotRead(ctx, d, meta) } diff --git a/ibm/service/power/resource_ibm_pi_snapshot_test.go b/ibm/service/power/resource_ibm_pi_snapshot_test.go index 82d9bdfe25..fca0ff5ea1 100644 --- a/ibm/service/power/resource_ibm_pi_snapshot_test.go +++ b/ibm/service/power/resource_ibm_pi_snapshot_test.go @@ -40,6 +40,45 @@ func TestAccIBMPIInstanceSnapshotbasic(t *testing.T) { }) } +func TestAccIBMPIInstanceSnapshotUserTags(t *testing.T) { + name := fmt.Sprintf("tf-pi-instance-snapshot-%d", acctest.RandIntRange(10, 100)) + snapshotRes := "ibm_pi_snapshot.power_snapshot" + userTagsString := `["env:dev","test_tag"]` + userTagsStringUpdated := `["env:dev","test_tag","test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIInstanceSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIInstanceSnapshotUserTagsConfig(name, power.OK, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceSnapshotExists(snapshotRes), + resource.TestCheckResourceAttr(snapshotRes, "pi_snap_shot_name", name), + resource.TestCheckResourceAttr(snapshotRes, "status", power.State_Available), + resource.TestCheckResourceAttrSet(snapshotRes, "id"), + resource.TestCheckResourceAttr(snapshotRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(snapshotRes, 
"pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(snapshotRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPIInstanceSnapshotUserTagsConfig(name, power.OK, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIInstanceSnapshotExists(snapshotRes), + resource.TestCheckResourceAttr(snapshotRes, "pi_snap_shot_name", name), + resource.TestCheckResourceAttr(snapshotRes, "status", power.State_Available), + resource.TestCheckResourceAttrSet(snapshotRes, "id"), + resource.TestCheckResourceAttr(snapshotRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(snapshotRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(snapshotRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(snapshotRes, "pi_user_tags.*", "test_tag2"), + ), + }, + }, + }) +} + func testAccCheckIBMPIInstanceSnapshotDestroy(s *terraform.State) error { sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() if err != nil { @@ -103,3 +142,15 @@ func testAccCheckIBMPIInstanceSnapshotConfig(name, healthStatus string) string { pi_volume_ids = [ibm_pi_volume.power_volume.volume_id] }`, acc.Pi_cloud_instance_id, name) } + +func testAccCheckIBMPIInstanceSnapshotUserTagsConfig(name, healthStatus string, userTagsString string) string { + return testAccCheckIBMPIInstanceConfig(name, healthStatus) + fmt.Sprintf(` + resource "ibm_pi_snapshot" "power_snapshot"{ + depends_on=[ibm_pi_instance.power_instance] + pi_instance_name = ibm_pi_instance.power_instance.pi_instance_name + pi_cloud_instance_id = "%s" + pi_snap_shot_name = "%s" + pi_user_tags = %s + pi_volume_ids = [ibm_pi_volume.power_volume.volume_id] + }`, acc.Pi_cloud_instance_id, name, userTagsString) +} diff --git a/ibm/service/power/resource_ibm_pi_volume.go b/ibm/service/power/resource_ibm_pi_volume.go index 7810c6f67e..365de4a32c 100644 --- a/ibm/service/power/resource_ibm_pi_volume.go +++ 
b/ibm/service/power/resource_ibm_pi_volume.go @@ -87,6 +87,21 @@ func ResourceIBMPIVolume() *schema.Resource { Optional: true, Type: schema.TypeBool, }, + Arg_ReplicationSites: { + Description: "List of replication sites for volume replication.", + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, Arg_VolumeName: { Description: "The name of the volume.", Required: true, @@ -136,6 +151,11 @@ func ResourceIBMPIVolume() *schema.Resource { Description: "The consistency group name if volume is a part of volume group.", Type: schema.TypeString, }, + Attr_CRN: { + Computed: true, + Description: "The CRN of this resource.", + Type: schema.TypeString, + }, Attr_DeleteOnTermination: { Computed: true, Description: "Indicates if the volume should be deleted when the server terminates.", @@ -171,6 +191,12 @@ func ResourceIBMPIVolume() *schema.Resource { Description: "The replication status of the volume.", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "List of replication sites for volume replication.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_ReplicationType: { Computed: true, Description: "The replication type of the volume 'metro' or 'global'.", @@ -240,6 +266,13 @@ func resourceIBMPIVolumeCreate(ctx context.Context, d *schema.ResourceData, meta replicationEnabled := v.(bool) body.ReplicationEnabled = &replicationEnabled } + if v, ok := d.GetOk(Arg_ReplicationSites); ok { + if d.Get(Arg_ReplicationEnabled).(bool) { + body.ReplicationSites = flex.FlattenSet(v.(*schema.Set)) + } else { + return diag.Errorf("Replication (%s) must be enabled if replication sites are specified.", Arg_ReplicationEnabled) + } + } if 
ap, ok := d.GetOk(Arg_AffinityPolicy); ok { policy := ap.(string) body.AffinityPolicy = &policy @@ -265,6 +298,9 @@ func resourceIBMPIVolumeCreate(ctx context.Context, d *schema.ResourceData, meta } } + if v, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(v.(*schema.Set)) + } client := instance.NewIBMPIVolumeClient(ctx, sess, cloudInstanceID) vol, err := client.CreateVolume(body) @@ -280,6 +316,14 @@ func resourceIBMPIVolumeCreate(ctx context.Context, d *schema.ResourceData, meta return diag.FromErr(err) } + if _, ok := d.GetOk(Arg_UserTags); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, string(vol.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on update of volume (%s) pi_user_tags during creation: %s", volumeid, err) + } + } + return resourceIBMPIVolumeRead(ctx, d, meta) } @@ -304,6 +348,14 @@ func resourceIBMPIVolumeRead(ctx context.Context, d *schema.ResourceData, meta i if vol.VolumeID != nil { d.Set(Attr_VolumeID, vol.VolumeID) } + if vol.Crn != "" { + d.Set(Attr_CRN, vol.Crn) + tags, err := flex.GetGlobalTagsUsingCRN(meta, string(vol.Crn), "", UserTagType) + if err != nil { + log.Printf("Error on get of volume (%s) pi_user_tags: %s", *vol.VolumeID, err) + } + d.Set(Arg_UserTags, tags) + } d.Set(Arg_VolumeName, vol.Name) d.Set(Arg_VolumePool, vol.VolumePool) if vol.Shareable != nil { @@ -324,6 +376,7 @@ func resourceIBMPIVolumeRead(ctx context.Context, d *schema.ResourceData, meta i d.Set(Attr_MirroringState, vol.MirroringState) d.Set(Attr_PrimaryRole, vol.PrimaryRole) d.Set(Arg_ReplicationEnabled, vol.ReplicationEnabled) + d.Set(Attr_ReplicationSites, vol.ReplicationSites) d.Set(Attr_ReplicationStatus, vol.ReplicationStatus) d.Set(Attr_ReplicationType, vol.ReplicationType) d.Set(Attr_VolumeStatus, vol.State) @@ -383,6 +436,16 @@ func resourceIBMPIVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if d.HasChange(Arg_UserTags) { + crn := 
d.Get(Attr_CRN) + if crn != nil && crn != "" { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of pi volume (%s) pi_user_tags: %s", volumeID, err) + } + } + } return resourceIBMPIVolumeRead(ctx, d, meta) } diff --git a/ibm/service/power/resource_ibm_pi_volume_clone.go b/ibm/service/power/resource_ibm_pi_volume_clone.go index f2a1d33ea2..37bd407c8b 100644 --- a/ibm/service/power/resource_ibm_pi_volume_clone.go +++ b/ibm/service/power/resource_ibm_pi_volume_clone.go @@ -52,6 +52,14 @@ func ResourceIBMPIVolumeClone() *schema.Resource { Optional: true, Type: schema.TypeBool, }, + Arg_UserTags: { + Description: "The user tags attached to this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, Arg_VolumeCloneName: { Description: "The base name of the newly cloned volume(s).", ForceNew: true, @@ -135,6 +143,10 @@ func resourceIBMPIVolumeCloneCreate(ctx context.Context, d *schema.ResourceData, body.TargetReplicationEnabled = flex.PtrToBool(d.Get(Arg_ReplicationEnabled).(bool)) } + if v, ok := d.GetOk(Arg_UserTags); ok { + body.UserTags = flex.FlattenSet(v.(*schema.Set)) + } + client := instance.NewIBMPICloneVolumeClient(ctx, sess, cloudInstanceID) volClone, err := client.Create(body) if err != nil { diff --git a/ibm/service/power/resource_ibm_pi_volume_group.go b/ibm/service/power/resource_ibm_pi_volume_group.go index a1031973fd..c98497d476 100644 --- a/ibm/service/power/resource_ibm_pi_volume_group.go +++ b/ibm/service/power/resource_ibm_pi_volume_group.go @@ -68,6 +68,12 @@ func ResourceIBMPIVolumeGroup() *schema.Resource { Description: "Consistency Group Name if volume is a part of volume group", Type: schema.TypeString, }, + Attr_ReplicationSites: { + Computed: true, + Description: "Indicates the replication sites of the volume 
group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, Attr_ReplicationStatus: { Computed: true, Description: "Volume Group Replication Status", @@ -168,6 +174,7 @@ func resourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set(Arg_VolumeGroupName, vg.Name) d.Set(Arg_VolumeIDs, vg.VolumeIDs) d.Set(Attr_ConsistencyGroupName, vg.ConsistencyGroupName) + d.Set(Attr_ReplicationSites, vg.ReplicationSites) d.Set(Attr_ReplicationStatus, vg.ReplicationStatus) if vg.StatusDescription != nil { d.Set(Attr_StatusDescriptionErrors, flattenVolumeGroupStatusDescription(vg.StatusDescription.Errors)) diff --git a/ibm/service/power/resource_ibm_pi_volume_test.go b/ibm/service/power/resource_ibm_pi_volume_test.go index 74ed0d8e97..dc149fc974 100644 --- a/ibm/service/power/resource_ibm_pi_volume_test.go +++ b/ibm/service/power/resource_ibm_pi_volume_test.go @@ -21,6 +21,7 @@ import ( func TestAccIBMPIVolumebasic(t *testing.T) { name := fmt.Sprintf("tf-pi-volume-%d", acctest.RandIntRange(10, 100)) + volumeRes := "ibm_pi_volume.power_volume" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -29,19 +30,16 @@ func TestAccIBMPIVolumebasic(t *testing.T) { { Config: testAccCheckIBMPIVolumeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckIBMPIVolumeExists("ibm_pi_volume.power_volume"), - resource.TestCheckResourceAttr( - "ibm_pi_volume.power_volume", "pi_volume_name", name), + testAccCheckIBMPIVolumeExists(volumeRes), + resource.TestCheckResourceAttr(volumeRes, "pi_volume_name", name), ), }, { Config: testAccCheckIBMPIVolumeSizeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckIBMPIVolumeExists("ibm_pi_volume.power_volume"), - resource.TestCheckResourceAttr( - "ibm_pi_volume.power_volume", "pi_volume_name", name), - resource.TestCheckResourceAttr( - "ibm_pi_volume.power_volume", "pi_volume_size", "30"), + 
testAccCheckIBMPIVolumeExists(volumeRes), + resource.TestCheckResourceAttr(volumeRes, "pi_volume_name", name), + resource.TestCheckResourceAttr(volumeRes, "pi_volume_size", "30"), ), }, }, @@ -260,3 +258,50 @@ func testAccCheckIBMPIVolumeUpdateBasicConfig(name, piCloudInstanceId, piStorage pi_volume_type = "%[4]v" }`, name, piCloudInstanceId, piStoragePool, piStorageType) } + +func TestAccIBMPIVolumeUserTags(t *testing.T) { + name := fmt.Sprintf("tf-pi-volume-%d", acctest.RandIntRange(10, 100)) + volumeRes := "ibm_pi_volume.power_volume" + userTagsString := `["env:dev","test_tag"]` + userTagsStringUpdated := `["env:dev","test_tag","test_tag2"]` + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeUserTagsConfig(name, userTagsString), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIVolumeExists(volumeRes), + resource.TestCheckResourceAttr(volumeRes, "pi_volume_name", name), + resource.TestCheckResourceAttr(volumeRes, "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr(volumeRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(volumeRes, "pi_user_tags.*", "test_tag"), + ), + }, + { + Config: testAccCheckIBMPIVolumeUserTagsConfig(name, userTagsStringUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIVolumeExists(volumeRes), + resource.TestCheckResourceAttr(volumeRes, "pi_volume_name", name), + resource.TestCheckResourceAttr(volumeRes, "pi_user_tags.#", "3"), + resource.TestCheckTypeSetElemAttr(volumeRes, "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr(volumeRes, "pi_user_tags.*", "test_tag"), + resource.TestCheckTypeSetElemAttr(volumeRes, "pi_user_tags.*", "test_tag2"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeUserTagsConfig(name string, userTagsString string) string { + return fmt.Sprintf(` + resource 
"ibm_pi_volume" "power_volume" { + pi_cloud_instance_id = "%[2]s" + pi_volume_name = "%[1]s" + pi_volume_shareable = true + pi_volume_size = 20 + pi_volume_type = "tier1" + pi_user_tags = %[3]s + }`, name, acc.Pi_cloud_instance_id, userTagsString) +} diff --git a/ibm/service/power/resource_ibm_pi_workspace.go b/ibm/service/power/resource_ibm_pi_workspace.go index 6315413830..179bf09507 100644 --- a/ibm/service/power/resource_ibm_pi_workspace.go +++ b/ibm/service/power/resource_ibm_pi_workspace.go @@ -19,13 +19,15 @@ import ( func ResourceIBMPIWorkspace() *schema.Resource { return &schema.Resource{ CreateContext: resourceIBMPIWorkspaceCreate, - ReadContext: resourceIBMPIWorkspaceRead, DeleteContext: resourceIBMPIWorkspaceDelete, + ReadContext: resourceIBMPIWorkspaceRead, + UpdateContext: resourceIBMPIWorkspaceUpdate, Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -59,10 +61,23 @@ func ResourceIBMPIWorkspace() *schema.Resource { Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, + Arg_UserTags: { + Description: "List of user tags attached to the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + Type: schema.TypeSet, + }, // Attributes + Attr_CRN: { + Computed: true, + Description: "The Workspace crn.", + Type: schema.TypeString, + }, Attr_WorkspaceDetails: { Computed: true, + Deprecated: "This field is deprecated, use crn instead.", Description: "Workspace information.", Type: schema.TypeMap, }, @@ -89,16 +104,28 @@ func resourceIBMPIWorkspaceCreate(ctx context.Context, d *schema.ResourceData, m return diag.FromErr(err) } - d.SetId(*controller.GUID) - _, err = waitForResourceInstanceCreate(ctx, client, *controller.GUID, d.Timeout(schema.TimeoutCreate)) + cloudInstanceID := 
*controller.GUID + d.SetId(cloudInstanceID) + + _, err = waitForResourceWorkspaceCreate(ctx, client, cloudInstanceID, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.FromErr(err) } + // Add user tags for newly created workspace + if tags, ok := d.GetOk(Arg_UserTags); ok { + if len(flex.FlattenSet(tags.(*schema.Set))) > 0 { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, *controller.CRN, "", UserTagType) + if err != nil { + log.Printf("Error on creation of workspace (%s) pi_user_tags: %s", *controller.CRN, err) + } + } + } return resourceIBMPIWorkspaceRead(ctx, d, meta) } -func waitForResourceInstanceCreate(ctx context.Context, client *instance.IBMPIWorkspacesClient, id string, timeout time.Duration) (interface{}, error) { +func waitForResourceWorkspaceCreate(ctx context.Context, client *instance.IBMPIWorkspacesClient, id string, timeout time.Duration) (interface{}, error) { stateConf := &retry.StateChangeConf{ Pending: []string{State_InProgress, State_Inactive, State_Provisioning}, Target: []string{State_Active}, @@ -137,11 +164,19 @@ func resourceIBMPIWorkspaceRead(ctx context.Context, d *schema.ResourceData, met return diag.FromErr(err) } d.Set(Arg_Name, controller.Name) + tags, err := flex.GetGlobalTagsUsingCRN(meta, *controller.CRN, "", UserTagType) + if err != nil { + log.Printf("Error on get of workspace (%s) pi_user_tags: %s", cloudInstanceID, err) + } + d.Set(Arg_UserTags, tags) + + d.Set(Attr_CRN, controller.CRN) + + // Deprecated Workspace Details Set wsDetails := map[string]interface{}{ Attr_CreationDate: controller.CreatedAt, - Attr_CRN: controller.TargetCRN, + Attr_CRN: controller.CRN, } - d.Set(Attr_WorkspaceDetails, flex.Flatten(wsDetails)) return nil @@ -159,7 +194,7 @@ func resourceIBMPIWorkspaceDelete(ctx context.Context, d *schema.ResourceData, m if err != nil && response != nil && response.StatusCode == 410 { return nil } - _, err = waitForResourceInstanceDelete(ctx, 
client, cloudInstanceID, d.Timeout(schema.TimeoutDelete)) + _, err = waitForResourceWorkspaceDelete(ctx, client, cloudInstanceID, d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.FromErr(err) } @@ -168,7 +203,7 @@ func resourceIBMPIWorkspaceDelete(ctx context.Context, d *schema.ResourceData, m return nil } -func waitForResourceInstanceDelete(ctx context.Context, client *instance.IBMPIWorkspacesClient, id string, timeout time.Duration) (interface{}, error) { +func waitForResourceWorkspaceDelete(ctx context.Context, client *instance.IBMPIWorkspacesClient, id string, timeout time.Duration) (interface{}, error) { stateConf := &retry.StateChangeConf{ Pending: []string{State_InProgress, State_Inactive, State_Active}, Target: []string{State_Removed, State_PendingReclamation}, @@ -199,3 +234,16 @@ func isIBMPIResourceDeleteRefreshFunc(client *instance.IBMPIWorkspacesClient, id } } } + +func resourceIBMPIWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + if d.HasChange(Arg_UserTags) { + if crn, ok := d.GetOk(Attr_CRN); ok { + oldList, newList := d.GetChange(Arg_UserTags) + err := flex.UpdateGlobalTagsUsingCRN(oldList, newList, meta, crn.(string), "", UserTagType) + if err != nil { + log.Printf("Error on update of workspace (%s) pi_user_tags: %s", crn, err) + } + } + } + return resourceIBMPIWorkspaceRead(ctx, d, meta) +} diff --git a/ibm/service/power/resource_ibm_pi_workspace_test.go b/ibm/service/power/resource_ibm_pi_workspace_test.go index dd5938119e..148a1337a9 100644 --- a/ibm/service/power/resource_ibm_pi_workspace_test.go +++ b/ibm/service/power/resource_ibm_pi_workspace_test.go @@ -7,8 +7,9 @@ import ( "strings" "testing" - st "github.com/IBM-Cloud/power-go-client/clients/instance" acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" 
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/power" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -34,14 +35,44 @@ func TestAccIBMPIWorkspaceBasic(t *testing.T) { }) } +func TestAccIBMPIWorkspaceUserTags(t *testing.T) { + name := fmt.Sprintf("tf-pi-workspace-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccIBMPIWorkspaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIWorkspaceUserTagConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIWorkspaceExists("ibm_pi_workspace.powervs_service_instance"), + resource.TestCheckResourceAttrSet("ibm_pi_workspace.powervs_service_instance", "id"), + resource.TestCheckResourceAttr("ibm_pi_workspace.powervs_service_instance", "pi_user_tags.#", "2"), + resource.TestCheckTypeSetElemAttr("ibm_pi_workspace.powervs_service_instance", "pi_user_tags.*", "env:dev"), + resource.TestCheckTypeSetElemAttr("ibm_pi_workspace.powervs_service_instance", "pi_user_tags.*", "dataresidency:france"), + ), + }, + }, + }) +} + func testAccCheckIBMPIWorkspaceConfig(name string) string { return fmt.Sprintf(` - resource "ibm_pi_workspace" "powervs_service_instance" { - pi_name = "%[1]s" - pi_datacenter = "dal12" - pi_resource_group_id = "%[2]s" - } - `, name, acc.Pi_resource_group_id) + resource "ibm_pi_workspace" "powervs_service_instance" { + pi_name = "%[1]s" + pi_datacenter = "dal12" + pi_resource_group_id = "%[2]s" + }`, name, acc.Pi_resource_group_id) +} + +func testAccCheckIBMPIWorkspaceUserTagConfig(name string) string { + return fmt.Sprintf(` + resource "ibm_pi_workspace" "powervs_service_instance" { + pi_name = "%[1]s" + pi_datacenter = "dal12" + pi_resource_group_id = "%[2]s" + pi_user_tags = ["env:dev", "dataresidency:france"] + }`, name, acc.Pi_resource_group_id) } func testAccIBMPIWorkspaceDestroy(s *terraform.State) error { @@ -54,7 +85,7 @@ 
func testAccIBMPIWorkspaceDestroy(s *terraform.State) error { continue } cloudInstanceID := rs.Primary.ID - client := st.NewIBMPIWorkspacesClient(context.Background(), sess, cloudInstanceID) + client := instance.NewIBMPIWorkspacesClient(context.Background(), sess, cloudInstanceID) workspace, resp, err := client.GetRC(cloudInstanceID) if err == nil { if *workspace.State == power.State_Active { @@ -71,7 +102,6 @@ func testAccIBMPIWorkspaceDestroy(s *terraform.State) error { func testAccCheckIBMPIWorkspaceExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] if !ok { @@ -88,7 +118,7 @@ func testAccCheckIBMPIWorkspaceExists(n string) resource.TestCheckFunc { } cloudInstanceID := rs.Primary.ID - client := st.NewIBMPIWorkspacesClient(context.Background(), sess, cloudInstanceID) + client := instance.NewIBMPIWorkspacesClient(context.Background(), sess, cloudInstanceID) _, _, err = client.GetRC(cloudInstanceID) if err != nil { return err diff --git a/ibm/service/schematics/resource_ibm_schematics_agent.go b/ibm/service/schematics/resource_ibm_schematics_agent.go index 02843f1efd..5dd0150fe9 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent.go @@ -10,6 +10,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" @@ -26,9 +27,9 @@ func ResourceIbmSchematicsAgent() *schema.Resource { DeleteContext: resourceIbmSchematicsAgentDelete, Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * 
time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -267,6 +268,11 @@ func ResourceIbmSchematicsAgent() *schema.Resource { }, }, }, + "run_destroy_resources": { + Type: schema.TypeInt, + Optional: true, + Description: "Argument which helps to run destroy resources job. Increment the value to destroy resources associated with agent deployment.", + }, "user_state": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, @@ -766,7 +772,35 @@ func resourceIbmSchematicsAgentRead(context context.Context, d *schema.ResourceD return nil } +func isWaitForAgentDestroyResources(context context.Context, schematicsClient *schematicsv1.SchematicsV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for agent (%s) resources to be destroyed.", id) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", agentProvisioningStatusCodeJobInProgress, agentProvisioningStatusCodeJobPending, agentProvisioningStatusCodeJobReadyToExecute, agentProvisioningStatusCodeJobStopInProgress}, + Target: []string{agentProvisioningStatusCodeJobFinished, agentProvisioningStatusCodeJobFailed, agentProvisioningStatusCodeJobCancelled, agentProvisioningStatusCodeJobStopped, ""}, + Refresh: agentDestroyRefreshFunc(schematicsClient, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + return stateConf.WaitForStateContext(context) +} +func agentDestroyRefreshFunc(schematicsClient *schematicsv1.SchematicsV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ + AgentID: core.StringPtr(id), + Profile: core.StringPtr("detailed"), + } + agent, response, err := schematicsClient.GetAgentData(getAgentDataOptions) + if err != nil { + return nil, "", fmt.Errorf("[ERROR] Error Getting Agent: %s\n%s", err, response) + } + if agent.RecentDestroyJob.StatusCode != nil { + return agent, 
*agent.RecentDestroyJob.StatusCode, nil + } + return agent, agentProvisioningStatusCodeJobPending, nil + } +} func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { schematicsClient, err := meta.(conns.ClientSession).SchematicsV1() if err != nil { @@ -778,8 +812,10 @@ func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.Resourc if err != nil { return diag.FromErr(err) } + iamAccessToken := session.Config.IAMAccessToken iamRefreshToken := session.Config.IAMRefreshToken ff := map[string]string{ + "Authorization": iamAccessToken, "refresh_token": iamRefreshToken, } updateAgentDataOptions.Headers = ff @@ -882,7 +918,23 @@ func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.Resourc updateAgentDataOptions.SetAgentMetadata(agentMetadata) hasChange = true } + if d.HasChange("run_destroy_resources") { + deleteAgentResourcesOptions := &schematicsv1.DeleteAgentResourcesOptions{} + deleteAgentResourcesOptions.Headers = ff + deleteAgentResourcesOptions.SetAgentID(d.Id()) + deleteAgentResourcesOptions.SetRefreshToken(iamRefreshToken) + + response, err := schematicsClient.DeleteAgentResourcesWithContext(context, deleteAgentResourcesOptions) + if err != nil { + log.Printf("[DEBUG] DeleteAgentResourcesWithContext failed %s\n%s", err, response) + } else { + _, err = isWaitForAgentDestroyResources(context, schematicsClient, *deleteAgentResourcesOptions.AgentID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + log.Printf("[DEBUG] waiting for agent deploy resources to be destroyed has failed %s", err) + } + } + } if hasChange { _, response, err := schematicsClient.UpdateAgentDataWithContext(context, updateAgentDataOptions) if err != nil { @@ -907,18 +959,40 @@ func resourceIbmSchematicsAgentDelete(context context.Context, d *schema.Resourc if err != nil { return diag.FromErr(err) } + iamAccessToken := session.Config.IAMAccessToken iamRefreshToken := 
session.Config.IAMRefreshToken ff := map[string]string{ + "Authorization": iamAccessToken, "refresh_token": iamRefreshToken, } deleteAgentDataOptions.Headers = ff deleteAgentDataOptions.SetAgentID(d.Id()) - response, err := schematicsClient.DeleteAgentDataWithContext(context, deleteAgentDataOptions) + // first try destroying resources associated with agent deploy and then delete the agent + + deleteAgentResourcesOptions := &schematicsv1.DeleteAgentResourcesOptions{} + deleteAgentResourcesOptions.Headers = ff + + deleteAgentResourcesOptions.SetAgentID(d.Id()) + deleteAgentResourcesOptions.SetRefreshToken(iamRefreshToken) + + response, err := schematicsClient.DeleteAgentResourcesWithContext(context, deleteAgentResourcesOptions) + if err != nil { + log.Printf("[DEBUG] DeleteAgentResourcesWithContext failed %s\n%s", err, response) + } else { + _, err = isWaitForAgentDestroyResources(context, schematicsClient, *deleteAgentResourcesOptions.AgentID, d.Timeout(schema.TimeoutDelete)) + if err != nil { + log.Printf("[DEBUG] waiting for agent deploy resources to be destroyed has failed %s", err) + } + } + + // After deploy associated resources are destroyed, now attempt to delete the agent + + deleteresponse, err := schematicsClient.DeleteAgentDataWithContext(context, deleteAgentDataOptions) if err != nil { - log.Printf("[DEBUG] DeleteAgentDataWithContext failed %s\n%s", err, response) - return diag.FromErr(fmt.Errorf("DeleteAgentDataWithContext failed %s\n%s", err, response)) + log.Printf("[DEBUG] DeleteAgentDataWithContext failed %s\n%s", err, deleteresponse) + return diag.FromErr(fmt.Errorf("DeleteAgentDataWithContext failed %s\n%s", err, deleteresponse)) } d.SetId("") diff --git a/ibm/service/vpc/data_source_ibm_is_lb.go b/ibm/service/vpc/data_source_ibm_is_lb.go index 7b55421d91..9f3f5f9759 100644 --- a/ibm/service/vpc/data_source_ibm_is_lb.go +++ b/ibm/service/vpc/data_source_ibm_is_lb.go @@ -17,6 +17,7 @@ const ( name = "name" poolAlgorithm = "algorithm" href = "href" 
+ family = "family" poolProtocol = "protocol" poolCreatedAt = "created_at" poolProvisioningStatus = "provisioning_status" @@ -50,6 +51,11 @@ func DataSourceIBMISLB() *schema.Resource { Required: true, Description: "Load Balancer name", }, + isLBAccessMode: { + Type: schema.TypeString, + Computed: true, + Description: "The access mode of this load balancer", + }, "dns": { Type: schema.TypeList, Computed: true, @@ -74,7 +80,21 @@ func DataSourceIBMISLB() *schema.Resource { Computed: true, Description: "Load Balancer type", }, - + isLBAvailability: { + Type: schema.TypeString, + Computed: true, + Description: "The availability of this load balancer", + }, + isLBInstanceGroupsSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports instance groups.", + }, + isLBSourceIPPersistenceSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports source IP session persistence.", + }, isLBUdpSupported: { Type: schema.TypeBool, Computed: true, @@ -86,7 +106,11 @@ func DataSourceIBMISLB() *schema.Resource { Computed: true, Description: "Load Balancer status", }, - + isLbProfile: { + Type: schema.TypeMap, + Computed: true, + Description: "The profile to use for this load balancer", + }, isLBRouteMode: { Type: schema.TypeBool, Computed: true, @@ -354,6 +378,18 @@ func lbGetByName(d *schema.ResourceData, meta interface{}, name string) error { for _, lb := range allrecs { if *lb.Name == name { d.SetId(*lb.ID) + if lb.Availability != nil { + d.Set(isLBAvailability, *lb.Availability) + } + if lb.AccessMode != nil { + d.Set(isLBAccessMode, *lb.AccessMode) + } + if lb.InstanceGroupsSupported != nil { + d.Set(isLBInstanceGroupsSupported, *lb.InstanceGroupsSupported) + } + if lb.SourceIPSessionPersistenceSupported != nil { + d.Set(isLBSourceIPPersistenceSupported, *lb.SourceIPSessionPersistenceSupported) + } dnsList := make([]map[string]interface{}, 0) if lb.Dns != nil { 
dns := map[string]interface{}{} @@ -368,9 +404,18 @@ func lbGetByName(d *schema.ResourceData, meta interface{}, name string) error { } if *lb.IsPublic { d.Set(isLBType, "public") + } else if lb.IsPrivatePath != nil && *lb.IsPrivatePath { + d.Set(isLBType, "private_path") } else { d.Set(isLBType, "private") } + lbProfile := make(map[string]interface{}) + if lb.Profile != nil { + lbProfile[isLBName] = *lb.Profile.Name + lbProfile[href] = *lb.Profile.Href + lbProfile[family] = *lb.Profile.Family + } + d.Set(isLbProfile, lbProfile) d.Set(isLBStatus, *lb.ProvisioningStatus) if lb.RouteMode != nil { d.Set(isLBRouteMode, *lb.RouteMode) diff --git a/ibm/service/vpc/data_source_ibm_is_lb_profile.go b/ibm/service/vpc/data_source_ibm_is_lb_profile.go index a7151bbd99..2bc81ac8cf 100644 --- a/ibm/service/vpc/data_source_ibm_is_lb_profile.go +++ b/ibm/service/vpc/data_source_ibm_is_lb_profile.go @@ -24,6 +24,25 @@ func DataSourceIBMISLbProfile() *schema.Resource { Required: true, Description: "The name for this load balancer profile", }, + isLBAccessModes: { + Type: schema.TypeList, + Computed: true, + Description: "The access mode for a load balancer with this profile", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type for access mode", + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: "Access modes for this profile", + }, + }, + }, + }, "href": { Type: schema.TypeString, Computed: true, @@ -76,6 +95,19 @@ func dataSourceIBMISLbProfileRead(context context.Context, d *schema.ResourceDat d.Set("name", *lbProfile.Name) d.Set("href", *lbProfile.Href) d.Set("family", *lbProfile.Family) + if lbProfile.AccessModes != nil { + accessModes := lbProfile.AccessModes + AccessModesMap := map[string]interface{}{} + AccessModesList := []map[string]interface{}{} + if accessModes.Type != nil { + AccessModesMap["type"] = *accessModes.Type + } + if len(accessModes.Values) > 0 
{ + AccessModesMap["value"] = accessModes.Values + } + AccessModesList = append(AccessModesList, AccessModesMap) + d.Set(isLBAccessModes, AccessModesList) + } log.Printf("[INFO] lbprofile udp %v", lbProfile.UDPSupported) if lbProfile.UDPSupported != nil { udpSupport := lbProfile.UDPSupported diff --git a/ibm/service/vpc/data_source_ibm_is_lb_profiles.go b/ibm/service/vpc/data_source_ibm_is_lb_profiles.go index dc067f1710..143402b6a3 100644 --- a/ibm/service/vpc/data_source_ibm_is_lb_profiles.go +++ b/ibm/service/vpc/data_source_ibm_is_lb_profiles.go @@ -34,6 +34,44 @@ func DataSourceIBMISLbProfiles() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + isLBAccessModes: { + Type: schema.TypeList, + Computed: true, + Description: "The access mode for a load balancer with this profile", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type for access mode", + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: "Access modes for this profile", + }, + }, + }, + }, + "availability": { + Type: schema.TypeList, + Computed: true, + Description: "The availability mode for a load balancer with this profile", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of availability, one of [fixed, dependent]", + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: "The availability of this load balancer, one of [subnet, region]. 
Applicable only if type is fixed", + }, + }, + }, + }, "name": { Type: schema.TypeString, Computed: true, @@ -49,6 +87,44 @@ func DataSourceIBMISLbProfiles() *schema.Resource { Computed: true, Description: "The product family this load balancer profile belongs to", }, + "instance_groups_supported": { + Type: schema.TypeList, + Computed: true, + Description: "The instance groups support for the load balancer with this profile", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of support for instance groups, one of [fixed, dependent]", + }, + "value": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether Instance groups are supported for this profile. Applicable only if type is fixed", + }, + }, + }, + }, + "source_ip_session_persistence_supported": { + Type: schema.TypeList, + Computed: true, + Description: "The source IP session ip persistence support for a load balancer with this profile", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of support for session ip persistence, one of [fixed, dependent on configuration]", + }, + "value": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether session ip persistence are supported for this profile. 
Applicable only if type is fixed", + }, + }, + }, + }, "route_mode_supported": { Type: schema.TypeBool, Computed: true, @@ -175,6 +251,59 @@ func dataSourceIBMISLbProfilesRead(d *schema.ResourceData, meta interface{}) err } } } + + if profileCollector.AccessModes != nil { + accessModes := profileCollector.AccessModes + AccessModesMap := map[string]interface{}{} + AccessModesList := []map[string]interface{}{} + if accessModes.Type != nil { + AccessModesMap["type"] = *accessModes.Type + } + if len(accessModes.Values) > 0 { + AccessModesMap["value"] = accessModes.Values + } + AccessModesList = append(AccessModesList, AccessModesMap) + l[isLBAccessModes] = AccessModesList + } + if profileCollector.Availability != nil { + availabilitySupport := profileCollector.Availability.(*vpcv1.LoadBalancerProfileAvailability) + availabilitySupportMap := map[string]interface{}{} + availabilitySupportList := []map[string]interface{}{} + if availabilitySupport.Type != nil { + availabilitySupportMap["type"] = *availabilitySupport.Type + } + if availabilitySupport.Value != nil { + availabilitySupportMap["value"] = *availabilitySupport.Value + } + availabilitySupportList = append(availabilitySupportList, availabilitySupportMap) + l["availability"] = availabilitySupportList + } + if profileCollector.InstanceGroupsSupported != nil { + instanceGroupSupport := profileCollector.InstanceGroupsSupported.(*vpcv1.LoadBalancerProfileInstanceGroupsSupported) + instanceGroupSupportMap := map[string]interface{}{} + instanceGroupSupportList := []map[string]interface{}{} + if instanceGroupSupport.Type != nil { + instanceGroupSupportMap["type"] = *instanceGroupSupport.Type + } + if instanceGroupSupport.Value != nil { + instanceGroupSupportMap["value"] = *instanceGroupSupport.Value + } + instanceGroupSupportList = append(instanceGroupSupportList, instanceGroupSupportMap) + l["source_ip_session_persistence_supported"] = instanceGroupSupportList + } + if profileCollector.SourceIPSessionPersistenceSupported 
!= nil { + sourceIpPersistenceSupport := profileCollector.SourceIPSessionPersistenceSupported.(*vpcv1.LoadBalancerProfileSourceIPSessionPersistenceSupported) + sourceIpPersistenceSupportMap := map[string]interface{}{} + sourceIpPersistenceSupportList := []map[string]interface{}{} + if sourceIpPersistenceSupport.Type != nil { + sourceIpPersistenceSupportMap["type"] = *sourceIpPersistenceSupport.Type + } + if sourceIpPersistenceSupport.Value != nil { + sourceIpPersistenceSupportMap["value"] = *sourceIpPersistenceSupport.Value + } + sourceIpPersistenceSupportList = append(sourceIpPersistenceSupportList, sourceIpPersistenceSupportMap) + l["instance_groups_supported"] = sourceIpPersistenceSupportList + } lbprofilesInfo = append(lbprofilesInfo, l) } d.SetId(dataSourceIBMISLbProfilesID(d)) diff --git a/ibm/service/vpc/data_source_ibm_is_lb_test.go b/ibm/service/vpc/data_source_ibm_is_lb_test.go index ca689d6d41..bbea1cadfb 100644 --- a/ibm/service/vpc/data_source_ibm_is_lb_test.go +++ b/ibm/service/vpc/data_source_ibm_is_lb_test.go @@ -29,6 +29,9 @@ func TestAccIBMISLBDatasource_basic(t *testing.T) { "data.ibm_is_lb.ds_lb", "name", name), resource.TestCheckResourceAttr( "data.ibm_is_lb.ds_lb", "route_mode", routeMode), + resource.TestCheckResourceAttrSet("data.ibm_is_lb.ds_lb", "availability"), + resource.TestCheckResourceAttrSet("data.ibm_is_lb.ds_lb", "instance_groups_supported"), + resource.TestCheckResourceAttrSet("data.ibm_is_lb.ds_lb", "source_ip_persistence_supported"), ), }, }, diff --git a/ibm/service/vpc/data_source_ibm_is_lbs.go b/ibm/service/vpc/data_source_ibm_is_lbs.go index 1655fa15db..51482b185f 100644 --- a/ibm/service/vpc/data_source_ibm_is_lbs.go +++ b/ibm/service/vpc/data_source_ibm_is_lbs.go @@ -32,6 +32,11 @@ func DataSourceIBMISLBS() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + isLBAccessMode: { + Type: schema.TypeString, + Computed: true, + Description: "The access mode of this load balancer", + }, 
ID: { Type: schema.TypeString, Computed: true, @@ -75,6 +80,21 @@ func DataSourceIBMISLBS() *schema.Resource { Computed: true, Description: "Load Balancer name", }, + isLBAvailability: { + Type: schema.TypeString, + Computed: true, + Description: "The availability of this load balancer", + }, + isLBInstanceGroupsSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports instance groups.", + }, + isLBSourceIPPersistenceSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports source IP session persistence.", + }, isLBUdpSupported: { Type: schema.TypeBool, Computed: true, @@ -321,6 +341,18 @@ func getLbs(d *schema.ResourceData, meta interface{}) error { lbInfo := make(map[string]interface{}) // log.Printf("******* lb ******** : (%+v)", lb) lbInfo[ID] = *lb.ID + if lb.Availability != nil { + lbInfo[isLBAvailability] = *lb.Availability + } + if lb.AccessMode != nil { + lbInfo[isLBAccessMode] = *lb.AccessMode + } + if lb.InstanceGroupsSupported != nil { + lbInfo[isLBInstanceGroupsSupported] = *lb.InstanceGroupsSupported + } + if lb.SourceIPSessionPersistenceSupported != nil { + lbInfo[isLBSourceIPPersistenceSupported] = *lb.SourceIPSessionPersistenceSupported + } lbInfo[isLBName] = *lb.Name dnsList := make([]map[string]interface{}, 0) if lb.Dns != nil { @@ -342,6 +374,8 @@ func getLbs(d *schema.ResourceData, meta interface{}) error { lbInfo[CreatedAt] = lb.CreatedAt.String() if *lb.IsPublic { lbInfo[isLBType] = "public" + } else if *lb.IsPrivatePath { + lbInfo[isLBType] = "private_path" } else { lbInfo[isLBType] = "private" } @@ -437,7 +471,7 @@ func getLbs(d *schema.ResourceData, meta interface{}) error { lbProfile := make(map[string]interface{}) lbProfile[name] = *lb.Profile.Name lbProfile[href] = *lb.Profile.Href - lbProfile["family"] = *lb.Profile.Family + lbProfile[family] = *lb.Profile.Family lbInfo[isLbProfile] = lbProfile } 
lbInfo[isLBResourceGroup] = *lb.ResourceGroup.ID diff --git a/ibm/service/vpc/data_source_ibm_is_lbs_test.go b/ibm/service/vpc/data_source_ibm_is_lbs_test.go index fd078bd5f5..9220036734 100644 --- a/ibm/service/vpc/data_source_ibm_is_lbs_test.go +++ b/ibm/service/vpc/data_source_ibm_is_lbs_test.go @@ -30,6 +30,9 @@ func TestAccIBMISLBSDatasource_basic(t *testing.T) { testAccCheckIBMISLBExists("ibm_is_lb.testacc_lb", lb), resource.TestCheckResourceAttr( "data.ibm_is_lb.ds_lb", "name", name), + resource.TestCheckResourceAttrSet("data.ibm_is_lbs.ds_lb", "load_balancers.0.availability"), + resource.TestCheckResourceAttrSet("data.ibm_is_lbs.ds_lb", "load_balancers.0.instance_groups_supported"), + resource.TestCheckResourceAttrSet("data.ibm_is_lbs.ds_lb", "load_balancers.0.source_ip_persistence_supported"), ), }, { diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway.go new file mode 100644 index 0000000000..c2093230c9 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway.go @@ -0,0 +1,477 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGateway() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewayRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"private_path_service_gateway_name", "private_path_service_gateway"}, + Description: "The private path service gateway identifier.", + }, + "private_path_service_gateway_name": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"private_path_service_gateway_name", "private_path_service_gateway"}, + Description: "The private path service gateway name.", + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the private path service gateway was created.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this private path service gateway.", + }, + "default_access_policy": { + Type: schema.TypeString, + Computed: true, + Description: "The policy to use for bindings from accounts without an explicit account policy.", + }, + "endpoint_gateway_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of endpoint gateways using this private path service gateway.", + }, + "endpoint_gateway_binding_auto_delete": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed.", + }, + "endpoint_gateway_binding_auto_delete_timeout": { + 
Type: schema.TypeInt, + Computed: true, + Description: "If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this private path service gateway.", + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the private path service gateway.", + }, + "load_balancer": { + Type: schema.TypeList, + Computed: true, + Description: "The load balancer for this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The load balancer's CRN.", + }, + "deleted": { + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted, and providessome supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The load balancer's canonical URL.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this load balancer. The name is unique across all load balancers in the VPC.", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this private path service gateway. 
The name is unique across all private path service gateways in the VPC.", + }, + "published": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates the availability of this private path service gateway- `true`: Any account can request access to this private path service gateway.- `false`: Access is restricted to the account that created this private path service gateway.", + }, + "region": { + Type: schema.TypeList, + Computed: true, + Description: "The region served by this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this region.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this region.", + }, + }, + }, + }, + "resource_group": { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this resource group.", + }, + }, + }, + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "service_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: "The fully qualified domain names for this private path service gateway.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vpc": { + Type: schema.TypeList, + Computed: true, + Description: "The VPC this private path service gateway resides in.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The 
CRN for this VPC.", + }, + "deleted": { + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted, and providessome supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this VPC.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this VPC.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this VPC. The name is unique across all VPCs in the region.", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "zonal_affinity": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this private path service gateway has zonal affinity.- `true`: Traffic to the service from a zone will favor service endpoints in the same zone.- `false`: Traffic to the service from a zone will be load balanced across all zones in the region the service resides in.", + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewayRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + var privatePathServiceGateway *vpcv1.PrivatePathServiceGateway + if ppsgId, ok := d.GetOk("private_path_service_gateway"); ok { + + getPrivatePathServiceGatewayOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{} + + getPrivatePathServiceGatewayOptions.SetID(ppsgId.(string)) + + ppsg, response, err := vpcClient.GetPrivatePathServiceGatewayWithContext(context, getPrivatePathServiceGatewayOptions) + if err != nil { + 
log.Printf("[DEBUG] GetPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + privatePathServiceGateway = ppsg + } else { + ppsgName := d.Get("private_path_service_gateway_name").(string) + + listPrivatePathServiceGatewaysOptions := &vpcv1.ListPrivatePathServiceGatewaysOptions{} + + privatePathServiceGatewayCollection, response, err := vpcClient.ListPrivatePathServiceGatewaysWithContext(context, listPrivatePathServiceGatewaysOptions) + if err != nil { + log.Printf("[DEBUG] ListPrivatePathServiceGatewaysWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("ListPrivatePathServiceGatewaysWithContext failed %s\n%s", err, response)) + } + if privatePathServiceGatewayCollection.PrivatePathServiceGateways != nil { + for _, ppsgItem := range privatePathServiceGatewayCollection.PrivatePathServiceGateways { + if *ppsgItem.Name == ppsgName { + privatePathServiceGateway = &ppsgItem + } + } + } + } + d.SetId(*privatePathServiceGateway.ID) + + if err = d.Set("created_at", flex.DateTimeToString(privatePathServiceGateway.CreatedAt)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + } + + if err = d.Set("crn", privatePathServiceGateway.CRN); err != nil { + return diag.FromErr(fmt.Errorf("Error setting crn: %s", err)) + } + + if err = d.Set("default_access_policy", privatePathServiceGateway.DefaultAccessPolicy); err != nil { + return diag.FromErr(fmt.Errorf("Error setting default_access_policy: %s", err)) + } + + if err = d.Set("endpoint_gateway_count", flex.IntValue(privatePathServiceGateway.EndpointGatewayCount)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_count: %s", err)) + } + if err = d.Set("endpoint_gateway_binding_auto_delete", privatePathServiceGateway.EndpointGatewayBindingAutoDelete); err != nil { + return diag.FromErr(fmt.Errorf("Error setting 
endpoint_gateway_binding_auto_delete: %s", err)) + } + if err = d.Set("endpoint_gateway_binding_auto_delete_timeout", privatePathServiceGateway.EndpointGatewayBindingAutoDeleteTimeout); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_binding_auto_delete_timeout: %s", err)) + } + if err = d.Set("href", privatePathServiceGateway.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + + if err = d.Set("lifecycle_state", privatePathServiceGateway.LifecycleState); err != nil { + return diag.FromErr(fmt.Errorf("Error setting lifecycle_state: %s", err)) + } + + loadBalancer := []map[string]interface{}{} + if privatePathServiceGateway.LoadBalancer != nil { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayLoadBalancerReferenceToMap(privatePathServiceGateway.LoadBalancer) + if err != nil { + return diag.FromErr(err) + } + loadBalancer = append(loadBalancer, modelMap) + } + if err = d.Set("load_balancer", loadBalancer); err != nil { + return diag.FromErr(fmt.Errorf("Error setting load_balancer %s", err)) + } + + if err = d.Set("name", privatePathServiceGateway.Name); err != nil { + return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + } + + if err = d.Set("published", privatePathServiceGateway.Published); err != nil { + return diag.FromErr(fmt.Errorf("Error setting published: %s", err)) + } + + region := []map[string]interface{}{} + // if privatePathServiceGateway.Remote != nil && privatePathServiceGateway.Remote.Region != nil { + // modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayRegionReferenceToMap(privatePathServiceGateway.Remote.Region) + // if err != nil { + // return diag.FromErr(err) + // } + // region = append(region, modelMap) + // } + if err = d.Set("region", region); err != nil { + return diag.FromErr(fmt.Errorf("Error setting region %s", err)) + } + + resourceGroup := []map[string]interface{}{} + if privatePathServiceGateway.ResourceGroup != nil { + modelMap, err := 
dataSourceIBMIsPrivatePathServiceGatewayResourceGroupReferenceToMap(privatePathServiceGateway.ResourceGroup) + if err != nil { + return diag.FromErr(err) + } + resourceGroup = append(resourceGroup, modelMap) + } + if err = d.Set("resource_group", resourceGroup); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_group %s", err)) + } + + if err = d.Set("resource_type", privatePathServiceGateway.ResourceType); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) + } + if err = d.Set("service_endpoints", privatePathServiceGateway.ServiceEndpoints); err != nil { + return diag.FromErr(fmt.Errorf("Error setting service_endpoints: %s", err)) + } + vpc := []map[string]interface{}{} + if privatePathServiceGateway.VPC != nil { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayVPCReferenceToMap(privatePathServiceGateway.VPC) + if err != nil { + return diag.FromErr(err) + } + vpc = append(vpc, modelMap) + } + if err = d.Set("vpc", vpc); err != nil { + return diag.FromErr(fmt.Errorf("Error setting vpc %s", err)) + } + + if err = d.Set("zonal_affinity", privatePathServiceGateway.ZonalAffinity); err != nil { + return diag.FromErr(fmt.Errorf("Error setting zonal_affinity: %s", err)) + } + + return nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayLoadBalancerReferenceToMap(model *vpcv1.LoadBalancerReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CRN != nil { + modelMap["crn"] = *model.CRN + } + if model.Deleted != nil { + deletedMap, err := dataSourceIBMIsPrivatePathServiceGatewayLoadBalancerReferenceDeletedToMap(model.Deleted) + if err != nil { + return modelMap, err + } + modelMap["deleted"] = []map[string]interface{}{deletedMap} + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.ResourceType != nil { + 
modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayLoadBalancerReferenceDeletedToMap(model *vpcv1.Deleted) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.MoreInfo != nil { + modelMap["more_info"] = *model.MoreInfo + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayRegionReferenceToMap(model *vpcv1.RegionReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayResourceGroupReferenceToMap(model *vpcv1.ResourceGroupReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayVPCReferenceToMap(model *vpcv1.VPCReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CRN != nil { + modelMap["crn"] = *model.CRN + } + if model.Deleted != nil { + deletedMap, err := dataSourceIBMIsPrivatePathServiceGatewayVPCReferenceDeletedToMap(model.Deleted) + if err != nil { + return modelMap, err + } + modelMap["deleted"] = []map[string]interface{}{deletedMap} + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayVPCReferenceDeletedToMap(model *vpcv1.Deleted) (map[string]interface{}, error) { + modelMap := 
make(map[string]interface{}) + if model.MoreInfo != nil { + modelMap["more_info"] = *model.MoreInfo + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies.go new file mode 100644 index 0000000000..d7885dad48 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies.go @@ -0,0 +1,194 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGatewayAccountPolicies() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The private path service gateway identifier.", + }, + "account": { + Type: schema.TypeString, + Optional: true, + Description: "Filters the collection to resources with the specified account identifier.", + }, + "account_policies": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Collection of account policies.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewedThe enumerated values for this property are expected to expand in the future. 
When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "account": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The account for this access policy.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this account policy.", + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this account policy.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was updated.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + ppsgId := d.Get("private_path_service_gateway").(string) + listPrivatePathServiceGatewayAccountPoliciesOptions := &vpcv1.ListPrivatePathServiceGatewayAccountPoliciesOptions{} + + listPrivatePathServiceGatewayAccountPoliciesOptions.SetPrivatePathServiceGatewayID(ppsgId) + if accountIntf, ok := d.GetOk("account"); ok { + account := accountIntf.(string) + 
listPrivatePathServiceGatewayAccountPoliciesOptions.AccountID = &account + } + var pager *vpcv1.PrivatePathServiceGatewayAccountPoliciesPager + pager, err = vpcClient.NewPrivatePathServiceGatewayAccountPoliciesPager(listPrivatePathServiceGatewayAccountPoliciesOptions) + if err != nil { + return diag.FromErr(err) + } + + allItems, err := pager.GetAll() + if err != nil { + log.Printf("[DEBUG] PrivatePathServiceGatewayAccountPoliciesPager.GetAll() failed %s", err) + return diag.FromErr(fmt.Errorf("PrivatePathServiceGatewayAccountPoliciesPager.GetAll() failed %s", err)) + } + + d.SetId(ppsgId) + + mapSlice := []map[string]interface{}{} + for _, modelItem := range allItems { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesPrivatePathServiceGatewayAccountPolicyToMap(&modelItem) + if err != nil { + return diag.FromErr(err) + } + mapSlice = append(mapSlice, modelMap) + } + + if err = d.Set("account_policies", mapSlice); err != nil { + return diag.FromErr(fmt.Errorf("Error setting account_policies %s", err)) + } + + return nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesPrivatePathServiceGatewayAccountPolicyToMap(model *vpcv1.PrivatePathServiceGatewayAccountPolicy) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.AccessPolicy != nil { + modelMap["access_policy"] = *model.AccessPolicy + } + if model.Account != nil { + accountMap, err := dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesAccountReferenceToMap(model.Account) + if err != nil { + return modelMap, err + } + modelMap["account"] = []map[string]interface{}{accountMap} + } + if model.CreatedAt != nil { + modelMap["created_at"] = flex.DateTimeToString(model.CreatedAt) + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + // if model.UpdatedAt != nil { + // 
modelMap["updated_at"] = flex.DateTimeToString(model.UpdatedAt) + // } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesAccountReferenceToMap(model *vpcv1.AccountReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesPrivatePathServiceGatewayAccountPolicyCollectionFirstToMap(model *vpcv1.PrivatePathServiceGatewayAccountPolicyCollectionFirst) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPoliciesPrivatePathServiceGatewayAccountPolicyCollectionNextToMap(model *vpcv1.PrivatePathServiceGatewayAccountPolicyCollectionNext) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies_test.go new file mode 100644 index 0000000000..b45cbe54ae --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policies_test.go @@ -0,0 +1,58 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewayAccountPoliciesDataSourceBasic(t *testing.T) { + accessPolicy1 := "deny" + accessPolicy := "deny" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayAccountPoliciesDataSourceConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, accessPolicy1), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.#"), + resource.TestCheckResourceAttr("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.access_policy", accessPolicy1), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.account.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", 
"account_policies.0.account.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.account.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policies.is_private_path_service_gateway_account_policies", "account_policies.0.resource_type"), + ), + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayAccountPoliciesDataSourceConfigBasic(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name, accessPolicy1 string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + resource "ibm_is_private_path_service_gateway_account_policy" "is_private_path_service_gateway_account_policy" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + access_policy = "%s" + account = "%s" + } + + data "ibm_is_private_path_service_gateway_account_policies" "is_private_path_service_gateway_account_policies" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + } + `, accessPolicy1, acc.AccountId) +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy.go 
b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy.go new file mode 100644 index 0000000000..00fe020dcd --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy.go @@ -0,0 +1,144 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGatewayAccountPolicy() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewayAccountPolicyRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The private path service gateway identifier.", + }, + "account_policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The account policy identifier.", + }, + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewedThe enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "account": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The account for this access policy.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this account policy.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was updated.", + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPolicyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + getPrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.GetPrivatePathServiceGatewayAccountPolicyOptions{} + + getPrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(d.Get("private_path_service_gateway").(string)) + getPrivatePathServiceGatewayAccountPolicyOptions.SetID(d.Get("account_policy").(string)) + + privatePathServiceGatewayAccountPolicy, response, err := vpcClient.GetPrivatePathServiceGatewayAccountPolicyWithContext(context, getPrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + log.Printf("[DEBUG] GetPrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, 
response) + return diag.FromErr(fmt.Errorf("GetPrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response)) + } + + d.SetId(fmt.Sprintf("%s/%s", *getPrivatePathServiceGatewayAccountPolicyOptions.PrivatePathServiceGatewayID, *getPrivatePathServiceGatewayAccountPolicyOptions.ID)) + + if err = d.Set("access_policy", privatePathServiceGatewayAccountPolicy.AccessPolicy); err != nil { + return diag.FromErr(fmt.Errorf("Error setting access_policy: %s", err)) + } + + account := []map[string]interface{}{} + if privatePathServiceGatewayAccountPolicy.Account != nil { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayAccountPolicyAccountReferenceToMap(privatePathServiceGatewayAccountPolicy.Account) + if err != nil { + return diag.FromErr(err) + } + account = append(account, modelMap) + } + if err = d.Set("account", account); err != nil { + return diag.FromErr(fmt.Errorf("Error setting account %s", err)) + } + + if err = d.Set("created_at", flex.DateTimeToString(privatePathServiceGatewayAccountPolicy.CreatedAt)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + } + + if err = d.Set("href", privatePathServiceGatewayAccountPolicy.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + + if err = d.Set("resource_type", privatePathServiceGatewayAccountPolicy.ResourceType); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) + } + + // if err = d.Set("updated_at", flex.DateTimeToString(privatePathServiceGatewayAccountPolicy.UpdatedAt)); err != nil { + // return diag.FromErr(fmt.Errorf("Error setting updated_at: %s", err)) + // } + + return nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayAccountPolicyAccountReferenceToMap(model *vpcv1.AccountReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.ResourceType != nil { + modelMap["resource_type"] 
= *model.ResourceType + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy_test.go new file mode 100644 index 0000000000..a4b240092f --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_account_policy_test.go @@ -0,0 +1,57 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewayAccountPolicyDataSourceBasic(t *testing.T) { + accessPolicy := "deny" + accessPolicy1 := "review" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyDataSourceConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, accessPolicy1), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "private_path_service_gateway"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "account.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "resource_type"), + ), + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyDataSourceConfigBasic(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name, accessPolicy1 string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + resource "ibm_is_private_path_service_gateway_account_policy" "is_private_path_service_gateway_account_policy" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + access_policy = "%s" + account = "%s" + } + data "ibm_is_private_path_service_gateway_account_policy" "is_private_path_service_gateway_account_policy" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + account_policy = ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy.id + } + `, accessPolicy1, acc.AccountId) +} diff --git 
a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding.go new file mode 100644 index 0000000000..e443908f27 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding.go @@ -0,0 +1,164 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBinding() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The private path service gateway identifier.", + }, + "endpoint_gateway_binding": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The endpoint gateway binding identifier.", + }, + "account": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The account that created the endpoint gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the endpoint gateway binding was created.", + }, + "expiration_at": &schema.Schema{ + Type: schema.TypeString, + 
Computed: true, + Description: "The expiration date and time for the endpoint gateway binding.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this endpoint gateway binding.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the endpoint gateway binding.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The status of the endpoint gateway binding- `denied`: endpoint gateway binding was denied- `expired`: endpoint gateway binding has expired- `pending`: endpoint gateway binding is awaiting review- `permitted`: endpoint gateway binding was permittedThe enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the endpoint gateway binding was updated.", + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + getPrivatePathServiceGatewayEndpointGatewayBindingOptions := &vpcv1.GetPrivatePathServiceGatewayEndpointGatewayBindingOptions{} + + getPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetPrivatePathServiceGatewayID(d.Get("private_path_service_gateway").(string)) + getPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetID(d.Get("endpoint_gateway_binding").(string)) + + 
privatePathServiceGatewayEndpointGatewayBinding, response, err := vpcClient.GetPrivatePathServiceGatewayEndpointGatewayBindingWithContext(context, getPrivatePathServiceGatewayEndpointGatewayBindingOptions) + if err != nil { + log.Printf("[DEBUG] GetPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response)) + } + + d.SetId(fmt.Sprintf("%s/%s", *getPrivatePathServiceGatewayEndpointGatewayBindingOptions.PrivatePathServiceGatewayID, *privatePathServiceGatewayEndpointGatewayBinding.ID)) + + account := []map[string]interface{}{} + if privatePathServiceGatewayEndpointGatewayBinding.Account != nil { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingAccountReferenceToMap(privatePathServiceGatewayEndpointGatewayBinding.Account) + if err != nil { + return diag.FromErr(err) + } + account = append(account, modelMap) + } + if err = d.Set("account", account); err != nil { + return diag.FromErr(fmt.Errorf("Error setting account %s", err)) + } + + if err = d.Set("created_at", flex.DateTimeToString(privatePathServiceGatewayEndpointGatewayBinding.CreatedAt)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + } + + if err = d.Set("expiration_at", flex.DateTimeToString(privatePathServiceGatewayEndpointGatewayBinding.ExpirationAt)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting expiration_at: %s", err)) + } + + if err = d.Set("href", privatePathServiceGatewayEndpointGatewayBinding.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + + if err = d.Set("lifecycle_state", privatePathServiceGatewayEndpointGatewayBinding.LifecycleState); err != nil { + return diag.FromErr(fmt.Errorf("Error setting lifecycle_state: %s", err)) + } + + if err = d.Set("resource_type", 
privatePathServiceGatewayEndpointGatewayBinding.ResourceType); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) + } + + if err = d.Set("status", privatePathServiceGatewayEndpointGatewayBinding.Status); err != nil { + return diag.FromErr(fmt.Errorf("Error setting status: %s", err)) + } + + // if privatePathServiceGatewayEndpointGatewayBinding.UpdatedAt != nil { + // if err = d.Set("updated_at", flex.DateTimeToString(privatePathServiceGatewayEndpointGatewayBinding.UpdatedAt)); err != nil { + // return diag.FromErr(fmt.Errorf("Error setting updated_at: %s", err)) + // } + // } + + return nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingAccountReferenceToMap(model *vpcv1.AccountReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding_test.go new file mode 100644 index 0000000000..c2f5643e9e --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_binding_test.go @@ -0,0 +1,68 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewayEndpointGatewayBindingDataSourceBasic(t *testing.T) { + accessPolicy := "deny" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + targetName := fmt.Sprintf("tf-egw-target%d", acctest.RandIntRange(10, 100)) + egwName := fmt.Sprintf("tf-egw%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayEndpointGatewayBindingDataSourceConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, egwName, targetName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "private_path_service_gateway"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.account.0.id"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.account.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.lifecycle_state"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.expiration_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.status"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_binding.is_private_path_service_gateway_endpoint_gateway_binding", "endpoint_gateway_bindings.0.updated_at"), + ), + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayEndpointGatewayBindingDataSourceConfigBasic(vpcname, subnetname, zone, cidr, 
lbname, accessPolicy, name, egwName, targetName string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + resource "ibm_is_virtual_endpoint_gateway" "endpoint_gateway" { + name = "%s" + target { + name = "%s" + resource_type = "private_path_service_gateway" + } + vpc = ibm_is_vpc.testacc_vpc.id + resource_group = data.ibm_resource_group.test_acc.id + } + data "ibm_is_private_path_service_gateway_endpoint_gateway_bindings" "is_private_path_service_gateway_endpoint_gateway_bindings" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + } + data "ibm_is_private_path_service_gateway_endpoint_gateway_binding" "is_private_path_service_gateway_endpoint_gateway_binding" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + endpoint_gateway_binding = data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings.endpoint_gateway_bindings.0.id + } + `, egwName, targetName) +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings.go new file mode 100644 index 0000000000..0dfe70246b --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings.go @@ -0,0 +1,220 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindings() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": { + Type: schema.TypeString, + Required: true, + Description: "The private path service gateway identifier.", + }, + "account": { + Type: schema.TypeString, + Optional: true, + Description: "Filters the collection to resources with the specified account identifier.", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Filters the collection to resources with the specified status.", + }, + "endpoint_gateway_bindings": { + Type: schema.TypeList, + Computed: true, + Description: "Collection of endpoint gateway bindings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account": { + Type: schema.TypeList, + Computed: true, + Description: "The account that created the endpoint gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the endpoint gateway binding was created.", + }, + "expiration_at": { + Type: schema.TypeString, + Computed: true, + Description: "The expiration date and time for the endpoint gateway binding.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this endpoint gateway 
binding.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this endpoint gateway binding.", + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the endpoint gateway binding.", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the endpoint gateway binding- `denied`: endpoint gateway binding was denied- `expired`: endpoint gateway binding has expired- `pending`: endpoint gateway binding is awaiting review- `permitted`: endpoint gateway binding was permittedThe enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + }, + "updated_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the endpoint gateway binding was updated.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + listPrivatePathServiceGatewayEndpointGatewayBindingsOptions := &vpcv1.ListPrivatePathServiceGatewayEndpointGatewayBindingsOptions{} + + listPrivatePathServiceGatewayEndpointGatewayBindingsOptions.SetPrivatePathServiceGatewayID(d.Get("private_path_service_gateway").(string)) + + if accountIntf, ok := d.GetOk("account"); ok { + account := accountIntf.(string) + listPrivatePathServiceGatewayEndpointGatewayBindingsOptions.AccountID = &account + } + + if statusIntf, ok := d.GetOk("status"); ok { + status := statusIntf.(string) + 
listPrivatePathServiceGatewayEndpointGatewayBindingsOptions.Status = &status + } + + var pager *vpcv1.PrivatePathServiceGatewayEndpointGatewayBindingsPager + pager, err = vpcClient.NewPrivatePathServiceGatewayEndpointGatewayBindingsPager(listPrivatePathServiceGatewayEndpointGatewayBindingsOptions) + if err != nil { + return diag.FromErr(err) + } + + allItems, err := pager.GetAll() + if err != nil { + log.Printf("[DEBUG] PrivatePathServiceGatewayEndpointGatewayBindingsPager.GetAll() failed %s", err) + return diag.FromErr(fmt.Errorf("PrivatePathServiceGatewayEndpointGatewayBindingsPager.GetAll() failed %s", err)) + } + + d.SetId(*listPrivatePathServiceGatewayEndpointGatewayBindingsOptions.PrivatePathServiceGatewayID) + + mapSlice := []map[string]interface{}{} + for _, modelItem := range allItems { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsPrivatePathServiceGatewayEndpointGatewayBindingToMap(&modelItem) + if err != nil { + return diag.FromErr(err) + } + mapSlice = append(mapSlice, modelMap) + } + + if err = d.Set("endpoint_gateway_bindings", mapSlice); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_bindings %s", err)) + } + + return nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsPrivatePathServiceGatewayEndpointGatewayBindingToMap(model *vpcv1.PrivatePathServiceGatewayEndpointGatewayBinding) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Account != nil { + accountMap, err := dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsAccountReferenceToMap(model.Account) + if err != nil { + return modelMap, err + } + modelMap["account"] = []map[string]interface{}{accountMap} + } + if model.CreatedAt != nil { + modelMap["created_at"] = model.CreatedAt.String() + } + if model.ExpirationAt != nil { + modelMap["expiration_at"] = model.ExpirationAt.String() + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if 
model.ID != nil { + modelMap["id"] = *model.ID + } + if model.LifecycleState != nil { + modelMap["lifecycle_state"] = *model.LifecycleState + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + if model.Status != nil { + modelMap["status"] = *model.Status + } + // if model.UpdatedAt != nil { + // modelMap["updated_at"] = model.UpdatedAt.String() + // } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsAccountReferenceToMap(model *vpcv1.AccountReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsPrivatePathServiceGatewayEndpointGatewayBindingCollectionFirstToMap(model *vpcv1.PrivatePathServiceGatewayEndpointGatewayBindingCollectionFirst) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsPrivatePathServiceGatewayEndpointGatewayBindingCollectionNextToMap(model *vpcv1.PrivatePathServiceGatewayEndpointGatewayBindingCollectionNext) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings_test.go new file mode 100644 index 0000000000..166267609b --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_endpoint_gateway_bindings_test.go @@ -0,0 +1,64 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsDataSourceBasic(t *testing.T) { + accessPolicy := "deny" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + targetName := fmt.Sprintf("tf-egw-target%d", acctest.RandIntRange(10, 100)) + egwName := fmt.Sprintf("tf-egw%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsDataSourceConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, egwName, targetName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "private_path_service_gateway"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.account.0.id"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.account.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.lifecycle_state"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.expiration_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.status"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.is_private_path_service_gateway_endpoint_gateway_bindings", "endpoint_gateway_bindings.0.updated_at"), + ), + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayEndpointGatewayBindingsDataSourceConfigBasic(vpcname, 
subnetname, zone, cidr, lbname, accessPolicy, name, egwName, targetName string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + resource "ibm_is_virtual_endpoint_gateway" "endpoint_gateway" { + name = "%s" + target { + name = "%s" + resource_type = "private_path_service_gateway" + } + vpc = ibm_is_vpc.testacc_vpc.id + resource_group = data.ibm_resource_group.test_acc.id + } + data "ibm_is_private_path_service_gateway_endpoint_gateway_bindings" "is_private_path_service_gateway_endpoint_gateway_bindings" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + } + `, egwName, targetName) +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_test.go new file mode 100644 index 0000000000..5f9ed5f848 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateway_test.go @@ -0,0 +1,95 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewayDataSourceBasic(t *testing.T) { + accessPolicy := "deny" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayDataSourceConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "default_access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "endpoint_gateways_count"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "lifecycle_state"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "published"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "load_balancer.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "load_balancer.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "load_balancer.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "load_balancer.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "load_balancer.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "resource_group.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "resource_group.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "resource_group.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "service_endpoints"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "vpc.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "vpc.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "vpc.0.id"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "vpc.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "vpc.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway", "zonal_affinity"), + + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "default_access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "endpoint_gateways_count"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "lifecycle_state"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "published"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "load_balancer.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "load_balancer.0.crn"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "load_balancer.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "load_balancer.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "load_balancer.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "resource_group.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "resource_group.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "resource_group.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "service_endpoints"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "vpc.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "vpc.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "vpc.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "vpc.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "vpc.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway.is_private_path_service_gateway_by_name", "zonal_affinity"), + ), + }, + }, + }) +} 
+ +func testAccCheckIBMIsPrivatePathServiceGatewayDataSourceConfigBasic(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + data "ibm_is_private_path_service_gateway" "is_private_path_service_gateway" { + private_path_service_gateway = ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + } + data "ibm_is_private_path_service_gateway" "is_private_path_service_gateway_by_name" { + private_path_service_gateway_name = ibm_is_private_path_service_gateway.is_private_path_service_gateway.name + } + `) +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways.go new file mode 100644 index 0000000000..cb40543672 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways.go @@ -0,0 +1,472 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func DataSourceIBMIsPrivatePathServiceGateways() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMIsPrivatePathServiceGatewaysRead, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateways": { + Type: schema.TypeList, + Computed: true, + Description: "Collection of private path service gateways.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the private path service gateway was created.", + }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The CRN for this private path service gateway.", + }, + "default_access_policy": { + Type: schema.TypeString, + Computed: true, + Description: "The policy to use for bindings from accounts without an explicit account policy.", + }, + "endpoint_gateway_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of endpoint gateways using this private path service gateway.", + }, + "endpoint_gateway_binding_auto_delete": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed.", + }, + "endpoint_gateway_binding_auto_delete_timeout": { + Type: schema.TypeInt, + Computed: true, + Description: "If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted.", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The 
URL for this private path service gateway.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this private path service gateway.", + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: "The lifecycle state of the private path service gateway.", + }, + "load_balancer": { + Type: schema.TypeList, + Computed: true, + Description: "The load balancer for this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The load balancer's CRN.", + }, + "deleted": { + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted, and providessome supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The load balancer's canonical URL.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this load balancer.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this load balancer. The name is unique across all load balancers in the VPC.", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this private path service gateway. 
The name is unique across all private path service gateways in the VPC.", + }, + "published": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates the availability of this private path service gateway- `true`: Any account can request access to this private path service gateway.- `false`: Access is restricted to the account that created this private path service gateway.", + }, + "region": { + Type: schema.TypeList, + Computed: true, + Description: "The region served by this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this region.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this region.", + }, + }, + }, + }, + "resource_group": { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this private path service gateway.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this resource group.", + }, + }, + }, + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "service_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: "The fully qualified domain names for this private path service gateway.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vpc": { + Type: schema.TypeList, + Computed: true, + Description: "The VPC this private path service gateway resides in.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The 
CRN for this VPC.", + }, + "deleted": { + Type: schema.TypeList, + Computed: true, + Description: "If present, this property indicates the referenced resource has been deleted, and providessome supplementary information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about deleted resources.", + }, + }, + }, + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this VPC.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this VPC.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name for this VPC. The name is unique across all VPCs in the region.", + }, + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + }, + }, + }, + "zonal_affinity": { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this private path service gateway has zonal affinity.- `true`: Traffic to the service from a zone will favor service endpoints in the same zone.- `false`: Traffic to the service from a zone will be load balanced across all zones in the region the service resides in.", + }, + }, + }, + }, + }, + } +} + +func dataSourceIBMIsPrivatePathServiceGatewaysRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + listPrivatePathServiceGatewaysOptions := &vpcv1.ListPrivatePathServiceGatewaysOptions{} + + privatePathServiceGatewayCollection, response, err := vpcClient.ListPrivatePathServiceGatewaysWithContext(context, listPrivatePathServiceGatewaysOptions) + if err != nil { + log.Printf("[DEBUG] ListPrivatePathServiceGatewaysWithContext failed %s\n%s", err, response) + return 
diag.FromErr(fmt.Errorf("ListPrivatePathServiceGatewaysWithContext failed %s\n%s", err, response)) + } + + d.SetId(dataSourceIBMIsPrivatePathServiceGatewaysID(d)) + + privatePathServiceGateways := []map[string]interface{}{} + if privatePathServiceGatewayCollection.PrivatePathServiceGateways != nil { + for _, modelItem := range privatePathServiceGatewayCollection.PrivatePathServiceGateways { + modelMap, err := dataSourceIBMIsPrivatePathServiceGatewaysPrivatePathServiceGatewayToMap(&modelItem) + if err != nil { + return diag.FromErr(err) + } + privatePathServiceGateways = append(privatePathServiceGateways, modelMap) + } + } + if err = d.Set("private_path_service_gateways", privatePathServiceGateways); err != nil { + return diag.FromErr(fmt.Errorf("Error setting private_path_service_gateways %s", err)) + } + + return nil +} + +// dataSourceIBMIsPrivatePathServiceGatewaysID returns a reasonable ID for the list. +func dataSourceIBMIsPrivatePathServiceGatewaysID(d *schema.ResourceData) string { + return time.Now().UTC().String() +} + +func dataSourceIBMIsPrivatePathServiceGatewaysPrivatePathServiceGatewayCollectionFirstToMap(model *vpcv1.PrivatePathServiceGatewayCollectionFirst) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysPrivatePathServiceGatewayCollectionNextToMap(model *vpcv1.PrivatePathServiceGatewayCollectionNext) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysPrivatePathServiceGatewayToMap(model *vpcv1.PrivatePathServiceGateway) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CreatedAt != nil { + modelMap["created_at"] = flex.DateTimeToString(model.CreatedAt) + } + if model.CRN != nil { + 
modelMap["crn"] = *model.CRN + } + if model.DefaultAccessPolicy != nil { + modelMap["default_access_policy"] = *model.DefaultAccessPolicy + } + if model.EndpointGatewayCount != nil { + modelMap["endpoint_gateway_count"] = *model.EndpointGatewayCount + } + if model.EndpointGatewayBindingAutoDelete != nil { + modelMap["endpoint_gateway_binding_auto_delete"] = *model.EndpointGatewayBindingAutoDelete + } + if model.EndpointGatewayBindingAutoDeleteTimeout != nil { + modelMap["endpoint_gateway_binding_auto_delete_timeout"] = *model.EndpointGatewayBindingAutoDeleteTimeout + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.LifecycleState != nil { + modelMap["lifecycle_state"] = *model.LifecycleState + } + if model.LoadBalancer != nil { + loadBalancerMap, err := dataSourceIBMIsPrivatePathServiceGatewaysLoadBalancerReferenceToMap(model.LoadBalancer) + if err != nil { + return modelMap, err + } + modelMap["load_balancer"] = []map[string]interface{}{loadBalancerMap} + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.Published != nil { + modelMap["published"] = *model.Published + } + // if model.Remote != nil && model.Remote.Region != nil { + // regionMap, err := dataSourceIBMIsPrivatePathServiceGatewaysRegionReferenceToMap(model.Remote.Region) + // if err != nil { + // return modelMap, err + // } + // modelMap["region"] = []map[string]interface{}{regionMap} + // } + if model.ResourceGroup != nil { + resourceGroupMap, err := dataSourceIBMIsPrivatePathServiceGatewaysResourceGroupReferenceToMap(model.ResourceGroup) + if err != nil { + return modelMap, err + } + modelMap["resource_group"] = []map[string]interface{}{resourceGroupMap} + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + if model.ServiceEndpoints != nil { + modelMap["service_endpoints"] = model.ServiceEndpoints + } + if model.VPC != nil { + vpcMap, err := 
dataSourceIBMIsPrivatePathServiceGatewaysVPCReferenceToMap(model.VPC) + if err != nil { + return modelMap, err + } + modelMap["vpc"] = []map[string]interface{}{vpcMap} + } + if model.ZonalAffinity != nil { + modelMap["zonal_affinity"] = *model.ZonalAffinity + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysLoadBalancerReferenceToMap(model *vpcv1.LoadBalancerReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CRN != nil { + modelMap["crn"] = *model.CRN + } + if model.Deleted != nil { + deletedMap, err := dataSourceIBMIsPrivatePathServiceGatewaysLoadBalancerReferenceDeletedToMap(model.Deleted) + if err != nil { + return modelMap, err + } + modelMap["deleted"] = []map[string]interface{}{deletedMap} + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysLoadBalancerReferenceDeletedToMap(model *vpcv1.Deleted) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.MoreInfo != nil { + modelMap["more_info"] = *model.MoreInfo + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysRegionReferenceToMap(model *vpcv1.RegionReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysResourceGroupReferenceToMap(model *vpcv1.ResourceGroupReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + 
if model.Name != nil { + modelMap["name"] = *model.Name + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysVPCReferenceToMap(model *vpcv1.VPCReference) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.CRN != nil { + modelMap["crn"] = *model.CRN + } + if model.Deleted != nil { + deletedMap, err := dataSourceIBMIsPrivatePathServiceGatewaysVPCReferenceDeletedToMap(model.Deleted) + if err != nil { + return modelMap, err + } + modelMap["deleted"] = []map[string]interface{}{deletedMap} + } + if model.Href != nil { + modelMap["href"] = *model.Href + } + if model.ID != nil { + modelMap["id"] = *model.ID + } + if model.Name != nil { + modelMap["name"] = *model.Name + } + if model.ResourceType != nil { + modelMap["resource_type"] = *model.ResourceType + } + return modelMap, nil +} + +func dataSourceIBMIsPrivatePathServiceGatewaysVPCReferenceDeletedToMap(model *vpcv1.Deleted) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.MoreInfo != nil { + modelMap["more_info"] = *model.MoreInfo + } + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways_test.go b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways_test.go new file mode 100644 index 0000000000..d360458021 --- /dev/null +++ b/ibm/service/vpc/data_source_ibm_is_private_path_service_gateways_test.go @@ -0,0 +1,60 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" +) + +func TestAccIBMIsPrivatePathServiceGatewaysDataSourceBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewaysDataSourceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.default_access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.endpoint_gateway_count"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.lifecycle_state"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.published"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.load_balancer.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.load_balancer.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.load_balancer.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.load_balancer.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.load_balancer.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.resource_group.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.resource_group.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.resource_group.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", 
"private_path_service_gateways.0.service_endpoints"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.vpc.0.name"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.vpc.0.crn"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.vpc.0.id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.vpc.0.href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.vpc.0.resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateways.is_private_path_service_gateways", "private_path_service_gateways.0.zonal_affinity"), + ), + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewaysDataSourceConfigBasic() string { + return fmt.Sprintf(` + data "ibm_is_private_path_service_gateways" "is_private_path_service_gateways" { + } + `) +} diff --git a/ibm/service/vpc/data_source_ibm_is_subnet.go b/ibm/service/vpc/data_source_ibm_is_subnet.go index 57885ea49c..66a400eb0d 100644 --- a/ibm/service/vpc/data_source_ibm_is_subnet.go +++ b/ibm/service/vpc/data_source_ibm_is_subnet.go @@ -174,6 +174,11 @@ func DataSourceIBMISSubnet() *schema.Resource { Computed: true, Description: "The user-defined name for this routing table.", }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The crn for this routing table.", + }, "resource_type": { Type: schema.TypeString, Computed: true, @@ -354,6 +359,9 @@ func dataSourceSubnetRoutingTableToMap(routingTableItem vpcv1.RoutingTableRefere if routingTableItem.Name != nil { routingTableMap["name"] = 
routingTableItem.Name } + if routingTableItem.CRN != nil { + routingTableMap["crn"] = routingTableItem.CRN + } if routingTableItem.ResourceType != nil { routingTableMap["resource_type"] = routingTableItem.ResourceType } diff --git a/ibm/service/vpc/data_source_ibm_is_subnets.go b/ibm/service/vpc/data_source_ibm_is_subnets.go index 09950f0026..381b00018c 100644 --- a/ibm/service/vpc/data_source_ibm_is_subnets.go +++ b/ibm/service/vpc/data_source_ibm_is_subnets.go @@ -155,6 +155,11 @@ func DataSourceIBMISSubnets() *schema.Resource { Computed: true, Description: "The user-defined name for this routing table.", }, + "crn": { + Type: schema.TypeString, + Computed: true, + Description: "The crn for this routing table.", + }, "resource_type": { Type: schema.TypeString, Computed: true, diff --git a/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateway.go b/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateway.go index 8d24c6dbc6..c2c03005f7 100644 --- a/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateway.go +++ b/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateway.go @@ -61,6 +61,32 @@ func DataSourceIBMISEndpointGateway() *schema.Resource { Computed: true, Description: "Endpoint gateway lifecycle state", }, + isVirtualEndpointGatewayLifecycleReasons: { + Type: schema.TypeList, + Computed: true, + Description: "The reasons for the current lifecycle_state (if any).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the reason for this lifecycle state.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the reason for this lifecycle state.", + }, + + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about the reason for this lifecycle state.", + }, + }, + }, + }, isVirtualEndpointGatewaySecurityGroups: { Type: 
schema.TypeSet, Computed: true, @@ -113,6 +139,11 @@ func DataSourceIBMISEndpointGateway() *schema.Resource { Computed: true, Description: "The target name", }, + isVirtualEndpointGatewayTargetCRN: { + Type: schema.TypeString, + Computed: true, + Description: "The target crn", + }, isVirtualEndpointGatewayTargetResourceType: { Type: schema.TypeString, Computed: true, @@ -173,6 +204,9 @@ func dataSourceIBMISEndpointGatewayRead( d.Set(isVirtualEndpointGatewayHealthState, result.HealthState) d.Set(isVirtualEndpointGatewayCreatedAt, result.CreatedAt.String()) d.Set(isVirtualEndpointGatewayLifecycleState, result.LifecycleState) + if err := d.Set(isVirtualEndpointGatewayLifecycleReasons, resourceEGWFlattenLifecycleReasons(result.LifecycleReasons)); err != nil { + return fmt.Errorf("[ERROR] Error setting lifecycle_reasons: %s", err) + } d.Set(isVirtualEndpointGatewayResourceType, result.ResourceType) d.Set(isVirtualEndpointGatewayIPs, flattenIPs(result.Ips)) d.Set(isVirtualEndpointGatewayResourceGroupID, result.ResourceGroup.ID) diff --git a/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateways.go b/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateways.go index 2d27412b0a..d3f40faffa 100644 --- a/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateways.go +++ b/ibm/service/vpc/data_source_ibm_is_virtual_endpoint_gateways.go @@ -91,6 +91,32 @@ func DataSourceIBMISEndpointGateways() *schema.Resource { Computed: true, Description: "Endpoint gateway lifecycle state", }, + isVirtualEndpointGatewayLifecycleReasons: { + Type: schema.TypeList, + Computed: true, + Description: "The reasons for the current lifecycle_state (if any).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the reason for this lifecycle state.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the reason for 
this lifecycle state.", + }, + + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about the reason for this lifecycle state.", + }, + }, + }, + }, isVirtualEndpointGatewaySecurityGroups: { Type: schema.TypeSet, Computed: true, @@ -213,6 +239,7 @@ func dataSourceIBMISEndpointGatewaysRead(d *schema.ResourceData, meta interface{ endpointGatewayOutput[isVirtualEndpointGatewayResourceType] = (*endpointGateway.ResourceType) endpointGatewayOutput[isVirtualEndpointGatewayHealthState] = *endpointGateway.HealthState endpointGatewayOutput[isVirtualEndpointGatewayLifecycleState] = *endpointGateway.LifecycleState + endpointGatewayOutput[isVirtualEndpointGatewayLifecycleReasons] = resourceEGWFlattenLifecycleReasons(endpointGateway.LifecycleReasons) endpointGatewayOutput[isVirtualEndpointGatewayResourceGroupID] = *endpointGateway.ResourceGroup.ID endpointGatewayOutput[isVirtualEndpointGatewayCRN] = *endpointGateway.CRN endpointGatewayOutput[isVirtualEndpointGatewayVpcID] = *endpointGateway.VPC.ID diff --git a/ibm/service/vpc/data_source_ibm_is_volume.go b/ibm/service/vpc/data_source_ibm_is_volume.go index 95af9adeab..70e09d61bd 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume.go +++ b/ibm/service/vpc/data_source_ibm_is_volume.go @@ -37,6 +37,23 @@ func DataSourceIBMISVolume() *schema.Resource { Computed: true, Description: "Indicates whether a running virtual server instance has an attachment to this volume.", }, + // defined_performance changes + "adjustable_capacity_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable capacity for this volume.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "adjustable_iops_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable IOPS for this volume.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, 
isVolumeAttachmentState: { Type: schema.TypeString, Computed: true, @@ -467,6 +484,15 @@ func volumeGet(d *schema.ResourceData, meta interface{}, name string) error { if vol.HealthState != nil { d.Set(isVolumeHealthState, *vol.HealthState) } + + if err = d.Set("adjustable_capacity_states", vol.AdjustableCapacityStates); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting adjustable_capacity_states: %s", err), "(Data) ibm_is_volume", "read", "set-adjustable_capacity_states") + } + + if err = d.Set("adjustable_iops_states", vol.AdjustableIopsStates); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting adjustable_iops_states: %s", err), "(Data) ibm_is_volume", "read", "set-adjustable_iops_states") + } + return nil } diff --git a/ibm/service/vpc/data_source_ibm_is_volume_profile.go b/ibm/service/vpc/data_source_ibm_is_volume_profile.go index 26dab62937..627335b481 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume_profile.go +++ b/ibm/service/vpc/data_source_ibm_is_volume_profile.go @@ -4,7 +4,13 @@ package vpc import ( + "context" + "fmt" + "log" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -15,7 +21,7 @@ const ( func DataSourceIBMISVolumeProfile() *schema.Resource { return &schema.Resource{ - Read: dataSourceIBMISVolumeProfileRead, + ReadContext: dataSourceIBMISVolumeProfileRead, Schema: map[string]*schema.Schema{ @@ -24,17 +30,202 @@ func DataSourceIBMISVolumeProfile() *schema.Resource { Required: true, Description: "Volume profile name", }, - - isVolumeProfileFamily: { + "boot_capacity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile 
field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "capacity": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + 
}, + }, + }, + }, + }, + "family": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "Volume profile family", + Description: "The product family this volume profile belongs to.The enumerated values for this property may[expand](https://cloud.ibm.com/apidocs/vpc#property-value-expansion) in the future.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this volume profile.", + }, + "iops": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + // defined_performance changes + "adjustable_capacity_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The 
attachment states that support adjustable capacity for a volume with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "adjustable_iops_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable IOPS for a volume with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, }, }, } } -func dataSourceIBMISVolumeProfileRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceIBMISVolumeProfileRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { name := d.Get(isVolumeProfile).(string) @@ -45,21 +236,334 @@ func dataSourceIBMISVolumeProfileRead(d *schema.ResourceData, meta interface{}) return nil } -func volumeProfileGet(d *schema.ResourceData, meta interface{}, name string) error { +func volumeProfileGet(d *schema.ResourceData, meta interface{}, name string) diag.Diagnostics { sess, err := vpcClient(meta) if err != nil { - return err + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profile", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } getVolumeProfileOptions := &vpcv1.GetVolumeProfileOptions{ Name: &name, } - profile, _, err := sess.GetVolumeProfile(getVolumeProfileOptions) + volumeProfile, _, err := sess.GetVolumeProfile(getVolumeProfileOptions) if err != nil { - return err + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetVolumeProfileWithContext failed: %s", err.Error()), "(Data) ibm_is_volume_profile", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } // For lack of anything better, compose our id from 
profile name. - d.SetId(*profile.Name) - d.Set(isVolumeProfile, *profile.Name) - d.Set(isVolumeProfileFamily, *profile.Family) + d.SetId(*volumeProfile.Name) + + bootCapacity := []map[string]interface{}{} + if volumeProfile.BootCapacity != nil { + modelMap, err := DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityToMap(volumeProfile.BootCapacity) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + bootCapacity = append(bootCapacity, modelMap) + } + if err = d.Set("boot_capacity", bootCapacity); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting boot_capacity: %s", err), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + + capacity := []map[string]interface{}{} + if volumeProfile.Capacity != nil { + modelMap, err := DataSourceIBMIsVolumeProfileVolumeProfileCapacityToMap(volumeProfile.Capacity) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + capacity = append(capacity, modelMap) + } + if err = d.Set("capacity", capacity); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting capacity: %s", err), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + + if err = d.Set("family", volumeProfile.Family); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting family: %s", err), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + + if err = d.Set("href", volumeProfile.Href); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting href: %s", err), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + + iops := []map[string]interface{}{} + if volumeProfile.Iops != nil { + modelMap, err := DataSourceIBMIsVolumeProfileVolumeProfileIopsToMap(volumeProfile.Iops) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) 
ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + iops = append(iops, modelMap) + } + if err = d.Set("iops", iops); err != nil { + tfErr := flex.TerraformErrorf(err, fmt.Sprintf("Error setting iops: %s", err), "(Data) ibm_is_volume_profile", "read") + return tfErr.GetDiag() + } + // defined_performance changes + + adjustableCapacityStates := []map[string]interface{}{} + if volumeProfile.AdjustableCapacityStates != nil { + modelMap, err := DataSourceIBMIsVolumeProfileVolumeProfileAdjustableCapacityStatesToMap(volumeProfile.AdjustableCapacityStates) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profile", "read", "adjustable_capacity_states-to-map").GetDiag() + } + adjustableCapacityStates = append(adjustableCapacityStates, modelMap) + } + if err = d.Set("adjustable_capacity_states", adjustableCapacityStates); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting adjustable_capacity_states: %s", err), "(Data) ibm_is_volume_profile", "read", "set-adjustable_capacity_states").GetDiag() + } + + adjustableIopsStates := []map[string]interface{}{} + if volumeProfile.AdjustableIopsStates != nil { + modelMap, err := DataSourceIBMIsVolumeProfileVolumeProfileAdjustableIopsStatesToMap(volumeProfile.AdjustableIopsStates) + if err != nil { + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profile", "read", "adjustable_iops_states-to-map").GetDiag() + } + adjustableIopsStates = append(adjustableIopsStates, modelMap) + } + if err = d.Set("adjustable_iops_states", adjustableIopsStates); err != nil { + return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting adjustable_iops_states: %s", err), "(Data) ibm_is_volume_profile", "read", "set-adjustable_iops_states").GetDiag() + } + return nil } + +func DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityToMap(model vpcv1.VolumeProfileBootCapacityIntf) (map[string]interface{}, error) { + if 
_, ok := model.(*vpcv1.VolumeProfileBootCapacityFixed); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityFixedToMap(model.(*vpcv1.VolumeProfileBootCapacityFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityRangeToMap(model.(*vpcv1.VolumeProfileBootCapacityRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityEnum); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityEnumToMap(model.(*vpcv1.VolumeProfileBootCapacityEnum)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityDependentRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityDependentRangeToMap(model.(*vpcv1.VolumeProfileBootCapacityDependentRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacity); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileBootCapacity) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileBootCapacityIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityFixedToMap(model *vpcv1.VolumeProfileBootCapacityFixed) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityRangeToMap(model 
*vpcv1.VolumeProfileBootCapacityRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityEnumToMap(model *vpcv1.VolumeProfileBootCapacityEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileBootCapacityDependentRangeToMap(model *vpcv1.VolumeProfileBootCapacityDependentRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileCapacityToMap(model vpcv1.VolumeProfileCapacityIntf) (map[string]interface{}, error) { + if _, ok := model.(*vpcv1.VolumeProfileCapacityFixed); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileCapacityFixedToMap(model.(*vpcv1.VolumeProfileCapacityFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileCapacityRangeToMap(model.(*vpcv1.VolumeProfileCapacityRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityEnum); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileCapacityEnumToMap(model.(*vpcv1.VolumeProfileCapacityEnum)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityDependentRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileCapacityDependentRangeToMap(model.(*vpcv1.VolumeProfileCapacityDependentRange)) + } else 
if _, ok := model.(*vpcv1.VolumeProfileCapacity); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileCapacity) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileCapacityIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfileVolumeProfileCapacityFixedToMap(model *vpcv1.VolumeProfileCapacityFixed) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileCapacityRangeToMap(model *vpcv1.VolumeProfileCapacityRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileCapacityEnumToMap(model *vpcv1.VolumeProfileCapacityEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileCapacityDependentRangeToMap(model *vpcv1.VolumeProfileCapacityDependentRange) 
(map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileIopsToMap(model vpcv1.VolumeProfileIopsIntf) (map[string]interface{}, error) { + if _, ok := model.(*vpcv1.VolumeProfileIopsFixed); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileIopsFixedToMap(model.(*vpcv1.VolumeProfileIopsFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileIopsRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileIopsRangeToMap(model.(*vpcv1.VolumeProfileIopsRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileIopsEnum); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileIopsEnumToMap(model.(*vpcv1.VolumeProfileIopsEnum)) + } else if _, ok := model.(*vpcv1.VolumeProfileIopsDependentRange); ok { + return DataSourceIBMIsVolumeProfileVolumeProfileIopsDependentRangeToMap(model.(*vpcv1.VolumeProfileIopsDependentRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileIops); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileIops) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileIopsIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfileVolumeProfileIopsFixedToMap(model *vpcv1.VolumeProfileIopsFixed) 
(map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileIopsRangeToMap(model *vpcv1.VolumeProfileIopsRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileIopsEnumToMap(model *vpcv1.VolumeProfileIopsEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileIopsDependentRangeToMap(model *vpcv1.VolumeProfileIopsDependentRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileAdjustableCapacityStatesToMap(model *vpcv1.VolumeProfileAdjustableCapacityStates) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfileVolumeProfileAdjustableIopsStatesToMap(model *vpcv1.VolumeProfileAdjustableIopsStates) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} diff --git a/ibm/service/vpc/data_source_ibm_is_volume_profile_test.go 
b/ibm/service/vpc/data_source_ibm_is_volume_profile_test.go index f59a294373..baf5de3b26 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume_profile_test.go +++ b/ibm/service/vpc/data_source_ibm_is_volume_profile_test.go @@ -29,11 +29,40 @@ func TestAccIBMISVolumeProfileDataSource_basic(t *testing.T) { }, }) } +func TestAccIBMISVolumeProfileDataSource_sdpbasic(t *testing.T) { + resName := "data.ibm_is_volume_profile.test1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVolumeProfileDataSourceSdpConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resName, "name", "sdp"), + resource.TestCheckResourceAttrSet(resName, "family"), + resource.TestCheckResourceAttrSet(resName, "adjustable_capacity_states.#"), + resource.TestCheckResourceAttrSet(resName, "adjustable_capacity_states.0.values.#"), + resource.TestCheckResourceAttrSet(resName, "adjustable_iops_states.#"), + resource.TestCheckResourceAttrSet(resName, "adjustable_iops_states.0.values.#"), + resource.TestCheckResourceAttrSet(resName, "boot_capacity.#"), + resource.TestCheckResourceAttrSet(resName, "capacity.#"), + resource.TestCheckResourceAttrSet(resName, "iops.#"), + ), + }, + }, + }) +} func testAccCheckIBMISVolumeProfileDataSourceConfig() string { return fmt.Sprintf(` - -data "ibm_is_volume_profile" "test1" { - name = "%s" -}`, acc.VolumeProfileName) + data "ibm_is_volume_profile" "test1" { + name = "%s" + }`, acc.VolumeProfileName) +} +func testAccCheckIBMISVolumeProfileDataSourceSdpConfig() string { + return fmt.Sprintf(` + data "ibm_is_volume_profile" "test1" { + name = "%s" + }`, "sdp") } diff --git a/ibm/service/vpc/data_source_ibm_is_volume_profiles.go b/ibm/service/vpc/data_source_ibm_is_volume_profiles.go index 7315e82f3a..8599bf7ce4 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume_profiles.go +++ 
b/ibm/service/vpc/data_source_ibm_is_volume_profiles.go @@ -4,11 +4,15 @@ package vpc import ( + "context" + "encoding/json" "fmt" + "log" "time" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -18,7 +22,7 @@ const ( func DataSourceIBMISVolumeProfiles() *schema.Resource { return &schema.Resource{ - Read: dataSourceIBMISVolumeProfilesRead, + ReadContext: dataSourceIBMISVolumeProfilesRead, Schema: map[string]*schema.Schema{ @@ -28,13 +32,201 @@ func DataSourceIBMISVolumeProfiles() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, + "boot_capacity": &schema.Schema{ + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, }, - "family": { - Type: schema.TypeString, + "capacity": &schema.Schema{ + Type: 
schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The product family this volume profile belongs to.The enumerated values for this property may[expand](https://cloud.ibm.com/apidocs/vpc#property-value-expansion) in the future.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this volume profile.", + }, + "iops": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "value": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The value for this profile field.", + }, + "default": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The default value for this 
profile field.", + }, + "max": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The maximum value for this profile field.", + }, + "min": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The minimum value for this profile field.", + }, + "step": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The increment step value for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The permitted values for this profile field.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this volume profile.", + }, + // defined_performance changes + "adjustable_capacity_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable capacity for a volume with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "adjustable_iops_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The type for this profile field.", + }, + "values": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable IOPS for a volume with this profile.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, }, }, }, @@ -43,11 +235,13 @@ func DataSourceIBMISVolumeProfiles() *schema.Resource { } } -func dataSourceIBMISVolumeProfilesRead(d 
*schema.ResourceData, meta interface{}) error { +func dataSourceIBMISVolumeProfilesRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { err := volumeProfilesList(d, meta) if err != nil { - return err + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profiles", "read") + log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage()) + return tfErr.GetDiag() } return nil } @@ -83,12 +277,12 @@ func volumeProfilesList(d *schema.ResourceData, meta interface{}) error { // } profilesInfo := make([]map[string]interface{}, 0) for _, profile := range allrecs { - - l := map[string]interface{}{ - "name": *profile.Name, - "family": *profile.Family, + modelMap, err := DataSourceIBMIsVolumeProfilesVolumeProfileToMap(&profile) + if err != nil { + tfErr := flex.TerraformErrorf(err, err.Error(), "(Data) ibm_is_volume_profiles", "read") + return tfErr } - profilesInfo = append(profilesInfo, l) + profilesInfo = append(profilesInfo, modelMap) } d.SetId(dataSourceIBMISVolumeProfilesID(d)) d.Set(isVolumeProfiles, profilesInfo) @@ -99,3 +293,273 @@ func volumeProfilesList(d *schema.ResourceData, meta interface{}) error { func dataSourceIBMISVolumeProfilesID(d *schema.ResourceData) string { return time.Now().UTC().String() } + +func DataSourceIBMIsVolumeProfilesVolumeProfileToMap(model *vpcv1.VolumeProfile) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.BootCapacity != nil { + bootCapacityMap, err := DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityToMap(model.BootCapacity) + if err != nil { + return modelMap, err + } + modelMap["boot_capacity"] = []map[string]interface{}{bootCapacityMap} + } + if model.Capacity != nil { + capacityMap, err := DataSourceIBMIsVolumeProfilesVolumeProfileCapacityToMap(model.Capacity) + if err != nil { + return modelMap, err + } + modelMap["capacity"] = []map[string]interface{}{capacityMap} + } + modelMap["family"] = *model.Family + modelMap["href"] = *model.Href + if 
model.Iops != nil { + iopsMap, err := DataSourceIBMIsVolumeProfilesVolumeProfileIopsToMap(model.Iops) + if err != nil { + return modelMap, err + } + modelMap["iops"] = []map[string]interface{}{iopsMap} + } + modelMap["name"] = *model.Name + if model.AdjustableCapacityStates != nil { + adjustableCapacityStates, err := DataSourceIBMIsVolumeProfileVolumeProfileAdjustableCapacityStatesToMap(model.AdjustableCapacityStates) + if err != nil { + return modelMap, err + } + modelMap["adjustable_capacity_states"] = []map[string]interface{}{adjustableCapacityStates} + } + if model.AdjustableIopsStates != nil { + adjustableIopsStates, err := DataSourceIBMIsVolumeProfileVolumeProfileAdjustableIopsStatesToMap(model.AdjustableIopsStates) + if err != nil { + return modelMap, err + } + modelMap["adjustable_iops_states"] = []map[string]interface{}{adjustableIopsStates} + } + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityToMap(model vpcv1.VolumeProfileBootCapacityIntf) (map[string]interface{}, error) { + if _, ok := model.(*vpcv1.VolumeProfileBootCapacityFixed); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityFixedToMap(model.(*vpcv1.VolumeProfileBootCapacityFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityRangeToMap(model.(*vpcv1.VolumeProfileBootCapacityRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityEnum); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityEnumToMap(model.(*vpcv1.VolumeProfileBootCapacityEnum)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacityDependentRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityDependentRangeToMap(model.(*vpcv1.VolumeProfileBootCapacityDependentRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileBootCapacity); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileBootCapacity) + if 
model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileBootCapacityIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityFixedToMap(model *vpcv1.VolumeProfileBootCapacityFixed) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityRangeToMap(model *vpcv1.VolumeProfileBootCapacityRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityEnumToMap(model *vpcv1.VolumeProfileBootCapacityEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileBootCapacityDependentRangeToMap(model *vpcv1.VolumeProfileBootCapacityDependentRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + 
modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileCapacityToMap(model vpcv1.VolumeProfileCapacityIntf) (map[string]interface{}, error) { + if _, ok := model.(*vpcv1.VolumeProfileCapacityFixed); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileCapacityFixedToMap(model.(*vpcv1.VolumeProfileCapacityFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileCapacityRangeToMap(model.(*vpcv1.VolumeProfileCapacityRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityEnum); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileCapacityEnumToMap(model.(*vpcv1.VolumeProfileCapacityEnum)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacityDependentRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileCapacityDependentRangeToMap(model.(*vpcv1.VolumeProfileCapacityDependentRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileCapacity); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileCapacity) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileCapacityIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileCapacityFixedToMap(model *vpcv1.VolumeProfileCapacityFixed) (map[string]interface{}, error) { + modelMap := 
make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileCapacityRangeToMap(model *vpcv1.VolumeProfileCapacityRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileCapacityEnumToMap(model *vpcv1.VolumeProfileCapacityEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileCapacityDependentRangeToMap(model *vpcv1.VolumeProfileCapacityDependentRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileIopsToMap(model vpcv1.VolumeProfileIopsIntf) (map[string]interface{}, error) { + if _, ok := model.(*vpcv1.VolumeProfileIopsFixed); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileIopsFixedToMap(model.(*vpcv1.VolumeProfileIopsFixed)) + } else if _, ok := model.(*vpcv1.VolumeProfileIopsRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileIopsRangeToMap(model.(*vpcv1.VolumeProfileIopsRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileIopsEnum); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileIopsEnumToMap(model.(*vpcv1.VolumeProfileIopsEnum)) + } else if _, ok := 
model.(*vpcv1.VolumeProfileIopsDependentRange); ok { + return DataSourceIBMIsVolumeProfilesVolumeProfileIopsDependentRangeToMap(model.(*vpcv1.VolumeProfileIopsDependentRange)) + } else if _, ok := model.(*vpcv1.VolumeProfileIops); ok { + modelMap := make(map[string]interface{}) + model := model.(*vpcv1.VolumeProfileIops) + if model.Type != nil { + modelMap["type"] = *model.Type + } + if model.Value != nil { + modelMap["value"] = flex.IntValue(model.Value) + } + if model.Default != nil { + modelMap["default"] = flex.IntValue(model.Default) + } + if model.Max != nil { + modelMap["max"] = flex.IntValue(model.Max) + } + if model.Min != nil { + modelMap["min"] = flex.IntValue(model.Min) + } + if model.Step != nil { + modelMap["step"] = flex.IntValue(model.Step) + } + if model.Values != nil { + modelMap["values"] = model.Values + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized vpcv1.VolumeProfileIopsIntf subtype encountered") + } +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileIopsFixedToMap(model *vpcv1.VolumeProfileIopsFixed) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["type"] = *model.Type + modelMap["value"] = flex.IntValue(model.Value) + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileIopsRangeToMap(model *vpcv1.VolumeProfileIopsRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileIopsEnumToMap(model *vpcv1.VolumeProfileIopsEnum) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["default"] = flex.IntValue(model.Default) + modelMap["type"] = *model.Type + modelMap["values"] = model.Values + return 
modelMap, nil +} + +func DataSourceIBMIsVolumeProfilesVolumeProfileIopsDependentRangeToMap(model *vpcv1.VolumeProfileIopsDependentRange) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + modelMap["max"] = flex.IntValue(model.Max) + modelMap["min"] = flex.IntValue(model.Min) + modelMap["step"] = flex.IntValue(model.Step) + modelMap["type"] = *model.Type + return modelMap, nil +} + +func printfull(response interface{}) string { + output, err := json.MarshalIndent(response, "", " ") + if err == nil { + return fmt.Sprintf("%+v\n", string(output)) + } + return fmt.Sprintf("Error : %#v", response) +} diff --git a/ibm/service/vpc/data_source_ibm_is_volume_profiles_test.go b/ibm/service/vpc/data_source_ibm_is_volume_profiles_test.go index 75568c2633..5cee03d018 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume_profiles_test.go +++ b/ibm/service/vpc/data_source_ibm_is_volume_profiles_test.go @@ -29,6 +29,29 @@ func TestAccIBMISVolumeProfilesDataSource_basic(t *testing.T) { }, }) } +func TestAccIBMISVolumeProfilesDataSource_Sdp(t *testing.T) { + resName := "data.ibm_is_volume_profiles.test1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVolumeProfilesDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resName, "profiles.0.name"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.family"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.adjustable_capacity_states.#"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.adjustable_iops_states.#"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.boot_capacity.#"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.capacity.#"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.href"), + resource.TestCheckResourceAttrSet(resName, "profiles.0.iops.#"), + ), + }, + }, + }) +} 
func testAccCheckIBMISVolumeProfilesDataSourceConfig() string { // status filter defaults to empty diff --git a/ibm/service/vpc/data_source_ibm_is_volume_test.go b/ibm/service/vpc/data_source_ibm_is_volume_test.go index b35015d9b0..4c2b577530 100644 --- a/ibm/service/vpc/data_source_ibm_is_volume_test.go +++ b/ibm/service/vpc/data_source_ibm_is_volume_test.go @@ -49,6 +49,47 @@ func TestAccIBMISVolumeDatasource_basic(t *testing.T) { }, }) } +func TestAccIBMISVolumeDatasource_Sdp(t *testing.T) { + name := fmt.Sprintf("tf-vol-%d", acctest.RandIntRange(10, 100)) + zone := "eu-gb-1" + resName := "data.ibm_is_volume.testacc_dsvol" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVolumeDataSourceSdpConfig(name, zone), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + resName, "name", name), + resource.TestCheckResourceAttr( + resName, "zone", zone), + resource.TestCheckResourceAttrSet( + resName, "active"), + resource.TestCheckResourceAttrSet( + resName, "attachment_state"), + resource.TestCheckResourceAttrSet( + resName, "bandwidth"), + resource.TestCheckResourceAttrSet( + resName, "busy"), + resource.TestCheckResourceAttrSet( + resName, "created_at"), + resource.TestCheckResourceAttrSet( + resName, "resource_group"), + resource.TestCheckResourceAttrSet( + resName, "profile"), + resource.TestCheckResourceAttrSet( + resName, "adjustable_capacity_states.#"), + resource.TestCheckResourceAttrSet( + resName, "adjustable_iops_states.#"), + resource.TestCheckResourceAttr( + resName, "profile", "sdp"), + ), + }, + }, + }) +} func TestAccIBMISVolumeDatasource_from_snapshot(t *testing.T) { resName := "data.ibm_is_volume.testacc_dsvol" @@ -152,6 +193,17 @@ func testAccCheckIBMISVolumeDataSourceConfig(name, zone string) string { name = ibm_is_volume.testacc_volume.name }`, name, zone) } +func 
testAccCheckIBMISVolumeDataSourceSdpConfig(name, zone string) string { + return fmt.Sprintf(` + resource "ibm_is_volume" "testacc_volume"{ + name = "%s" + profile = "sdp" + zone = "%s" + } + data "ibm_is_volume" "testacc_dsvol" { + name = ibm_is_volume.testacc_volume.name + }`, name, zone) +} func testAccCheckIBMISVolumeDataSourceWithCatalogOffering(vpcname, subnetname, sshname, publicKey, name, planCrn, versionCrn string) string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/data_source_ibm_is_volumes.go b/ibm/service/vpc/data_source_ibm_is_volumes.go index 8a8e449da4..014814b4ea 100644 --- a/ibm/service/vpc/data_source_ibm_is_volumes.go +++ b/ibm/service/vpc/data_source_ibm_is_volumes.go @@ -392,6 +392,25 @@ func DataSourceIBMIsVolumes() *schema.Resource { }, }, }, + + // defined_performance changes + + "adjustable_capacity_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable capacity for this volume.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "adjustable_iops_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable IOPS for this volume.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, isVolumesStatus: &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -806,6 +825,8 @@ func dataSourceVolumeCollectionVolumesToMap(volumesItem vpcv1.Volume, meta inter } volumesMap[isVolumeHealthReasons] = healthReasonsList } + volumesMap["adjustable_capacity_states"] = volumesItem.AdjustableCapacityStates + volumesMap["adjustable_iops_states"] = volumesItem.AdjustableIopsStates if volumesItem.CatalogOffering != nil { versionCrn := "" if volumesItem.CatalogOffering.Version != nil && volumesItem.CatalogOffering.Version.CRN != nil { diff --git a/ibm/service/vpc/data_source_ibm_is_volumes_test.go b/ibm/service/vpc/data_source_ibm_is_volumes_test.go index 5e9a147e39..82b1339725 100644 --- 
a/ibm/service/vpc/data_source_ibm_is_volumes_test.go +++ b/ibm/service/vpc/data_source_ibm_is_volumes_test.go @@ -42,6 +42,24 @@ func TestAccIBMIsVolumesDataSourceBasic(t *testing.T) { }, }) } +func TestAccIBMIsVolumesDataSourceSdpBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsVolumesDataSourceSdpConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_volumes.is_volumes", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_volumes.is_volumes", "volumes.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_volumes.is_volumes", "volumes.0.adjustable_iops_states.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_volumes.is_volumes", "volumes.0.adjustable_capacity_states.#"), + resource.TestCheckResourceAttr("data.ibm_is_volumes.is_volumes", "volumes.0.profile.0.name", "sdp"), + ), + }, + }, + }) +} func TestAccIBMIsVolumesFromSnapshotDataSourceBasic(t *testing.T) { resName := "data.ibm_is_volumes.is_volumes" vpcname := fmt.Sprintf("tf-vpc-%d", acctest.RandIntRange(10, 100)) @@ -146,6 +164,12 @@ func testAccCheckIBMIsVolumesDataSourceConfigBasic() string { } `) } +func testAccCheckIBMIsVolumesDataSourceSdpConfig() string { + return fmt.Sprintf(` + data "ibm_is_volumes" "is_volumes" { + } + `) +} func testAccCheckIBMIsVolumesDataSourceConfigFilterByZone() string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_default_routing_table.go b/ibm/service/vpc/data_source_ibm_is_vpc_default_routing_table.go index 2aef27b943..c6989d9125 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_default_routing_table.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_default_routing_table.go @@ -12,6 +12,7 @@ import ( const ( isDefaultRoutingTableID = "default_routing_table" isDefaultRoutingTableHref = "href" + isDefaultRoutingTableCrn = "crn" 
isDefaultRoutingTableName = "name" isDefaultRoutingTableResourceType = "resource_type" isDefaultRoutingTableCreatedAt = "created_at" @@ -24,6 +25,10 @@ const ( isDefaultRTTransitGatewayIngress = "route_transit_gateway_ingress" isDefaultRTVPCZoneIngress = "route_vpc_zone_ingress" isDefaultRTDefault = "is_default" + isDefaultRTResourceGroup = "resource_group" + isDefaultRTResourceGroupHref = "href" + isDefaultRTResourceGroupId = "id" + isDefaultRTResourceGroupName = "name" ) func DataSourceIBMISVPCDefaultRoutingTable() *schema.Resource { @@ -50,6 +55,11 @@ func DataSourceIBMISVPCDefaultRoutingTable() *schema.Resource { Computed: true, Description: "Default Routing table Name", }, + isDefaultRoutingTableCrn: { + Type: schema.TypeString, + Computed: true, + Description: "Default Routing table Crn", + }, isDefaultRoutingTableResourceType: { Type: schema.TypeString, Computed: true, @@ -128,6 +138,30 @@ func DataSourceIBMISVPCDefaultRoutingTable() *schema.Resource { }, }, }, + isDefaultRTResourceGroup: { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this volume.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + isDefaultRTResourceGroupHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + isDefaultRTResourceGroupId: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + isDefaultRTResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, }, } } @@ -150,6 +184,7 @@ func dataSourceIBMISVPCDefaultRoutingTableGet(d *schema.ResourceData, meta inter d.Set(isDefaultRoutingTableID, *result.ID) d.Set(isDefaultRoutingTableHref, *result.Href) d.Set(isDefaultRoutingTableName, *result.Name) + d.Set(isDefaultRoutingTableCrn, *result.CRN) d.Set(isDefaultRoutingTableResourceType, *result.ResourceType) createdAt := 
*result.CreatedAt d.Set(isDefaultRoutingTableCreatedAt, createdAt.String()) @@ -181,6 +216,12 @@ func dataSourceIBMISVPCDefaultRoutingTableGet(d *schema.ResourceData, meta inter } } d.Set(isDefaultRoutingTableRoutesList, routesInfo) + resourceGroupList := []map[string]interface{}{} + if result.ResourceGroup != nil { + resourceGroupMap := routingTableResourceGroupToMap(*result.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + } + d.Set(isDefaultRTResourceGroup, resourceGroupList) d.Set(isDefaultRTVpcID, vpcID) d.SetId(*result.ID) return nil diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go index 30bd93f954..d62e557870 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go @@ -59,7 +59,11 @@ func DataSourceIBMIBMIsVPCRoutingTable() *schema.Resource { Optional: true, Description: "The routing table identifier.", }, - + rtCrn: &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The routing table CRN.", + }, "advertise_routes_to": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -194,6 +198,30 @@ func DataSourceIBMIBMIsVPCRoutingTable() *schema.Resource { }, }, }, + rtResourceGroup: { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this volume.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + rtResourceGroupHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + rtResourceGroupId: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + rtResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, }, } } @@ -311,6 +339,11 @@ func dataSourceIBMIBMIsVPCRoutingTableRead(context context.Context, d 
*schema.Re if err = d.Set("advertise_routes_to", routingTable.AdvertiseRoutesTo); err != nil { return diag.FromErr(fmt.Errorf("[ERROR] Error setting value of advertise_routes_to: %s", err)) } + + if err = d.Set(rtCrn, routingTable.CRN); err != nil { + return diag.FromErr(fmt.Errorf("[ERROR] Error setting value of crn: %s", err)) + } + routes := []map[string]interface{}{} if routingTable.Routes != nil { for _, modelItem := range routingTable.Routes { @@ -339,6 +372,15 @@ func dataSourceIBMIBMIsVPCRoutingTableRead(context context.Context, d *schema.Re return diag.FromErr(fmt.Errorf("[ERROR] Error setting subnets %s", err)) } + resourceGroupList := []map[string]interface{}{} + if routingTable.ResourceGroup != nil { + resourceGroupMap := routingTableResourceGroupToMap(*routingTable.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + } + if err = d.Set(rtResourceGroup, resourceGroupList); err != nil { + return diag.FromErr(fmt.Errorf("[ERROR] Error setting resource group %s", err)) + } + return nil } diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go index ffec0e8c74..fd034a80ab 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go @@ -17,6 +17,7 @@ import ( const ( isRoutingTableAcceptRoutesFrom = "accept_routes_from" isRoutingTableID = "routing_table" + isRoutingTableCrn = "routing_table_crn" isRoutingTableHref = "href" isRoutingTableName = "name" isRoutingTableResourceType = "resource_type" @@ -72,6 +73,11 @@ func DataSourceIBMISVPCRoutingTables() *schema.Resource { Computed: true, Description: "Routing Table ID", }, + isRoutingTableCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The crn of routing table", + }, "advertise_routes_to": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -168,6 +174,30 @@ func DataSourceIBMISVPCRoutingTables() *schema.Resource 
{ }, }, }, + rtResourceGroup: { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this volume.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + rtResourceGroupHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + rtResourceGroupId: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + rtResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, }, }, }, @@ -222,6 +252,9 @@ func dataSourceIBMISVPCRoutingTablesList(d *schema.ResourceData, meta interface{ if routingTable.ID != nil { rtable[isRoutingTableID] = *routingTable.ID } + if routingTable.CRN != nil { + rtable[isRoutingTableCrn] = *routingTable.CRN + } if routingTable.Href != nil { rtable[isRoutingTableHref] = *routingTable.Href } @@ -278,6 +311,14 @@ func dataSourceIBMISVPCRoutingTablesList(d *schema.ResourceData, meta interface{ } } rtable[isRoutingTableRoutesList] = routesInfo + + resourceGroupList := []map[string]interface{}{} + if routingTable.ResourceGroup != nil { + resourceGroupMap := routingTableResourceGroupToMap(*routingTable.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + } + rtable[rtResourceGroup] = resourceGroupList + vpcRoutingTables = append(vpcRoutingTables, rtable) } diff --git a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection.go b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection.go index d45844768a..1bdfcb1274 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection.go +++ b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection.go @@ -86,6 +86,11 @@ func DataSourceIBMISVPNGatewayConnection() *schema.Resource { }, }, }, + "distribute_traffic": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether the traffic is distributed 
between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address.", + }, "href": { Type: schema.TypeString, Computed: true, @@ -574,6 +579,10 @@ func setvpnGatewayConnectionIntfDatasourceData(d *schema.ResourceData, vpn_gatew if err = d.Set("resource_type", vpnGatewayConnection.ResourceType); err != nil { return fmt.Errorf("[ERROR] Error setting resource_type: %s", err) } + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("[ERROR] Error setting distribute_traffic: %s", err) + } + if err = d.Set("status", vpnGatewayConnection.Status); err != nil { return fmt.Errorf("[ERROR] Error setting status: %s", err) } @@ -678,6 +687,9 @@ func setvpnGatewayConnectionIntfDatasourceData(d *schema.ResourceData, vpn_gatew if err = d.Set("status", vpnGatewayConnection.Status); err != nil { return fmt.Errorf("[ERROR] Error setting status: %s", err) } + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("[ERROR] Error setting distribute_traffic: %s", err) + } if err := d.Set("status_reasons", resourceVPNGatewayConnectionFlattenLifecycleReasons(vpnGatewayConnection.StatusReasons)); err != nil { return fmt.Errorf("[ERROR] Error setting status_reasons: %s", err) } @@ -779,6 +791,9 @@ func setvpnGatewayConnectionIntfDatasourceData(d *schema.ResourceData, vpn_gatew if err = d.Set("status", vpnGatewayConnection.Status); err != nil { return fmt.Errorf("[ERROR] Error setting status: %s", err) } + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("[ERROR] Error setting distribute_traffic: %s", err) + } if err := d.Set("status_reasons", resourceVPNGatewayConnectionFlattenLifecycleReasons(vpnGatewayConnection.StatusReasons)); err != nil { return fmt.Errorf("[ERROR] Error setting 
status_reasons: %s", err) } diff --git a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection_test.go b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection_test.go index 60e4027d7c..6f2b7c9ee6 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection_test.go +++ b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connection_test.go @@ -97,6 +97,94 @@ func TestAccIBMIsVPNGatewayConnectionDataSourceBasic(t *testing.T) { }, }) } +func TestAccIBMIsVPNGatewayConnectionDataSourceDistrbuteTraffic(t *testing.T) { + vpcname := fmt.Sprintf("tfvpnuat-vpc-%d", acctest.RandIntRange(100, 200)) + subnetname := fmt.Sprintf("tfvpnuat-subnet-%d", acctest.RandIntRange(100, 200)) + vpngwname := fmt.Sprintf("tfvpnuat-vpngw-%d", acctest.RandIntRange(100, 200)) + name := fmt.Sprintf("tfvpnuat-createname-%d", acctest.RandIntRange(100, 200)) + dt := true + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMIsVPNGatewayConnectionDataSourceDistributeTrafficConfig(vpcname, subnetname, vpngwname, name, dt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "admin_state_up"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "authentication_mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.action"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.interval"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.timeout"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "href"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "peer_address"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "psk"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "status"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "distribute_traffic"), + ), + }, + { + Config: testAccCheckIBMIsVPNGatewayConnectionDataSourceDistributeTrafficConfig(vpcname, subnetname, vpngwname, name, dt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "admin_state_up"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "authentication_mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.action"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.interval"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.timeout"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "peer_address"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "psk"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "status"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example1", "distribute_traffic"), + ), + }, + { + Config: testAccCheckIBMIsVPNGatewayConnectionDataSourceDistributeTrafficConfig(vpcname, subnetname, vpngwname, name, dt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "admin_state_up"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "authentication_mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.action"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.interval"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.timeout"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "peer_address"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "psk"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "status"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example2", "distribute_traffic")), + }, + { + Config: testAccCheckIBMIsVPNGatewayConnectionDataSourceDistributeTrafficConfig(vpcname, 
subnetname, vpngwname, name, dt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "admin_state_up"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "authentication_mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.action"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.interval"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example", "dead_peer_detection.0.timeout"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "mode"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "name"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "peer_address"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "psk"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "resource_type"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "status"), + resource.TestCheckResourceAttrSet("data.ibm_is_vpn_gateway_connection.example3", "distribute_traffic"), + ), + }, + }, + }) +} func testAccCheckIBMIsVPNGatewayConnectionDataSourceConfigBasic(vpc, subnet, vpngwname, name string) string { return fmt.Sprintf(` @@ -143,3 +231,50 @@ func testAccCheckIBMIsVPNGatewayConnectionDataSourceConfigBasic(vpc, subnet, vpn } `, vpc, subnet, acc.ISZoneName, acc.ISCIDR, vpngwname, name) } + +func testAccCheckIBMIsVPNGatewayConnectionDataSourceDistributeTrafficConfig(vpc, subnet, vpngwname, name string, distributeTraffic bool) string { + return fmt.Sprintf(` + 
resource "ibm_is_vpc" "example" { + name = "%s" + + } + resource "ibm_is_subnet" "example" { + name = "%s" + vpc = ibm_is_vpc.example.id + zone = "%s" + ipv4_cidr_block = "%s" + + } + resource "ibm_is_vpn_gateway" "example" { + name = "%s" + subnet = ibm_is_subnet.example.id + mode = "policy" + + } + resource "ibm_is_vpn_gateway_connection" "example" { + name = "%s" + vpn_gateway = ibm_is_vpn_gateway.example.id + peer_address = "1.2.3.4" + peer_cidrs = [ibm_is_subnet.example.ipv4_cidr_block] + local_cidrs = [ibm_is_subnet.example.ipv4_cidr_block] + preshared_key = "VPNDemoPassword" + distribute_traffic = %t + } + data "ibm_is_vpn_gateway_connection" "example" { + vpn_gateway = ibm_is_vpn_gateway.example.id + vpn_gateway_connection = ibm_is_vpn_gateway_connection.example.gateway_connection + } + data "ibm_is_vpn_gateway_connection" "example1" { + vpn_gateway = ibm_is_vpn_gateway.example.id + vpn_gateway_connection_name = ibm_is_vpn_gateway_connection.example.name + } + data "ibm_is_vpn_gateway_connection" "example2" { + vpn_gateway_name = ibm_is_vpn_gateway.example.name + vpn_gateway_connection = ibm_is_vpn_gateway_connection.example.gateway_connection + } + data "ibm_is_vpn_gateway_connection" "example3" { + vpn_gateway_name = ibm_is_vpn_gateway.example.name + vpn_gateway_connection_name = ibm_is_vpn_gateway_connection.example.name + } + `, vpc, subnet, acc.ISZoneName, acc.ISCIDR, vpngwname, name, distributeTraffic) +} diff --git a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections.go b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections.go index ca77dc9826..cef607163f 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections.go +++ b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections.go @@ -67,6 +67,11 @@ func DataSourceIBMISVPNGatewayConnections() *schema.Resource { Computed: true, Description: "Interval for dead peer detection interval", }, + "distribute_traffic": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + 
Description: "Indicates whether the traffic is distributed between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address.", + }, isVPNGatewayConnectionDeadPeerDetectionTimeout: { Type: schema.TypeInt, Computed: true, @@ -345,6 +350,7 @@ func getvpnGatewayConnectionIntfData(vpnGatewayConnectionIntf vpcv1.VPNGatewayCo } gatewayconnection["mode"] = vpnGatewayConnection.Mode gatewayconnection["name"] = vpnGatewayConnection.Name + gatewayconnection["distribute_traffic"] = vpnGatewayConnection.DistributeTraffic // breaking changes gatewayconnection["establish_mode"] = vpnGatewayConnection.EstablishMode @@ -399,6 +405,7 @@ func getvpnGatewayConnectionIntfData(vpnGatewayConnectionIntf vpcv1.VPNGatewayCo if vpnGatewayConnection.IkePolicy != nil { gatewayconnection["ike_policy"] = vpnGatewayConnection.IkePolicy.ID } + gatewayconnection["distribute_traffic"] = vpnGatewayConnection.DistributeTraffic if vpnGatewayConnection.IpsecPolicy != nil { gatewayconnection["ipsec_policy"] = vpnGatewayConnection.IpsecPolicy.ID @@ -455,6 +462,7 @@ func getvpnGatewayConnectionIntfData(vpnGatewayConnectionIntf vpcv1.VPNGatewayCo gatewayconnection[isVPNGatewayConnectionDeadPeerDetectionInterval] = vpnGatewayConnection.DeadPeerDetection.Interval gatewayconnection[isVPNGatewayConnectionDeadPeerDetectionTimeout] = vpnGatewayConnection.DeadPeerDetection.Timeout } + gatewayconnection["distribute_traffic"] = vpnGatewayConnection.DistributeTraffic gatewayconnection["href"] = vpnGatewayConnection.Href if vpnGatewayConnection.IkePolicy != nil { gatewayconnection["ike_policy"] = vpnGatewayConnection.IkePolicy.ID diff --git a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections_test.go b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections_test.go index fa504aaa12..7d44133e77 100644 --- 
a/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections_test.go +++ b/ibm/service/vpc/data_source_ibm_is_vpn_gateway_connections_test.go @@ -35,6 +35,29 @@ func TestAccIBMISVpnGatewayConnectionsDataSource_basic(t *testing.T) { }, }) } +func TestAccIBMISVpnGatewayConnectionsDataSource_distributeTraffic(t *testing.T) { + var vpnGatewayConnection string + node := "data.ibm_is_vpn_gateway_connections.test1" + vpcname := fmt.Sprintf("tfvpnuat-vpc-%d", acctest.RandIntRange(100, 200)) + subnetname := fmt.Sprintf("tfvpnuat-subnet-%d", acctest.RandIntRange(100, 200)) + vpngwname := fmt.Sprintf("tfvpnuat-vpngw-%d", acctest.RandIntRange(100, 200)) + name := fmt.Sprintf("tfvpnuat-createname-%d", acctest.RandIntRange(100, 200)) + dt := true + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVpnGatewayconnectionsDataSourceDistributeTrafficConfig(vpcname, subnetname, vpngwname, name, dt), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPNGatewayConnectionExists("ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection", vpnGatewayConnection), + resource.TestCheckResourceAttrSet(node, "connections.#"), + ), + }, + }, + }) +} func testAccCheckIBMISVpnGatewayconnectionsDataSourceConfig(vpc, subnet, vpngwname, name string) string { // status filter defaults to empty @@ -71,3 +94,39 @@ func testAccCheckIBMISVpnGatewayconnectionsDataSourceConfig(vpc, subnet, vpngwna }`, vpc, subnet, acc.ISZoneName, acc.ISCIDR, vpngwname, name) } +func testAccCheckIBMISVpnGatewayconnectionsDataSourceDistributeTrafficConfig(vpc, subnet, vpngwname, name string, distributeTraffic bool) string { + // status filter defaults to empty + return fmt.Sprintf(` + + data "ibm_resource_group" "rg" { + is_default = true + } + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + resource_group = data.ibm_resource_group.rg.id + } + resource "ibm_is_subnet" 
"testacc_subnet" { + name = "%s" + vpc = "${ibm_is_vpc.testacc_vpc.id}" + zone = "%s" + ipv4_cidr_block = "%s" + resource_group = data.ibm_resource_group.rg.id + } + resource "ibm_is_vpn_gateway" "testacc_vpnGateway" { + name = "%s" + subnet = "${ibm_is_subnet.testacc_subnet.id}" + resource_group = data.ibm_resource_group.rg.id + + } + resource "ibm_is_vpn_gateway_connection" "testacc_VPNGatewayConnection" { + name = "%s" + vpn_gateway = "${ibm_is_vpn_gateway.testacc_vpnGateway.id}" + peer_address = "1.2.3.4" + preshared_key = "VPNDemoPassword" + distribute_traffic = %t + } + data "ibm_is_vpn_gateway_connections" "test1" { + vpn_gateway = ibm_is_vpn_gateway.testacc_vpnGateway.id + }`, vpc, subnet, acc.ISZoneName, acc.ISCIDR, vpngwname, name, distributeTraffic) + +} diff --git a/ibm/service/vpc/resource_ibm_is_instance.go b/ibm/service/vpc/resource_ibm_is_instance.go index fe23b877e1..cb2e000aa3 100644 --- a/ibm/service/vpc/resource_ibm_is_instance.go +++ b/ibm/service/vpc/resource_ibm_is_instance.go @@ -1202,17 +1202,19 @@ func ResourceIBMISInstance() *schema.Resource { Computed: true, }, isInstanceBootSize: { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validate.InvokeValidator("ibm_is_instance", isInstanceBootSize), + Type: schema.TypeInt, + Optional: true, + Computed: true, + // ValidateFunc: validate.InvokeValidator("ibm_is_instance", isInstanceBootSize), }, isInstanceBootIOPS: { Type: schema.TypeInt, Computed: true, + Optional: true, }, isInstanceBootProfile: { Type: schema.TypeString, + Optional: true, Computed: true, }, isInstanceBootVolumeTags: { @@ -1765,7 +1767,7 @@ func ResourceIBMISInstanceValidator() *validate.ResourceValidator { return &ibmISInstanceValidator } -func instanceCreateByImage(d *schema.ResourceData, meta interface{}, profile, name, vpcID, zone, image string) error { +func instanceCreateByImage(d *schema.ResourceData, meta interface{}, profile, name, vpcID, zone, image, bootProfile string) error { sess, err 
:= vpcClient(meta) if err != nil { return err @@ -1854,6 +1856,12 @@ func instanceCreateByImage(d *schema.ResourceData, meta interface{}, profile, na sizeInt64 := int64(size) volTemplate.Capacity = &sizeInt64 } + iopsOk, ok := bootvol[isInstanceBootIOPS] + iops := iopsOk.(int) + if iops != 0 && ok { + iopsInt64 := int64(iops) + volTemplate.Iops = &iopsInt64 + } enc, ok := bootvol[isInstanceBootEncryption] encstr := enc.(string) if ok && encstr != "" { @@ -1861,10 +1869,11 @@ func instanceCreateByImage(d *schema.ResourceData, meta interface{}, profile, na CRN: &encstr, } } - - volprof := "general-purpose" + if bootProfile == "" { + bootProfile = "general-purpose" + } volTemplate.Profile = &vpcv1.VolumeProfileIdentity{ - Name: &volprof, + Name: &bootProfile, } var userTags *schema.Set if v, ok := bootvol[isInstanceBootVolumeTags]; ok { @@ -2315,6 +2324,12 @@ func instanceCreateByCatalogOffering(d *schema.ResourceData, meta interface{}, p sizeInt64 := int64(size) volTemplate.Capacity = &sizeInt64 } + iopsOk, ok := bootvol[isInstanceBootIOPS] + iops := iopsOk.(int) + if iops != 0 && ok { + iopsInt64 := int64(iops) + volTemplate.Iops = &iopsInt64 + } enc, ok := bootvol[isInstanceBootEncryption] encstr := enc.(string) if ok && encstr != "" { @@ -2734,6 +2749,12 @@ func instanceCreateByTemplate(d *schema.ResourceData, meta interface{}, profile, sizeInt64 := int64(size) volTemplate.Capacity = &sizeInt64 } + iopsOk, ok := bootvol[isInstanceBootIOPS] + iops := iopsOk.(int) + if iops != 0 && ok { + iopsInt64 := int64(iops) + volTemplate.Iops = &iopsInt64 + } enc, ok := bootvol[isInstanceBootEncryption] encstr := enc.(string) if ok && encstr != "" { @@ -3152,6 +3173,12 @@ func instanceCreateBySnapshot(d *schema.ResourceData, meta interface{}, profile, sizeInt64 := int64(size) volTemplate.Capacity = &sizeInt64 } + iopsOk, ok := bootvol[isInstanceBootIOPS] + iops := iopsOk.(int) + if iops != 0 && ok { + iopsInt64 := int64(iops) + volTemplate.Iops = &iopsInt64 + } enc, ok := 
bootvol[isInstanceBootEncryption] encstr := enc.(string) if ok && encstr != "" { @@ -3923,6 +3950,7 @@ func resourceIBMisInstanceCreate(d *schema.ResourceData, meta interface{}) error zone := d.Get(isInstanceZone).(string) image := d.Get(isInstanceImage).(string) snapshot := d.Get("boot_volume.0.snapshot").(string) + bootProfile := d.Get("boot_volume.0.profile").(string) snapshotcrn := d.Get("boot_volume.0.snapshot_crn").(string) volume := d.Get("boot_volume.0.volume_id").(string) template := d.Get(isInstanceSourceTemplate).(string) @@ -3952,7 +3980,7 @@ func resourceIBMisInstanceCreate(d *schema.ResourceData, meta interface{}) error return err } } else { - err := instanceCreateByImage(d, meta, profile, name, vpcID, zone, image) + err := instanceCreateByImage(d, meta, profile, name, vpcID, zone, image, bootProfile) if err != nil { return err } @@ -5116,6 +5144,7 @@ func instanceUpdate(d *schema.ResourceData, meta interface{}) error { } bootVolSize := "boot_volume.0.size" + bootIopsSize := "boot_volume.0.iops" if d.HasChange(bootVolSize) && !d.IsNewResource() { old, new := d.GetChange(bootVolSize) @@ -5149,6 +5178,36 @@ func instanceUpdate(d *schema.ResourceData, meta interface{}) error { return err } } + if d.HasChange(bootIopsSize) && !d.IsNewResource() { + _, new := d.GetChange(bootIopsSize) + + bootVolIops := int64(new.(int)) + volId := d.Get("boot_volume.0.volume_id").(string) + updateVolumeOptions := &vpcv1.UpdateVolumeOptions{ + ID: &volId, + } + volPatchModel := &vpcv1.VolumePatch{ + Iops: &bootVolIops, + } + volPatchModelAsPatch, err := volPatchModel.AsPatch() + + if err != nil { + return (fmt.Errorf("[ERROR] Error encountered while apply as patch for boot iops of instance %s", err)) + } + + updateVolumeOptions.VolumePatch = volPatchModelAsPatch + + vol, res, err := instanceC.UpdateVolume(updateVolumeOptions) + + if vol == nil || err != nil { + return (fmt.Errorf("[ERROR] Error encountered while expanding boot iops of instance %s\n%s", err, res)) + } + + _,
err = isWaitForVolumeAvailable(instanceC, volId, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } bootVolTags := "boot_volume.0.tags" if d.HasChange(bootVolTags) && !d.IsNewResource() { var userTags *schema.Set diff --git a/ibm/service/vpc/resource_ibm_is_instance_test.go b/ibm/service/vpc/resource_ibm_is_instance_test.go index 0a9af1624d..f11dcbc6d4 100644 --- a/ibm/service/vpc/resource_ibm_is_instance_test.go +++ b/ibm/service/vpc/resource_ibm_is_instance_test.go @@ -75,6 +75,99 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVE }, }) } +func TestAccIBMISInstance_sdpbasic(t *testing.T) { + var instance string + vpcname := fmt.Sprintf("tf-vpc-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-instnace-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tf-subnet-%d", acctest.RandIntRange(10, 100)) + publicKey := strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) + sshname := fmt.Sprintf("tf-ssh-%d", acctest.RandIntRange(10, 100)) + userData1 := "a" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISInstanceSdpConfig(vpcname, subnetname, sshname, publicKey, name, userData1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceExists("ibm_is_instance.testacc_instance", instance), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", 
"user_data", userData1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.size", "250"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.iops", "10000"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.profile", "sdp"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "zone", acc.ISZoneName), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.#"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.0.manufacturer"), + ), + }, + { + Config: testAccCheckIBMISInstanceSdpCapacityUpdateConfig(vpcname, subnetname, sshname, publicKey, name, userData1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceExists("ibm_is_instance.testacc_instance", instance), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "user_data", userData1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "zone", acc.ISZoneName), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.size", "25000"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.iops", "10000"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.profile", "sdp"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "primary_network_interface.0.port_speed"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.#"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.0.manufacturer"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "numa_count"), + ), + }, + { + Config: testAccCheckIBMISInstanceSdpIopsUpdateConfig(vpcname, subnetname, sshname, publicKey, name, userData1), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceExists("ibm_is_instance.testacc_instance", instance), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "user_data", userData1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "zone", acc.ISZoneName), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.size", "25000"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.iops", "28000"), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.profile", "sdp"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "primary_network_interface.0.port_speed"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.#"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "vcpu.0.manufacturer"), + resource.TestCheckResourceAttrSet( + "ibm_is_instance.testacc_instance", "numa_count"), + ), + }, + }, + }) +} func TestAccIBMISInstanceWithoutKeys_basic(t *testing.T) { var instance string @@ -1457,6 +1550,114 @@ func testAccCheckIBMISInstanceConfig(vpcname, subnetname, sshname, publicKey, na } }`, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, userData, acc.ISZoneName) } +func testAccCheckIBMISInstanceSdpConfig(vpcname, subnetname, sshname, publicKey, name, userData string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + ipv4_cidr_block = "%s" + } + + resource "ibm_is_ssh_key" "testacc_sshkey" { + name = "%s" + public_key = "%s" + } + + resource "ibm_is_instance" "testacc_instance" { + name = "%s" + image = "%s" + profile = "%s" + boot_volume { + size = 
250 + profile = "sdp" + iops = 10000 + } + primary_network_interface { + subnet = ibm_is_subnet.testacc_subnet.id + } + user_data = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + keys = [ibm_is_ssh_key.testacc_sshkey.id] + }`, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, userData, acc.ISZoneName) +} +func testAccCheckIBMISInstanceSdpIopsUpdateConfig(vpcname, subnetname, sshname, publicKey, name, userData string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + ipv4_cidr_block = "%s" + } + + resource "ibm_is_ssh_key" "testacc_sshkey" { + name = "%s" + public_key = "%s" + } + + resource "ibm_is_instance" "testacc_instance" { + name = "%s" + image = "%s" + profile = "%s" + boot_volume { + size = 25000 + profile = "sdp" + iops = 28000 + } + primary_network_interface { + subnet = ibm_is_subnet.testacc_subnet.id + } + user_data = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + keys = [ibm_is_ssh_key.testacc_sshkey.id] + }`, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, userData, acc.ISZoneName) +} +func testAccCheckIBMISInstanceSdpCapacityUpdateConfig(vpcname, subnetname, sshname, publicKey, name, userData string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + ipv4_cidr_block = "%s" + } + + resource "ibm_is_ssh_key" "testacc_sshkey" { + name = "%s" + public_key = "%s" + } + + resource "ibm_is_instance" "testacc_instance" { + name = "%s" + image = "%s" + profile = "%s" + boot_volume { + size = 25000 + profile = "sdp" + iops = 10000 + } + primary_network_interface { + subnet = ibm_is_subnet.testacc_subnet.id + } + user_data 
= "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + keys = [ibm_is_ssh_key.testacc_sshkey.id] + }`, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, userData, acc.ISZoneName) +} func testAccCheckIBMISInstanceWithoutKeysConfig(vpcname, subnetname, name, userData string) string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/resource_ibm_is_instance_volume_attachment.go b/ibm/service/vpc/resource_ibm_is_instance_volume_attachment.go index 1e1075549b..8557117de3 100644 --- a/ibm/service/vpc/resource_ibm_is_instance_volume_attachment.go +++ b/ibm/service/vpc/resource_ibm_is_instance_volume_attachment.go @@ -245,7 +245,7 @@ func ResourceIBMISInstanceVolumeAttachmentValidator() *validate.ResourceValidato ValidateFunctionIdentifier: validate.IntBetween, Type: validate.TypeInt, MinValue: "10", - MaxValue: "16000"}) + MaxValue: "64000"}) validateSchema = append(validateSchema, validate.ValidateSchema{ @@ -263,7 +263,7 @@ func ResourceIBMISInstanceVolumeAttachmentValidator() *validate.ResourceValidato ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, Type: validate.TypeString, Optional: true, - AllowedValues: "general-purpose, 5iops-tier, 10iops-tier, custom", + AllowedValues: "general-purpose, 5iops-tier, 10iops-tier, custom, sdp", }) validateSchema = append(validateSchema, validate.ValidateSchema{ @@ -367,7 +367,10 @@ func instanceVolAttachmentCreate(d *schema.ResourceData, meta interface{}, insta if iops != 0 { volProtoVol.Iops = &iops } - volProfileStr := "custom" + volProfileStr := d.Get(isInstanceVolProfile).(string) + if volProfileStr == "" { + volProfileStr = "custom" + } volProtoVol.Profile = &vpcv1.VolumeProfileIdentity{ Name: &volProfileStr, } @@ -595,8 +598,39 @@ func instanceVolAttUpdate(d *schema.ResourceData, meta interface{}) error { if volIdOk, ok := d.GetOk(isInstanceVolAttVol); ok { volId = volIdOk.(string) } - - if volId != "" && (d.HasChange(isInstanceVolIops) || 
d.HasChange(isInstanceVolProfile)) { // || d.HasChange(isInstanceVolAttTags) + volProfile := "" + if volProfileOk, ok := d.GetOk(isInstanceVolProfile); ok { + volProfile = volProfileOk.(string) + } + if volId != "" && d.HasChange(isInstanceVolIops) && !d.HasChange(isInstanceVolProfile) && volProfile == "sdp" { // || d.HasChange(isInstanceVolAttTags) + updateVolumeProfileOptions := &vpcv1.UpdateVolumeOptions{ + ID: &volId, + } + volumeProfilePatchModel := &vpcv1.VolumePatch{} + if d.HasChange(isVolumeIops) { + iops := int64(d.Get(isVolumeIops).(int)) + volumeProfilePatchModel.Iops = &iops + } + volumeProfilePatch, err := volumeProfilePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("[ERROR] Error calling asPatch for volumeProfilePatch: %s", err) + } + optionsget := &vpcv1.GetVolumeOptions{ + ID: &volId, + } + _, response, err := instanceC.GetVolume(optionsget) + if err != nil { + return fmt.Errorf("[ERROR] Error getting Boot Volume (%s): %s\n%s", id, err, response) + } + eTag := response.Headers.Get("ETag") + updateVolumeProfileOptions.IfMatch = &eTag + updateVolumeProfileOptions.VolumePatch = volumeProfilePatch + _, response, err = instanceC.UpdateVolume(updateVolumeProfileOptions) + if err != nil { + return fmt.Errorf("[ERROR] Error updating volume profile/iops/userTags: %s\n%s", err, response) + } + isWaitForVolumeAvailable(instanceC, volId, d.Timeout(schema.TimeoutCreate)) + } else if volId != "" && (d.HasChange(isInstanceVolIops) || d.HasChange(isInstanceVolProfile)) { // || d.HasChange(isInstanceVolAttTags) insId := d.Get(isInstanceId).(string) getinsOptions := &vpcv1.GetInstanceOptions{ ID: &insId, @@ -691,30 +725,31 @@ func instanceVolAttUpdate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("[ERROR] Error Getting Volume (%s): %s\n%s", id, err, response) } + if *vol.Profile.Name != "sdp" { + if vol.VolumeAttachments == nil || len(vol.VolumeAttachments) == 0 || *vol.VolumeAttachments[0].Name == "" { + return 
fmt.Errorf("[ERROR] Error volume capacity can't be updated since volume %s is not attached to any instance for VolumePatch", id) + } - if vol.VolumeAttachments == nil || len(vol.VolumeAttachments) == 0 || *vol.VolumeAttachments[0].Name == "" { - return fmt.Errorf("[ERROR] Error volume capacity can't be updated since volume %s is not attached to any instance for VolumePatch", id) - } - - getinsOptions := &vpcv1.GetInstanceOptions{ - ID: &instanceId, - } - instance, response, err := instanceC.GetInstance(getinsOptions) - if err != nil || instance == nil { - return fmt.Errorf("[ERROR] Error retrieving Instance (%s) : %s\n%s", instanceId, err, response) - } - if instance != nil && *instance.Status != "running" { - actiontype := "start" - createinsactoptions := &vpcv1.CreateInstanceActionOptions{ - InstanceID: &instanceId, - Type: &actiontype, + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: &instanceId, } - _, response, err = instanceC.CreateInstanceAction(createinsactoptions) - if err != nil { + instance, response, err := instanceC.GetInstance(getinsOptions) + if err != nil || instance == nil { + return fmt.Errorf("[ERROR] Error retrieving Instance (%s) : %s\n%s", instanceId, err, response) + } + if instance != nil && *instance.Status != "running" { + actiontype := "start" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: &instanceId, + Type: &actiontype, + } + _, response, err = instanceC.CreateInstanceAction(createinsactoptions) + if err != nil { + return fmt.Errorf("[ERROR] Error starting Instance (%s) : %s\n%s", instanceId, err, response) + } + _, err = isWaitForInstanceAvailable(instanceC, instanceId, d.Timeout(schema.TimeoutCreate), d) return fmt.Errorf("[ERROR] Error starting Instance (%s) : %s\n%s", instanceId, err, response) } - _, err = isWaitForInstanceAvailable(instanceC, instanceId, d.Timeout(schema.TimeoutCreate), d) - return fmt.Errorf("[ERROR] Error starting Instance (%s) : %s\n%s", instanceId, err, response) } capacity := 
int64(d.Get(isVolumeCapacity).(int)) updateVolumeOptions := &vpcv1.UpdateVolumeOptions{ diff --git a/ibm/service/vpc/resource_ibm_is_instance_volume_attachment_test.go b/ibm/service/vpc/resource_ibm_is_instance_volume_attachment_test.go index 6e99984c0f..0233ec7c17 100644 --- a/ibm/service/vpc/resource_ibm_is_instance_volume_attachment_test.go +++ b/ibm/service/vpc/resource_ibm_is_instance_volume_attachment_test.go @@ -85,6 +85,90 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVE }, }) } +func TestAccIBMISInstanceVolumeAttachment_sdpbasic(t *testing.T) { + var instanceVolAtt string + vpcname := fmt.Sprintf("tf-vpc-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-instnace-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tf-subnet-%d", acctest.RandIntRange(10, 100)) + publicKey := strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) + sshname := fmt.Sprintf("tf-ssh-%d", acctest.RandIntRange(10, 100)) + attName := fmt.Sprintf("tf-volatt-%d", acctest.RandIntRange(10, 100)) + autoDelete := true + volName := fmt.Sprintf("tf-vol-%d", acctest.RandIntRange(10, 100)) + volUpdateName := fmt.Sprintf("tf-vol-update-%d", acctest.RandIntRange(10, 100)) + iops1 := int64(10000) + iops2 := int64(28000) + + capacity1 := int64(1000) + capacity2 := int64(22000) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISInstanceVolumeAttachmentSdpConfig(vpcname, subnetname, sshname, publicKey, name, attName, 
volName, autoDelete, capacity1, iops1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceVolumeAttachmentExists("ibm_is_instance_volume_attachment.testacc_att", instanceVolAtt), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "name", attName), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "delete_volume_on_instance_delete", fmt.Sprintf("%t", autoDelete)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "capacity", fmt.Sprintf("%d", capacity1)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "iops", "10000"), + ), + }, + { + Config: testAccCheckIBMISInstanceVolumeAttachmentSdpConfig(vpcname, subnetname, sshname, publicKey, name, attName, volUpdateName, autoDelete, capacity1, iops1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceVolumeAttachmentExists("ibm_is_instance_volume_attachment.testacc_att", instanceVolAtt), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "name", attName), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "delete_volume_on_instance_delete", fmt.Sprintf("%t", autoDelete)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "capacity", fmt.Sprintf("%d", capacity1)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "iops", "10000"), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "volume_name", volUpdateName), + ), + }, + { + Config: testAccCheckIBMISInstanceVolumeAttachmentSdpConfig(vpcname, subnetname, sshname, publicKey, name, attName, volName, autoDelete, capacity1, iops2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceVolumeAttachmentExists("ibm_is_instance_volume_attachment.testacc_att", instanceVolAtt), + resource.TestCheckResourceAttr( + 
"ibm_is_instance_volume_attachment.testacc_att", "name", attName), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "delete_volume_on_instance_delete", fmt.Sprintf("%t", autoDelete)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "capacity", fmt.Sprintf("%d", capacity1)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "iops", "28000"), + ), + }, + + { + Config: testAccCheckIBMISInstanceVolumeAttachmentSdpConfig(vpcname, subnetname, sshname, publicKey, name, attName, volName, autoDelete, capacity2, iops2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceVolumeAttachmentExists("ibm_is_instance_volume_attachment.testacc_att", instanceVolAtt), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "name", attName), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "delete_volume_on_instance_delete", fmt.Sprintf("%t", autoDelete)), + resource.TestCheckResourceAttr( + "ibm_is_instance_volume_attachment.testacc_att", "capacity", fmt.Sprintf("%d", capacity2)), + ), + }, + }, + }) +} func TestAccIBMISInstanceVolumeAttachment_crn(t *testing.T) { var instanceVolAtt string vpcname := fmt.Sprintf("tf-vpc-%d", acctest.RandIntRange(10, 100)) @@ -277,6 +361,54 @@ func testAccCheckIBMISInstanceVolumeAttachmentConfig(vpcname, subnetname, sshnam `, vpcname, subnetname, acc.ISZoneName, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, acc.ISZoneName, attName, capacity, iops, autoDelete, volName) } +func testAccCheckIBMISInstanceVolumeAttachmentSdpConfig(vpcname, subnetname, sshname, publicKey, name, attName, volName string, autoDelete bool, capacity, iops int64) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + 
total_ipv4_address_count = 16 + } + + resource "ibm_is_ssh_key" "testacc_sshkey" { + name = "%s" + public_key = "%s" + } + + resource "ibm_is_instance" "testacc_instance" { + name = "%s" + image = "%s" + profile = "%s" + primary_network_interface { + subnet = ibm_is_subnet.testacc_subnet.id + } + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + keys = [ibm_is_ssh_key.testacc_sshkey.id] + network_interfaces { + subnet = ibm_is_subnet.testacc_subnet.id + name = "eth1" + } + } + resource "ibm_is_instance_volume_attachment" "testacc_att" { + instance = ibm_is_instance.testacc_instance.id + + name = "%s" + profile = "sdp" + capacity = %d + iops = %d + + delete_volume_on_instance_delete = %t + volume_name = "%s" + } + + `, vpcname, subnetname, acc.ISZoneName, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, acc.ISZoneName, attName, capacity, iops, autoDelete, volName) +} + func testAccCheckIBMISInstanceVolumeAttachmentCrnConfig(vpcname, subnetname, sshname, publicKey, name, attName, volName string, autoDelete bool) string { return fmt.Sprintf(` resource "ibm_is_vpc" "testacc_vpc" { diff --git a/ibm/service/vpc/resource_ibm_is_lb.go b/ibm/service/vpc/resource_ibm_is_lb.go index 4e58d0483c..d637f2abd5 100644 --- a/ibm/service/vpc/resource_ibm_is_lb.go +++ b/ibm/service/vpc/resource_ibm_is_lb.go @@ -19,29 +19,34 @@ import ( ) const ( - isLBName = "name" - isLBStatus = "status" - isLBCrn = "crn" - isLBTags = "tags" - isLBType = "type" - isLBSubnets = "subnets" - isLBHostName = "hostname" - isLBPublicIPs = "public_ips" - isLBPrivateIPs = "private_ips" - isLBListeners = "listeners" - isLBPools = "pools" - isLBOperatingStatus = "operating_status" - isLBDeleting = "deleting" - isLBDeleted = "done" - isLBProvisioning = "provisioning" - isLBProvisioningDone = "done" - isLBResourceGroup = "resource_group" - isLBProfile = "profile" - isLBRouteMode = "route_mode" - isLBUdpSupported = "udp_supported" - isLBLogging = "logging" - isLBSecurityGroups = "security_groups" - 
isLBSecurityGroupsSupported = "security_group_supported" + isLBAvailability = "availability" + isLBAccessMode = "access_mode" + isLBAccessModes = "access_modes" + isLBInstanceGroupsSupported = "instance_groups_supported" + isLBSourceIPPersistenceSupported = "source_ip_session_persistence_supported" + isLBName = "name" + isLBStatus = "status" + isLBCrn = "crn" + isLBTags = "tags" + isLBType = "type" + isLBSubnets = "subnets" + isLBHostName = "hostname" + isLBPublicIPs = "public_ips" + isLBPrivateIPs = "private_ips" + isLBListeners = "listeners" + isLBPools = "pools" + isLBOperatingStatus = "operating_status" + isLBDeleting = "deleting" + isLBDeleted = "done" + isLBProvisioning = "provisioning" + isLBProvisioningDone = "done" + isLBResourceGroup = "resource_group" + isLBProfile = "profile" + isLBRouteMode = "route_mode" + isLBUdpSupported = "udp_supported" + isLBLogging = "logging" + isLBSecurityGroups = "security_groups" + isLBSecurityGroupsSupported = "security_group_supported" isLBAccessTags = "access_tags" ) @@ -95,6 +100,27 @@ func ResourceIBMISLB() *schema.Resource { ValidateFunc: validate.InvokeValidator("ibm_is_lb", isLBType), Description: "Load Balancer type", }, + + isLBAvailability: { + Type: schema.TypeString, + Computed: true, + Description: "The availability of this load balancer", + }, + isLBAccessMode: { + Type: schema.TypeString, + Computed: true, + Description: "The access mode of this load balancer", + }, + isLBInstanceGroupsSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports instance groups.", + }, + isLBSourceIPPersistenceSupported: { + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this load balancer supports source IP session persistence.", + }, "dns": { Type: schema.TypeList, Optional: true, @@ -293,8 +319,8 @@ func ResourceIBMISLB() *schema.Resource { func ResourceIBMISLBValidator() *validate.ResourceValidator { validateSchema := 
make([]validate.ValidateSchema, 0) - lbtype := "public, private" - isLBProfileAllowedValues := "network-fixed" + lbtype := "public, private, private_path" + isLBProfileAllowedValues := "network-fixed, network-private-path" validateSchema = append(validateSchema, validate.ValidateSchema{ @@ -360,6 +386,7 @@ func resourceIBMISLBCreate(d *schema.ResourceData, meta interface{}) error { // subnets := flex.ExpandStringList((d.Get(isLBSubnets).(*schema.Set)).List()) var lbType, rg string + isPrivatePath := false isPublic := true if types, ok := d.GetOk(isLBType); ok { lbType = types.(string) @@ -369,11 +396,16 @@ func resourceIBMISLBCreate(d *schema.ResourceData, meta interface{}) error { isPublic = false } + if lbType == "private_path" { + isPrivatePath = true + isPublic = false + } + if grp, ok := d.GetOk(isLBResourceGroup); ok { rg = grp.(string) } - err := lbCreate(d, meta, name, lbType, rg, subnets, isPublic, isLogging, securityGroups) + err := lbCreate(d, meta, name, lbType, rg, subnets, isPublic, isPrivatePath, isLogging, securityGroups) if err != nil { return err } @@ -381,15 +413,16 @@ func resourceIBMISLBCreate(d *schema.ResourceData, meta interface{}) error { return resourceIBMISLBRead(d, meta) } -func lbCreate(d *schema.ResourceData, meta interface{}, name, lbType, rg string, subnets *schema.Set, isPublic, isLogging bool, securityGroups *schema.Set) error { +func lbCreate(d *schema.ResourceData, meta interface{}, name, lbType, rg string, subnets *schema.Set, isPublic, isPrivatePath, isLogging bool, securityGroups *schema.Set) error { sess, err := vpcClient(meta) if err != nil { return err } options := &vpcv1.CreateLoadBalancerOptions{ - IsPublic: &isPublic, - Name: &name, + IsPublic: &isPublic, + IsPrivatePath: &isPrivatePath, + Name: &name, } if dnsIntf, ok := d.GetOk("dns"); ok { @@ -515,6 +548,19 @@ func lbGet(d *schema.ResourceData, meta interface{}, id string) error { } return fmt.Errorf("[ERROR] Error getting Load Balancer : %s\n%s", err, response) } + 
if lb.Availability != nil { + d.Set(isLBAvailability, *lb.Availability) + } + if lb.AccessMode != nil { + d.Set(isLBAccessMode, *lb.AccessMode) + } + if lb.InstanceGroupsSupported != nil { + d.Set(isLBInstanceGroupsSupported, *lb.InstanceGroupsSupported) + } + if lb.SourceIPSessionPersistenceSupported != nil { + d.Set(isLBSourceIPPersistenceSupported, *lb.SourceIPSessionPersistenceSupported) + } + dnsList := make([]map[string]interface{}, 0) if lb.Dns != nil { dns := map[string]interface{}{} @@ -529,7 +575,11 @@ func lbGet(d *schema.ResourceData, meta interface{}, id string) error { if *lb.IsPublic { d.Set(isLBType, "public") } else { - d.Set(isLBType, "private") + if lb.IsPrivatePath != nil && *lb.IsPrivatePath { + d.Set(isLBType, "private_path") + } else { + d.Set(isLBType, "private") + } } if lb.RouteMode != nil { d.Set(isLBRouteMode, *lb.RouteMode) diff --git a/ibm/service/vpc/resource_ibm_is_lb_test.go b/ibm/service/vpc/resource_ibm_is_lb_test.go index 4dcb832567..ea0a40aca6 100644 --- a/ibm/service/vpc/resource_ibm_is_lb_test.go +++ b/ibm/service/vpc/resource_ibm_is_lb_test.go @@ -115,6 +115,34 @@ func TestAccIBMISLB_basic(t *testing.T) { }) } +func TestAccIBMISLB_PPNLB(t *testing.T) { + var lb string + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tfcreate%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISLBDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISPPNLB(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISLBExists("ibm_is_lb.testacc_LB", lb), + resource.TestCheckResourceAttr( + "ibm_is_lb.testacc_LB", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_lb.testacc_LB", "type", "private_path"), + 
resource.TestCheckResourceAttr( + "ibm_is_lb.testacc_LB", "profile", "network-private-path"), + resource.TestCheckResourceAttrSet("ibm_is_lb.testacc_LB", "availability"), + resource.TestCheckResourceAttrSet("ibm_is_lb.testacc_LB", "instance_groups_supported"), + resource.TestCheckResourceAttrSet("ibm_is_lb.testacc_LB", "source_ip_session_persistence_supported"), + ), + }, + }, + }) +} func TestAccIBMISLB_DNS(t *testing.T) { var lb string vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) @@ -522,6 +550,25 @@ func testAccCheckIBMISLBConfig(vpcname, subnetname, zone, cidr, name string) str } +func testAccCheckIBMISPPNLB(vpcname, subnetname, zone, cidr, name string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + ipv4_cidr_block = "%s" + } + resource "ibm_is_lb" "testacc_LB" { + name = "%s" + profile = "network-private-path" + type = "private_path" + subnets = [ibm_is_subnet.testacc_subnet.id] + }`, vpcname, subnetname, zone, cidr, name) +} func testAccCheckIBMISLBDNS(vpcname, subnetname, zone, cidr, name, dnsInstanceCrn, dnsZoneId string) string { return fmt.Sprintf(` resource "ibm_is_vpc" "testacc_vpc" { diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway.go new file mode 100644 index 0000000000..8bc0dcc300 --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway.go @@ -0,0 +1,495 @@ +// Copyright IBM Corp. 2023 All Rights Reserved.
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func ResourceIBMIsPrivatePathServiceGateway() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMIsPrivatePathServiceGatewayCreate, + ReadContext: resourceIBMIsPrivatePathServiceGatewayRead, + UpdateContext: resourceIBMIsPrivatePathServiceGatewayUpdate, + DeleteContext: resourceIBMIsPrivatePathServiceGatewayDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "service_endpoints": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "The fully qualified domain names for this private path service gateway. ", + }, + "default_access_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validate.InvokeValidator("ibm_is_private_path_service_gateway", "access_policy"), + Description: "The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewed.", + }, + "load_balancer": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The load balancer for this private path service gateway. 
", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The name of this PPSG ", + }, + "resource_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "ID of resource group to use.", + }, + "zonal_affinity": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: "ndicates whether this private path service gateway has zonal affinity.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was created.", + }, + "endpoint_gateway_binding_auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed.", + }, + "endpoint_gateway_binding_auto_delete_timeout": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted.", + }, + "endpoint_gateway_count": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The number of endpoint gateways using this private path service gateway.", + }, + "published": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates the availability of this private path service gateway.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Href of this resource", + }, + "vpc": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VPC this private path service gateway resides in.", + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "lifecycle_state of this resource", + }, + "crn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, 
+ Description: "CRN of this resource.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The resource type.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was updated.", + }, + // "remote": &schema.Schema{ + // Type: schema.TypeList, + // Computed: true, + // Description: "If present, this property indicates that the resource associated with this reference is remote and therefore may not be directly retrievable..", + // Elem: &schema.Resource{ + // Schema: map[string]*schema.Schema{ + // "account": &schema.Schema{ + // Type: schema.TypeList, + // Computed: true, + // Description: "If present, this property indicates that the referenced resource is remote to this account, and identifies the owning account.", + // Elem: &schema.Resource{ + // Schema: map[string]*schema.Schema{ + // "id": &schema.Schema{ + // Type: schema.TypeList, + // Computed: true, + // Description: "The unique identifier for this account.", + // }, + // "resource_type": &schema.Schema{ + // Type: schema.TypeString, + // Computed: true, + // Description: "The resource type.", + // }, + // }, + // }, + // }, + // "region": &schema.Schema{ + // Type: schema.TypeList, + // Computed: true, + // Description: "If present, this property indicates that the referenced resource is remote to this region, and identifies the native region.", + // Elem: &schema.Resource{ + // Schema: map[string]*schema.Schema{ + // "href": &schema.Schema{ + // Type: schema.TypeList, + // Computed: true, + // Description: "The URL for this region.", + // }, + // "name": &schema.Schema{ + // Type: schema.TypeString, + // Computed: true, + // Description: "The globally unique name for this region.", + // }, + // }, + // }, + // }, + // }, + // }, + // }, + "private_path_service_gateway": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier 
for this account policy.", + }, + }, + } +} + +func ResourceIBMIsPrivatePathServiceGatewayValidator() *validate.ResourceValidator { + validateSchema := make([]validate.ValidateSchema, 0) + validateSchema = append(validateSchema, + validate.ValidateSchema{ + Identifier: "access_policy", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Required: true, + AllowedValues: "deny, permit, review", + Regexp: `^[a-z][a-z0-9]*(_[a-z0-9]+)*$`, + MinValueLength: 1, + MaxValueLength: 128, + }, + ) + + resourceValidator := validate.ResourceValidator{ResourceName: "ibm_is_private_path_service_gateway", Schema: validateSchema} + return &resourceValidator +} + +func resourceIBMIsPrivatePathServiceGatewayCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + loadBalancerId := d.Get("load_balancer").(string) + + createPrivatePathServiceGatewayOptions := &vpcv1.CreatePrivatePathServiceGatewayOptions{ + LoadBalancer: &vpcv1.LoadBalancerIdentity{ + ID: &loadBalancerId, + }, + } + serviceEndpoints := d.Get("service_endpoints").(*schema.Set) + if serviceEndpoints.Len() != 0 { + serviceEndpointsList := make([]string, serviceEndpoints.Len()) + for i, serviceEndpointsItem := range serviceEndpoints.List() { + sEndpoint := serviceEndpointsItem.(string) + serviceEndpointsList[i] = sEndpoint + } + createPrivatePathServiceGatewayOptions.ServiceEndpoints = serviceEndpointsList + } + if nameIntf, ok := d.GetOk("name"); ok { + name := nameIntf.(string) + createPrivatePathServiceGatewayOptions.Name = &name + } + if resGrpIntf, ok := d.GetOk("resource_group"); ok { + resGrp := resGrpIntf.(string) + createPrivatePathServiceGatewayOptions.ResourceGroup = &vpcv1.ResourceGroupIdentity{ + ID: &resGrp, + } + } + if defaultAccessPolicyIntf, ok := d.GetOk("default_access_policy"); ok { + dAccessPolicy := 
defaultAccessPolicyIntf.(string) + createPrivatePathServiceGatewayOptions.DefaultAccessPolicy = &dAccessPolicy + } + if zonalAffinityIntf, ok := d.GetOk("zonal_affinity"); ok { + zonalAffinity := zonalAffinityIntf.(bool) + createPrivatePathServiceGatewayOptions.ZonalAffinity = &zonalAffinity + } + + privatePathServiceGateway, response, err := vpcClient.CreatePrivatePathServiceGatewayWithContext(context, createPrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] CreatePrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("CreatePrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + d.SetId(*privatePathServiceGateway.ID) + _, err = isWaitForPPSGAvailable(vpcClient, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return diag.FromErr(err) + } + + return resourceIBMIsPrivatePathServiceGatewayUpdate(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + getPrivatePathServiceGatewayOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{} + + getPrivatePathServiceGatewayOptions.SetID(d.Id()) + + privatePathServiceGateway, response, err := vpcClient.GetPrivatePathServiceGatewayWithContext(context, getPrivatePathServiceGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + if err = d.Set("default_access_policy", privatePathServiceGateway.DefaultAccessPolicy); err != nil { + return diag.FromErr(fmt.Errorf("Error setting access_policy: %s", err)) + } + if err = d.Set("created_at", 
flex.DateTimeToString(privatePathServiceGateway.CreatedAt)); err != nil { + return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + } + if err = d.Set("href", privatePathServiceGateway.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + if err = d.Set("endpoint_gateway_count", privatePathServiceGateway.EndpointGatewayCount); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_count: %s", err)) + } + if err = d.Set("endpoint_gateway_binding_auto_delete", privatePathServiceGateway.EndpointGatewayBindingAutoDelete); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_binding_auto_delete: %s", err)) + } + if err = d.Set("endpoint_gateway_binding_auto_delete_timeout", privatePathServiceGateway.EndpointGatewayBindingAutoDeleteTimeout); err != nil { + return diag.FromErr(fmt.Errorf("Error setting endpoint_gateway_binding_auto_delete_timeout: %s", err)) + } + if err = d.Set("published", privatePathServiceGateway.Published); err != nil { + return diag.FromErr(fmt.Errorf("Error setting published: %s", err)) + } + if err = d.Set("load_balancer", *privatePathServiceGateway.LoadBalancer.ID); err != nil { + return diag.FromErr(fmt.Errorf("Error setting load balancer id: %s", err)) + } + if err = d.Set("lifecycle_state", privatePathServiceGateway.LifecycleState); err != nil { + return diag.FromErr(fmt.Errorf("Error setting lifecycle_state: %s", err)) + } + if err = d.Set("name", privatePathServiceGateway.Name); err != nil { + return diag.FromErr(fmt.Errorf("Error setting name: %s", err)) + } + if err = d.Set("vpc", privatePathServiceGateway.VPC.ID); err != nil { + return diag.FromErr(fmt.Errorf("Error setting vpc: %s", err)) + } + if err = d.Set("zonal_affinity", privatePathServiceGateway.ZonalAffinity); err != nil { + return diag.FromErr(fmt.Errorf("Error setting zonal_affinity: %s", err)) + } + serviceEndpointsList := make([]string, 0) + for i := 0; i < 
len(privatePathServiceGateway.ServiceEndpoints); i++ { + serviceEndpointsList = append(serviceEndpointsList, string(privatePathServiceGateway.ServiceEndpoints[i])) + } + if err = d.Set("service_endpoints", serviceEndpointsList); err != nil { + return diag.FromErr(fmt.Errorf("Error setting service_endpoints: %s", err)) + } + if err = d.Set("crn", privatePathServiceGateway.CRN); err != nil { + return diag.FromErr(fmt.Errorf("Error setting crn: %s", err)) + } + if err = d.Set("resource_type", privatePathServiceGateway.ResourceType); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) + } + if err = d.Set("private_path_service_gateway", privatePathServiceGateway.ID); err != nil { + return diag.FromErr(fmt.Errorf("Error setting private_path_service_gateway: %s", err)) + } + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + updatePrivatePathServiceGatewayOptions := &vpcv1.UpdatePrivatePathServiceGatewayOptions{} + updatePrivatePathServiceGatewayOptions.SetID(d.Id()) + hasChange := false + + patchVals := &vpcv1.PrivatePathServiceGatewayPatch{} + + if d.HasChange("default_access_policy") && !d.IsNewResource() { + newAccessPolicy := d.Get("default_access_policy").(string) + patchVals.DefaultAccessPolicy = &newAccessPolicy + hasChange = true + } + + if d.HasChange("name") && !d.IsNewResource() { + name := d.Get("name").(string) + patchVals.Name = &name + hasChange = true + } + if d.HasChange("zonal_affinity") && !d.IsNewResource() { + zonalAffinity := d.Get("zonal_affinity").(bool) + patchVals.ZonalAffinity = &zonalAffinity + hasChange = true + } + // if d.HasChange("published") { + // published := d.Get("published").(bool) + // patchVals.Published = &published + // hasChange = true + // } + if d.HasChange("load_balancer") && 
!d.IsNewResource() { + loadBalancer := d.Get("load_balancer").(string) + patchVals.LoadBalancer = &vpcv1.LoadBalancerIdentity{ + ID: &loadBalancer, + } + hasChange = true + } + + if hasChange { + updatePrivatePathServiceGatewayOptions.PrivatePathServiceGatewayPatch, _ = patchVals.AsPatch() + if err != nil { + log.Printf("[DEBUG] Error calling AsPatch for PrivatePathServiceGatewayPatch %s", err) + return diag.FromErr(err) + } + _, response, err := vpcClient.UpdatePrivatePathServiceGatewayWithContext(context, updatePrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] UpdatePrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdatePrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + } + + return resourceIBMIsPrivatePathServiceGatewayRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + deletePrivatePathServiceGatewayOptions := &vpcv1.DeletePrivatePathServiceGatewayOptions{} + deletePrivatePathServiceGatewayOptions.SetID(d.Id()) + + response, err := vpcClient.DeletePrivatePathServiceGatewayWithContext(context, deletePrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] DeletePrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("DeletePrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + _, err = isWaitForPPSGDeleted(vpcClient, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + + return nil +} + +func isWaitForPPSGDeleteRetry(vpcClient *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("[DEBUG] Retrying PPSG (%s) delete", id) + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{"ppsg_in_use"}, + Target: []string{"deleting", "done", ""}, + Refresh: func() (interface{}, string, error) { + deletePrivatePathServiceGatewayOptions := &vpcv1.DeletePrivatePathServiceGatewayOptions{} + deletePrivatePathServiceGatewayOptions.SetID(id) + log.Printf("[DEBUG] Retrying PPSG (%s) delete", id) + response, err := vpcClient.DeletePrivatePathServiceGateway(deletePrivatePathServiceGatewayOptions) + if err != nil { + if response != nil && response.StatusCode == 409 { + return response, "ppsg_in_use", nil + } else if response != nil && response.StatusCode == 404 { + return response, "done", nil + } + return response, "", fmt.Errorf("[ERROR] Error deleting ppsg: %s\n%s", err, response) + } + return response, "deleting", nil + }, + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + return stateConf.WaitForState() +} +func isWaitForPPSGDeleted(vpcClient *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for ppsg (%s) to be deleted.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "deleting", "stable"}, + Target: []string{"deleted", ""}, + Refresh: isPPSGDeleteRefreshFunc(vpcClient, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isPPSGDeleteRefreshFunc(vpcClient *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] is ppsg delete function here") + getPPSGOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{ + ID: &id, + } + ppsg, response, err := vpcClient.GetPrivatePathServiceGateway(getPPSGOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return ppsg, "deleted", nil + } + return ppsg, "", fmt.Errorf("[ERROR] The ppsg %s failed to delete: %s\n%s", id, err, response) + } + return ppsg, "deleting", err + } +} +func isWaitForPPSGAvailable(vpcClient *vpcv1.VpcV1, id 
string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for ppsg (%s) to be available.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending", "updating"}, + Target: []string{"stable", "failed", "suspended"}, + Refresh: isPPSGRefreshFunc(vpcClient, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func isPPSGRefreshFunc(vpcClient *vpcv1.VpcV1, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + getPPSGOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{ + ID: &id, + } + ppsg, response, err := vpcClient.GetPrivatePathServiceGateway(getPPSGOptions) + if err != nil { + return nil, "", fmt.Errorf("[ERROR] Error getting ppsg : %s\n%s", err, response) + } + + return ppsg, *ppsg.LifecycleState, nil + } +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy.go new file mode 100644 index 0000000000..081fcead73 --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy.go @@ -0,0 +1,237 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func ResourceIBMIsPrivatePathServiceGatewayAccountPolicy() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMIsPrivatePathServiceGatewayAccountPolicyCreate, + ReadContext: resourceIBMIsPrivatePathServiceGatewayAccountPolicyRead, + UpdateContext: resourceIBMIsPrivatePathServiceGatewayAccountPolicyUpdate, + DeleteContext: resourceIBMIsPrivatePathServiceGatewayAccountPolicyDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The private path service gateway identifier.", + }, + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.InvokeValidator("ibm_is_private_path_service_gateway_account_policy", "access_policy"), + Description: "The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewed.", + }, + "account": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The account for this access policy.", + }, + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was created.", + }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The URL for this account policy.", + }, + "resource_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The 
resource type.", + }, + "updated_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date and time that the account policy was updated.", + }, + "account_policy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this account policy.", + }, + }, + } +} + +func ResourceIBMIsPrivatePathServiceGatewayAccountPolicyValidator() *validate.ResourceValidator { + validateSchema := make([]validate.ValidateSchema, 0) + validateSchema = append(validateSchema, + validate.ValidateSchema{ + Identifier: "access_policy", + ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, + Type: validate.TypeString, + Required: true, + AllowedValues: "deny, permit, review", + Regexp: `^[a-z][a-z0-9]*(_[a-z0-9]+)*$`, + MinValueLength: 1, + MaxValueLength: 128, + }, + ) + + resourceValidator := validate.ResourceValidator{ResourceName: "ibm_is_private_path_service_gateway_account_policy", Schema: validateSchema} + return &resourceValidator +} + +func resourceIBMIsPrivatePathServiceGatewayAccountPolicyCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + createPrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.CreatePrivatePathServiceGatewayAccountPolicyOptions{} + + createPrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(d.Get("private_path_service_gateway").(string)) + createPrivatePathServiceGatewayAccountPolicyOptions.SetAccessPolicy(d.Get("access_policy").(string)) + accountId := d.Get("account").(string) + account := &vpcv1.AccountIdentity{ + ID: &accountId, + } + createPrivatePathServiceGatewayAccountPolicyOptions.SetAccount(account) + + privatePathServiceGatewayAccountPolicy, response, err := vpcClient.CreatePrivatePathServiceGatewayAccountPolicyWithContext(context, 
createPrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + log.Printf("[DEBUG] CreatePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("CreatePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response)) + } + + d.SetId(fmt.Sprintf("%s/%s", *createPrivatePathServiceGatewayAccountPolicyOptions.PrivatePathServiceGatewayID, *privatePathServiceGatewayAccountPolicy.ID)) + + return resourceIBMIsPrivatePathServiceGatewayAccountPolicyRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayAccountPolicyRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + getPrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.GetPrivatePathServiceGatewayAccountPolicyOptions{} + + parts, err := flex.SepIdParts(d.Id(), "/") + if err != nil { + return diag.FromErr(err) + } + + getPrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(parts[0]) + getPrivatePathServiceGatewayAccountPolicyOptions.SetID(parts[1]) + + privatePathServiceGatewayAccountPolicy, response, err := vpcClient.GetPrivatePathServiceGatewayAccountPolicyWithContext(context, getPrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetPrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetPrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response)) + } + + if err = d.Set("access_policy", privatePathServiceGatewayAccountPolicy.AccessPolicy); err != nil { + return diag.FromErr(fmt.Errorf("Error setting access_policy: %s", err)) + } + if err = d.Set("created_at", flex.DateTimeToString(privatePathServiceGatewayAccountPolicy.CreatedAt)); err != nil { + 
return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) + } + if err = d.Set("href", privatePathServiceGatewayAccountPolicy.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + if err = d.Set("resource_type", privatePathServiceGatewayAccountPolicy.ResourceType); err != nil { + return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) + } + // if err = d.Set("updated_at", flex.DateTimeToString(privatePathServiceGatewayAccountPolicy.UpdatedAt)); err != nil { + // return diag.FromErr(fmt.Errorf("Error setting updated_at: %s", err)) + // } + if err = d.Set("account_policy", privatePathServiceGatewayAccountPolicy.ID); err != nil { + return diag.FromErr(fmt.Errorf("Error setting private_path_service_gateway_account_policy_id: %s", err)) + } + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayAccountPolicyUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + updatePrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.UpdatePrivatePathServiceGatewayAccountPolicyOptions{} + + parts, err := flex.SepIdParts(d.Id(), "/") + if err != nil { + return diag.FromErr(err) + } + + updatePrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(parts[0]) + updatePrivatePathServiceGatewayAccountPolicyOptions.SetID(parts[1]) + + hasChange := false + + patchVals := &vpcv1.PrivatePathServiceGatewayAccountPolicyPatch{} + + if d.HasChange("access_policy") { + newAccessPolicy := d.Get("access_policy").(string) + patchVals.AccessPolicy = &newAccessPolicy + hasChange = true + } + + if hasChange { + updatePrivatePathServiceGatewayAccountPolicyOptions.PrivatePathServiceGatewayAccountPolicyPatch, _ = patchVals.AsPatch() + if err != nil { + log.Printf("[DEBUG] Error calling AsPatch for PrivatePathServiceGatewayAccountPolicyPatch %s", err) + return 
diag.FromErr(err) + } + _, response, err := vpcClient.UpdatePrivatePathServiceGatewayAccountPolicyWithContext(context, updatePrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + log.Printf("[DEBUG] UpdatePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdatePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response)) + } + } + + return resourceIBMIsPrivatePathServiceGatewayAccountPolicyRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayAccountPolicyDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + deletePrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.DeletePrivatePathServiceGatewayAccountPolicyOptions{} + + parts, err := flex.SepIdParts(d.Id(), "/") + if err != nil { + return diag.FromErr(err) + } + + deletePrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(parts[0]) + deletePrivatePathServiceGatewayAccountPolicyOptions.SetID(parts[1]) + + response, err := vpcClient.DeletePrivatePathServiceGatewayAccountPolicyWithContext(context, deletePrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + log.Printf("[DEBUG] DeletePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("DeletePrivatePathServiceGatewayAccountPolicyWithContext failed %s\n%s", err, response)) + } + + d.SetId("") + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy_test.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy_test.go new file mode 100644 index 0000000000..a5c018fc6d --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_account_policy_test.go @@ -0,0 +1,150 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func TestAccIBMIsPrivatePathServiceGatewayAccountPolicyBasic(t *testing.T) { + var conf vpcv1.PrivatePathServiceGatewayAccountPolicy + accessPolicy := "deny" + accessPolicyUpdate := "review" + accessPolicy1 := "review" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, accessPolicy1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyExists("ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", conf), + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "access_policy", accessPolicy1), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "private_path_service_gateway"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "account.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "private_path_service_gateway_account_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "resource_type"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name, accessPolicyUpdate), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "access_policy", accessPolicyUpdate), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + 
resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "private_path_service_gateway"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "id"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "access_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "account.#"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "href"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "private_path_service_gateway_account_policy"), + resource.TestCheckResourceAttrSet("data.ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", "resource_type"), + ), + }, + resource.TestStep{ + ResourceName: "ibm_is_private_path_service_gateway_account_policy.is_private_path_service_gateway_account_policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyConfigBasic(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name, accessPolicy1 string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + + resource "ibm_is_private_path_service_gateway_account_policy" "is_private_path_service_gateway_account_policy" { + private_path_service_gateway = 
ibm_is_private_path_service_gateway.is_private_path_service_gateway.id + access_policy = "%s" + account = "%s" + + } + `, accessPolicy1, acc.AccountId) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyExists(n string, obj vpcv1.PrivatePathServiceGatewayAccountPolicy) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + vpcClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcV1API() + if err != nil { + return err + } + + getPrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.GetPrivatePathServiceGatewayAccountPolicyOptions{} + + parts, err := flex.SepIdParts(rs.Primary.ID, "/") + if err != nil { + return err + } + + getPrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(parts[0]) + getPrivatePathServiceGatewayAccountPolicyOptions.SetID(parts[1]) + + privatePathServiceGatewayAccountPolicy, _, err := vpcClient.GetPrivatePathServiceGatewayAccountPolicy(getPrivatePathServiceGatewayAccountPolicyOptions) + if err != nil { + return err + } + + obj = *privatePathServiceGatewayAccountPolicy + return nil + } +} + +func testAccCheckIBMIsPrivatePathServiceGatewayAccountPolicyDestroy(s *terraform.State) error { + vpcClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcV1API() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_is_private_path_service_gateway_account_policy" { + continue + } + + getPrivatePathServiceGatewayAccountPolicyOptions := &vpcv1.GetPrivatePathServiceGatewayAccountPolicyOptions{} + + parts, err := flex.SepIdParts(rs.Primary.ID, "/") + if err != nil { + return err + } + + getPrivatePathServiceGatewayAccountPolicyOptions.SetPrivatePathServiceGatewayID(parts[0]) + getPrivatePathServiceGatewayAccountPolicyOptions.SetID(parts[1]) + + // Try to find the key + _, response, err := 
vpcClient.GetPrivatePathServiceGatewayAccountPolicy(getPrivatePathServiceGatewayAccountPolicyOptions) + + if err == nil { + return fmt.Errorf("PrivatePathServiceGatewayAccountPolicy still exists: %s", rs.Primary.ID) + } else if response.StatusCode != 404 { + return fmt.Errorf("Error checking for PrivatePathServiceGatewayAccountPolicy (%s) has been destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_endpoint_gateway_binging_operations.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_endpoint_gateway_binging_operations.go new file mode 100644 index 0000000000..3d4c522d8c --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_endpoint_gateway_binging_operations.go @@ -0,0 +1,139 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +const ( + AccessPolicyEnumPermit = "permit" + AccessPolicyEnumDeny = "deny" +) + +func ResourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperations() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsCreate, + ReadContext: resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsRead, + UpdateContext: resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsUpdate, + DeleteContext: resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The private path service 
gateway identifier.", + }, + "endpoint_gateway_binding": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The private path service gateway identifier.", + }, + "access_policy": { + Type: schema.TypeString, + Required: true, + Description: "Access polict to set for this endpoint gateway binding.", + }, + }, + } +} + +func resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + ppsgId := d.Get("private_path_service_gateway").(string) + egwbindingId := d.Get("endpoint_gateway_binding").(string) + accessPolicy := d.Get("access_policy").(string) + if accessPolicy == AccessPolicyEnumPermit { + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions := &vpcv1.PermitPrivatePathServiceGatewayEndpointGatewayBindingOptions{} + + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetPrivatePathServiceGatewayID(ppsgId) + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetID(egwbindingId) + + response, err := vpcClient.PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext(context, permitPrivatePathServiceGatewayEndpointGatewayBindingOptions) + if err != nil { + log.Printf("[DEBUG] PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response)) + } + } else { + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions := &vpcv1.DenyPrivatePathServiceGatewayEndpointGatewayBindingOptions{} + + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetPrivatePathServiceGatewayID(ppsgId) + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetID(egwbindingId) + + response, err := 
vpcClient.DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext(context, denyPrivatePathServiceGatewayEndpointGatewayBindingOptions) + if err != nil { + log.Printf("[DEBUG] DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response)) + } + } + + d.SetId(fmt.Sprintf("%s/%s", ppsgId, egwbindingId)) + + return resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + ppsgId := d.Get("private_path_service_gateway").(string) + egwbindingId := d.Get("endpoint_gateway_binding").(string) + if d.HasChange("access_policy") { + _, newAccessPolicy := d.GetChange("access_policy") + accessPolicy := newAccessPolicy.(string) + if accessPolicy == AccessPolicyEnumPermit { + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions := &vpcv1.PermitPrivatePathServiceGatewayEndpointGatewayBindingOptions{} + + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetPrivatePathServiceGatewayID(ppsgId) + permitPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetID(egwbindingId) + + response, err := vpcClient.PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext(context, permitPrivatePathServiceGatewayEndpointGatewayBindingOptions) + if err != nil { + log.Printf("[DEBUG] PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response) + return 
diag.FromErr(fmt.Errorf("PermitPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response)) + } + } else { + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions := &vpcv1.DenyPrivatePathServiceGatewayEndpointGatewayBindingOptions{} + + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetPrivatePathServiceGatewayID(ppsgId) + denyPrivatePathServiceGatewayEndpointGatewayBindingOptions.SetID(egwbindingId) + + response, err := vpcClient.DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext(context, denyPrivatePathServiceGatewayEndpointGatewayBindingOptions) + if err != nil { + log.Printf("[DEBUG] DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("DenyPrivatePathServiceGatewayEndpointGatewayBindingWithContext failed %s\n%s", err, response)) + } + } + + } + + return resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayEndpointGatewayBindingOperationsDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + d.SetId("") + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_operations.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_operations.go new file mode 100644 index 0000000000..1fb97a5499 --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_operations.go @@ -0,0 +1,132 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func ResourceIBMIsPrivatePathServiceGatewayOperations() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMIsPrivatePathServiceGatewayOperationsCreate, + ReadContext: resourceIBMIsPrivatePathServiceGatewayOperationsRead, + UpdateContext: resourceIBMIsPrivatePathServiceGatewayOperationsUpdate, + DeleteContext: resourceIBMIsPrivatePathServiceGatewayOperationsDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": { + Type: schema.TypeString, + Required: true, + // ForceNew: true, + Description: "The private path service gateway identifier.", + }, + "published": { + Type: schema.TypeBool, + Required: true, + // ForceNew: true, + Description: "Publish or unpublish PPSG.", + }, + }, + } +} + +func resourceIBMIsPrivatePathServiceGatewayOperationsCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + ppsgId := d.Get("private_path_service_gateway").(string) + publish := d.Get("published").(bool) + if publish { + publishPrivatePathServiceGatewayOptions := &vpcv1.PublishPrivatePathServiceGatewayOptions{} + + publishPrivatePathServiceGatewayOptions.SetPrivatePathServiceGatewayID(ppsgId) + + response, err := vpcClient.PublishPrivatePathServiceGatewayWithContext(context, publishPrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] PublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("PublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, 
response)) + } + + } else { + unpublishPrivatePathServiceGatewayOptions := &vpcv1.UnpublishPrivatePathServiceGatewayOptions{} + + unpublishPrivatePathServiceGatewayOptions.SetPrivatePathServiceGatewayID(ppsgId) + + response, err := vpcClient.UnpublishPrivatePathServiceGatewayWithContext(context, unpublishPrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] unpublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("unpublishublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + } + + d.SetId(ppsgId) + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayOperationsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayOperationsUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + ppsgId := d.Get("private_path_service_gateway").(string) + publish := d.Get("published").(bool) + if publish { + publishPrivatePathServiceGatewayOptions := &vpcv1.PublishPrivatePathServiceGatewayOptions{} + + publishPrivatePathServiceGatewayOptions.SetPrivatePathServiceGatewayID(ppsgId) + + response, err := vpcClient.PublishPrivatePathServiceGatewayWithContext(context, publishPrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] PublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + resetPublishedSchemaValue(context, d) + return diag.FromErr(fmt.Errorf("PublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + } else { + unpublishPrivatePathServiceGatewayOptions := &vpcv1.UnpublishPrivatePathServiceGatewayOptions{} + + unpublishPrivatePathServiceGatewayOptions.SetPrivatePathServiceGatewayID(ppsgId) + + response, err := 
vpcClient.UnpublishPrivatePathServiceGatewayWithContext(context, unpublishPrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] unpublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + resetPublishedSchemaValue(context, d) + return diag.FromErr(fmt.Errorf("unpublishublishPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + } + return nil +} +func resetPublishedSchemaValue(context context.Context, d *schema.ResourceData) { + if d.HasChange("published") { + oldIntf, newIntf := d.GetChange("published") + if oldIntf.(bool) != newIntf.(bool) { + d.Set("published", oldIntf.(bool)) + } + } +} +func resourceIBMIsPrivatePathServiceGatewayOperationsDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + d.SetId("") + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_revoke_account.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_revoke_account.go new file mode 100644 index 0000000000..b94b72ffb0 --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_revoke_account.go @@ -0,0 +1,85 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func ResourceIBMIsPrivatePathServiceGatewayRevokeAccount() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMIsPrivatePathServiceGatewayRevokeAccountCreate, + ReadContext: resourceIBMIsPrivatePathServiceGatewayRevokeAccountRead, + UpdateContext: resourceIBMIsPrivatePathServiceGatewayRevokeAccountUpdate, + DeleteContext: resourceIBMIsPrivatePathServiceGatewayRevokeAccountDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "private_path_service_gateway": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The private path service gateway identifier.", + }, + "account": { + Type: schema.TypeString, + Required: true, + //ForceNew: true, + Description: "The account for this access policy.", + }, + }, + } +} + +func resourceIBMIsPrivatePathServiceGatewayRevokeAccountCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vpcClient, err := meta.(conns.ClientSession).VpcV1API() + if err != nil { + return diag.FromErr(err) + } + + revokePrivatePathServiceGatewayOptions := &vpcv1.RevokeAccountForPrivatePathServiceGatewayOptions{} + + revokePrivatePathServiceGatewayOptions.SetPrivatePathServiceGatewayID(d.Get("private_path_service_gateway").(string)) + + accountId := d.Get("account").(string) + account := &vpcv1.AccountIdentity{ + ID: &accountId, + } + revokePrivatePathServiceGatewayOptions.SetAccount(account) + + response, err := vpcClient.RevokeAccountForPrivatePathServiceGatewayWithContext(context, revokePrivatePathServiceGatewayOptions) + if err != nil { + log.Printf("[DEBUG] 
RevokeAccountForPrivatePathServiceGatewayWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("RevokeAccountForPrivatePathServiceGatewayWithContext failed %s\n%s", err, response)) + } + + d.SetId(*revokePrivatePathServiceGatewayOptions.PrivatePathServiceGatewayID) + + return resourceIBMIsPrivatePathServiceGatewayRevokeAccountRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayRevokeAccountRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + return nil +} + +func resourceIBMIsPrivatePathServiceGatewayRevokeAccountUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + return resourceIBMIsPrivatePathServiceGatewayRevokeAccountRead(context, d, meta) +} + +func resourceIBMIsPrivatePathServiceGatewayRevokeAccountDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + d.SetId("") + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_test.go b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_test.go new file mode 100644 index 0000000000..24a6e7f2e6 --- /dev/null +++ b/ibm/service/vpc/resource_ibm_is_private_path_service_gateway_test.go @@ -0,0 +1,119 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package vpc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/vpc-go-sdk/vpcv1" +) + +func TestAccIBMIsPrivatePathServiceGatewayBasic(t *testing.T) { + var conf vpcv1.PrivatePathServiceGateway + accessPolicy := "deny" + accessPolicyUpdate := "review" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + nameUpdated := fmt.Sprintf("tf-test-ppsg-updated%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMIsPrivatePathServiceGatewayDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMIsPrivatePathServiceGatewayExists("ibm_is_private_path_service_gateway.is_private_path_service_gateway", conf), + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway.is_private_path_service_gateway", "default_access_policy", accessPolicy), + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway.is_private_path_service_gateway", "name", name), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicyUpdate, 
nameUpdated), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway.is_private_path_service_gateway", "default_access_policy", accessPolicyUpdate), + resource.TestCheckResourceAttr("ibm_is_private_path_service_gateway.is_private_path_service_gateway", "name", nameUpdated), + ), + }, + resource.TestStep{ + ResourceName: "ibm_is_private_path_service_gateway.is_private_path_service_gateway", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name string) string { + return testAccCheckIBMISPPNLB(vpcname, subnetname, zone, cidr, lbname) + fmt.Sprintf(` + resource "ibm_is_private_path_service_gateway" "is_private_path_service_gateway" { + default_access_policy = "%s" + name = "%s" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["mytestfqdn.internal"] + } + `, accessPolicy, name) +} + +func testAccCheckIBMIsPrivatePathServiceGatewayExists(n string, obj vpcv1.PrivatePathServiceGateway) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + vpcClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcV1API() + if err != nil { + return err + } + + getPrivatePathServiceGatewayOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{} + getPrivatePathServiceGatewayOptions.SetID(rs.Primary.ID) + + privatePathServiceGateway, _, err := vpcClient.GetPrivatePathServiceGateway(getPrivatePathServiceGatewayOptions) + if err != nil { + return err + } + + obj = *privatePathServiceGateway + return nil + } +} + +func testAccCheckIBMIsPrivatePathServiceGatewayDestroy(s *terraform.State) error { + vpcClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcV1API() + if err != nil { + return err + } + for _, rs := range 
s.RootModule().Resources { + if rs.Type != "ibm_is_private_path_service_gateway" { + continue + } + + getPrivatePathServiceGatewayOptions := &vpcv1.GetPrivatePathServiceGatewayOptions{} + getPrivatePathServiceGatewayOptions.SetID(rs.Primary.ID) + + // Try to find the key + _, response, err := vpcClient.GetPrivatePathServiceGateway(getPrivatePathServiceGatewayOptions) + + if err == nil { + return fmt.Errorf("PrivatePathServiceGateway still exists: %s", rs.Primary.ID) + } else if response.StatusCode != 404 { + return fmt.Errorf("Error checking for PrivatePathServiceGateway (%s) has been destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} diff --git a/ibm/service/vpc/resource_ibm_is_subnet.go b/ibm/service/vpc/resource_ibm_is_subnet.go index fc8883eccf..ff6ef5b0bd 100644 --- a/ibm/service/vpc/resource_ibm_is_subnet.go +++ b/ibm/service/vpc/resource_ibm_is_subnet.go @@ -42,6 +42,7 @@ const ( isSubnetDeleting = "deleting" isSubnetDeleted = "done" isSubnetRoutingTableID = "routing_table" + isSubnetRoutingTableCrn = "routing_table_crn" isSubnetInUse = "resources_attached" isSubnetAccessTags = "access_tags" isUserTagType = "user" @@ -183,13 +184,21 @@ func ResourceIBMISSubnet() *schema.Resource { Description: "The resource group for this subnet", }, isSubnetRoutingTableID: { - Type: schema.TypeString, - Optional: true, - ForceNew: false, - Computed: true, - Description: "routing table id that is associated with the subnet", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{isSubnetRoutingTableCrn}, + Computed: true, + Description: "routing table id that is associated with the subnet", + }, + isSubnetRoutingTableCrn: { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Optional: true, + ConflictsWith: []string{isSubnetRoutingTableID}, + Description: "routing table crn that is associated with the subnet.", }, - flex.ResourceControllerURL: { Type: schema.TypeString, Computed: true, @@ -309,7 +318,12 @@ func 
resourceIBMISSubnetCreate(d *schema.ResourceData, meta interface{}) error { rtID = rt.(string) } - err := subnetCreate(d, meta, name, vpc, zone, ipv4cidr, acl, gw, rtID, ipv4addrcount64) + rtCrn := "" + if rtcrn, ok := d.GetOk(isSubnetRoutingTableCrn); ok { + rtCrn = rtcrn.(string) + } + + err := subnetCreate(d, meta, name, vpc, zone, ipv4cidr, acl, gw, rtID, rtCrn, ipv4addrcount64) if err != nil { return err } @@ -317,7 +331,7 @@ func resourceIBMISSubnetCreate(d *schema.ResourceData, meta interface{}) error { return resourceIBMISSubnetRead(d, meta) } -func subnetCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone, ipv4cidr, acl, gw, rtID string, ipv4addrcount64 int64) error { +func subnetCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone, ipv4cidr, acl, gw, rtID, rtCrn string, ipv4addrcount64 int64) error { sess, err := vpcClient(meta) if err != nil { @@ -355,6 +369,12 @@ func subnetCreate(d *schema.ResourceData, meta interface{}, name, vpc, zone, ipv ID: &rt, } } + if rtCrn != "" { + subnetTemplate.RoutingTable = &vpcv1.RoutingTableIdentity{ + CRN: &rtCrn, + } + } + rg := "" if grp, ok := d.GetOk(isSubnetResourceGroup); ok { rg = grp.(string) @@ -474,8 +494,10 @@ func subnetGet(d *schema.ResourceData, meta interface{}, id string) error { } if subnet.RoutingTable != nil { d.Set(isSubnetRoutingTableID, *subnet.RoutingTable.ID) + d.Set(isSubnetRoutingTableCrn, *subnet.RoutingTable.CRN) } else { d.Set(isSubnetRoutingTableID, nil) + d.Set(isSubnetRoutingTableCrn, nil) } d.Set(isSubnetStatus, *subnet.Status) d.Set(isSubnetZone, *subnet.Zone.Name) @@ -613,6 +635,14 @@ func subnetUpdate(d *schema.ResourceData, meta interface{}, id string) error { return err }*/ } + if d.HasChange(isSubnetRoutingTableCrn) { + hasChanged = true + rtCrn := d.Get(isSubnetRoutingTableCrn).(string) + // Construct an instance of the RoutingTableIdentityByCRN model + routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByCRN) + routingTableIdentityModel.CRN = 
&rtCrn + subnetPatchModel.RoutingTable = routingTableIdentityModel + } if hasChanged { subnetPatch, err := subnetPatchModel.AsPatch() if err != nil { diff --git a/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment.go b/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment.go index fa9a4b5374..5547446032 100644 --- a/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment.go +++ b/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment.go @@ -37,9 +37,19 @@ func ResourceIBMISSubnetRoutingTableAttachment() *schema.Resource { }, isRoutingTableID: { - Type: schema.TypeString, - Required: true, - Description: "The unique identifier of routing table", + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{isRoutingTableID, isRoutingTableCrn}, + Description: "The unique identifier of routing table", + }, + + isRoutingTableCrn: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{isRoutingTableID, isRoutingTableCrn}, + Description: "The crn of routing table", }, rtRouteDirectLinkIngress: { @@ -119,6 +129,30 @@ func ResourceIBMISSubnetRoutingTableAttachment() *schema.Resource { }, }, }, + rtResourceGroup: { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this volume.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + rtResourceGroupHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + rtResourceGroupId: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + rtResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, }, } } @@ -130,16 +164,29 @@ func resourceIBMISSubnetRoutingTableAttachmentCreate(context context.Context, d } subnet := d.Get(isSubnetID).(string) - routingTable := 
d.Get(isRoutingTableID).(string) - - // Construct an instance of the RoutingTableIdentityByID model - routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByID) - routingTableIdentityModel.ID = &routingTable - - // Construct an instance of the ReplaceSubnetRoutingTableOptions model + routingTableID := d.Get(isRoutingTableID).(string) + routingTableCrn := d.Get(isRoutingTableCrn).(string) replaceSubnetRoutingTableOptionsModel := new(vpcv1.ReplaceSubnetRoutingTableOptions) replaceSubnetRoutingTableOptionsModel.ID = &subnet - replaceSubnetRoutingTableOptionsModel.RoutingTableIdentity = routingTableIdentityModel + + if routingTableID != "" { + // Construct an instance of the RoutingTableIdentityByID model + routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByID) + routingTableIdentityModel.ID = &routingTableID + + // Construct an instance of the ReplaceSubnetRoutingTableOptions model + replaceSubnetRoutingTableOptionsModel.RoutingTableIdentity = routingTableIdentityModel + } + + if routingTableCrn != "" { + // Construct an instance of the RoutingTableIdentityByID model + routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByCRN) + routingTableIdentityModel.CRN = &routingTableCrn + + // Construct an instance of the ReplaceSubnetRoutingTableOptions model + replaceSubnetRoutingTableOptionsModel.RoutingTableIdentity = routingTableIdentityModel + } + resultRT, response, err := sess.ReplaceSubnetRoutingTableWithContext(context, replaceSubnetRoutingTableOptionsModel) if err != nil { @@ -175,6 +222,7 @@ func resourceIBMISSubnetRoutingTableAttachmentRead(context context.Context, d *s d.Set(isRoutingTableName, *subRT.Name) d.Set(isSubnetID, id) d.Set(isRoutingTableID, *subRT.ID) + d.Set(isRoutingTableCrn, *subRT.CRN) d.Set(isRoutingTableResourceType, *subRT.ResourceType) d.Set(rtRouteDirectLinkIngress, *subRT.RouteDirectLinkIngress) d.Set(rtIsDefault, *subRT.IsDefault) @@ -200,6 +248,14 @@ func resourceIBMISSubnetRoutingTableAttachmentRead(context 
context.Context, d *s routes = append(routes, route) } d.Set(rtRoutes, routes) + + resourceGroupList := []map[string]interface{}{} + if subRT.ResourceGroup != nil { + resourceGroupMap := routingTableResourceGroupToMap(*subRT.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + } + d.Set(rtResourceGroup, resourceGroupList) + return nil } @@ -233,6 +289,30 @@ func resourceIBMISSubnetRoutingTableAttachmentUpdate(context context.Context, d return resourceIBMISSubnetRoutingTableAttachmentRead(context, d, meta) } + if d.HasChange(isRoutingTableCrn) { + subnet := d.Get(isSubnetID).(string) + routingTableCrn := d.Get(isRoutingTableCrn).(string) + + // Construct an instance of the RoutingTableIdentityByID model + routingTableIdentityModel := new(vpcv1.RoutingTableIdentityByCRN) + routingTableIdentityModel.CRN = &routingTableCrn + + // Construct an instance of the ReplaceSubnetRoutingTableOptions model + replaceSubnetRoutingTableOptionsModel := new(vpcv1.ReplaceSubnetRoutingTableOptions) + replaceSubnetRoutingTableOptionsModel.ID = &subnet + replaceSubnetRoutingTableOptionsModel.RoutingTableIdentity = routingTableIdentityModel + resultRT, response, err := sess.ReplaceSubnetRoutingTableWithContext(context, replaceSubnetRoutingTableOptionsModel) + + if err != nil { + log.Printf("[DEBUG] Error while attaching a routing table to a subnet %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("[ERROR] Error while attaching a routing table to a subnet %s\n%s", err, response)) + } + log.Printf("[INFO] Updated subnet %s with Routing Table Crn : %s", subnet, *resultRT.CRN) + + d.SetId(subnet) + return resourceIBMISSubnetRoutingTableAttachmentRead(context, d, meta) + } + return resourceIBMISSubnetRoutingTableAttachmentRead(context, d, meta) } diff --git a/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment_test.go b/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment_test.go index 69717214ff..87b78d08cc 100644 --- 
a/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment_test.go +++ b/ibm/service/vpc/resource_ibm_is_subnet_routing_table_attachment_test.go @@ -43,6 +43,32 @@ func TestAccIBMISSubnetRoutingTableAttachment_basic(t *testing.T) { }) } +func TestAccIBMISSubnetRoutingTableAttachment_CRNSupport(t *testing.T) { + var subnetRT string + rtname := fmt.Sprintf("tfrt-%d", acctest.RandIntRange(10, 100)) + vpcname := fmt.Sprintf("tfvpc-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tfsubnet-vpc-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: checkSubnetRoutingTableAttachmentDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMISSubnetRoutingTableAttachmentCRNConfig(rtname, subnetname, vpcname, acc.ISZoneName), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISSubnetRoutingTableAttachmentExists("ibm_is_subnet_routing_table_attachment.attach", subnetRT), + resource.TestCheckResourceAttrSet( + "ibm_is_subnet_routing_table_attachment.attach", "lifecycle_state"), + resource.TestCheckResourceAttrSet( + "ibm_is_subnet_routing_table_attachment.attach", "crn"), + resource.TestCheckResourceAttrSet( + "ibm_is_subnet_routing_table_attachment.attach", "id"), + ), + }, + }, + }) +} + func checkSubnetRoutingTableAttachmentDestroy(s *terraform.State) error { sess, _ := acc.TestAccProvider.Meta().(conns.ClientSession).VpcV1API() @@ -114,3 +140,29 @@ func testAccCheckIBMISSubnetRoutingTableAttachmentConfig(rtname, subnetname, vpc `, vpcname, rtname, subnetname, zone) } + +func testAccCheckIBMISSubnetRoutingTableAttachmentCRNConfig(rtname, subnetname, vpcname, zone string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + resource "ibm_is_vpc_routing_table" "testacc_vpc_routing_table" { + vpc = ibm_is_vpc.testacc_vpc.id + name = "%s" + } + + resource 
"ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + total_ipv4_address_count = 16 + } + + resource "ibm_is_subnet_routing_table_attachment" "attach" { + depends_on = [ibm_is_vpc_routing_table.testacc_vpc_routing_table, ibm_is_subnet.testacc_subnet] + subnet = ibm_is_subnet.testacc_subnet.id + routing_table = ibm_is_vpc_routing_table.testacc_vpc_routing_table.crn + } + + `, vpcname, rtname, subnetname, zone) +} diff --git a/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway.go b/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway.go index 0f0efd956f..e4780d96ea 100644 --- a/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway.go +++ b/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway.go @@ -33,6 +33,7 @@ const ( isVirtualEndpointGatewayIPsResourceType = "resource_type" isVirtualEndpointGatewayHealthState = "health_state" isVirtualEndpointGatewayLifecycleState = "lifecycle_state" + isVirtualEndpointGatewayLifecycleReasons = "lifecycle_reasons" isVirtualEndpointGatewayTarget = "target" isVirtualEndpointGatewayTargetName = "name" isVirtualEndpointGatewayTargetCRN = "crn" @@ -122,6 +123,32 @@ func ResourceIBMISEndpointGateway() *schema.Resource { Computed: true, Description: "Endpoint gateway lifecycle state", }, + isVirtualEndpointGatewayLifecycleReasons: { + Type: schema.TypeList, + Computed: true, + Description: "The reasons for the current lifecycle_state (if any).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the reason for this lifecycle state.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the reason for this lifecycle state.", + }, + + "more_info": { + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about the reason for this lifecycle state.", + }, + }, + }, + }, 
isVirtualEndpointGatewaySecurityGroups: { Type: schema.TypeSet, Computed: true, @@ -179,6 +206,7 @@ func ResourceIBMISEndpointGateway() *schema.Resource { isVirtualEndpointGatewayTargetName: { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, AtLeastOneOf: []string{ targetNameFmt, @@ -263,7 +291,7 @@ func ResourceIBMISEndpointGatewayValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, Type: validate.TypeString, Required: true, - AllowedValues: "provider_cloud_service, provider_infrastructure_service"}) + AllowedValues: "provider_cloud_service, provider_infrastructure_service, private_path_service_gateway"}) validateSchema = append(validateSchema, validate.ValidateSchema{ @@ -354,9 +382,16 @@ func resourceIBMisVirtualEndpointGatewayCreate(d *schema.ResourceData, meta inte d.SetId(*endpointGateway.ID) - _, err = isWaitForVirtualEndpointGatewayAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) - if err != nil { - return err + if d.Get(targetResourceTypeFmt).(string) == "private_path_service_gateway" { + _, err = isWaitForVirtualEndpointGatewayForPPSGAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } else { + _, err = isWaitForVirtualEndpointGatewayAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } } v := os.Getenv("IC_ENV_TAGS") if _, ok := d.GetOk(isVirtualEndpointGatewayTags); ok || v != "" { @@ -385,6 +420,7 @@ func resourceIBMisVirtualEndpointGatewayUpdate(d *schema.ResourceData, meta inte if err != nil { return err } + // create option endpointGatewayPatchModel := new(vpcv1.EndpointGatewayPatch) if d.HasChange(isVirtualEndpointGatewayName) { @@ -498,6 +534,9 @@ func resourceIBMisVirtualEndpointGatewayRead(d *schema.ResourceData, meta interf d.Set(isVirtualEndpointGatewayHealthState, endpointGateway.HealthState) d.Set(isVirtualEndpointGatewayCreatedAt, endpointGateway.CreatedAt.String()) 
d.Set(isVirtualEndpointGatewayLifecycleState, endpointGateway.LifecycleState) + if err := d.Set(isVirtualEndpointGatewayLifecycleReasons, resourceEGWFlattenLifecycleReasons(endpointGateway.LifecycleReasons)); err != nil { + return fmt.Errorf("[ERROR] Error setting lifecycle_reasons: %s", err) + } d.Set(isVirtualEndpointGatewayAllowDnsResolutionBinding, endpointGateway.AllowDnsResolutionBinding) d.Set(isVirtualEndpointGatewayResourceType, endpointGateway.ResourceType) d.Set(isVirtualEndpointGatewayCRN, endpointGateway.CRN) @@ -505,9 +544,11 @@ func resourceIBMisVirtualEndpointGatewayRead(d *schema.ResourceData, meta interf d.Set(isVirtualEndpointGatewayResourceGroupID, endpointGateway.ResourceGroup.ID) d.Set(isVirtualEndpointGatewayTarget, flattenEndpointGatewayTarget(endpointGateway.Target.(*vpcv1.EndpointGatewayTarget))) + serviceEndpoints := []string{} if len(endpointGateway.ServiceEndpoints) > 0 { - d.Set(isVirtualEndpointGatewayServiceEndpoints, endpointGateway.ServiceEndpoints) + serviceEndpoints = endpointGateway.ServiceEndpoints } + d.Set(isVirtualEndpointGatewayServiceEndpoints, serviceEndpoints) d.Set(isVirtualEndpointGatewayVpcID, endpointGateway.VPC.ID) if endpointGateway.SecurityGroups != nil { d.Set(isVirtualEndpointGatewaySecurityGroups, flattenDataSourceSecurityGroups(endpointGateway.SecurityGroups)) @@ -555,6 +596,22 @@ func isWaitForVirtualEndpointGatewayAvailable(sess *vpcv1.VpcV1, endPointGateway return stateConf.WaitForState() } +func isWaitForVirtualEndpointGatewayForPPSGAvailable(sess *vpcv1.VpcV1, endPointGatewayId string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting for virtual endpoint gateway (%s) to be available.", endPointGatewayId) + // When the target is PPSG, pending is a valid state when the endpoint gateway binding is not permitted within the terraform configuration. 
+ stateConf := &resource.StateChangeConf{ + Pending: []string{"waiting", "updating"}, + Target: []string{"stable", "failed", "pending", ""}, + Refresh: isVirtualEndpointGatewayRefreshFunc(sess, endPointGatewayId), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + ContinuousTargetOccurence: 6, + } + + return stateConf.WaitForState() +} + func isVirtualEndpointGatewayRefreshFunc(sess *vpcv1.VpcV1, endPointGatewayId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { @@ -584,9 +641,42 @@ func resourceIBMisVirtualEndpointGatewayDelete(d *schema.ResourceData, meta inte log.Printf("Delete Endpoint Gateway failed: %v", response) return fmt.Errorf("Delete Endpoint Gateway failed : %s\n%s", err, response) } + _, err = isWaitForEGWDelete(sess, d, d.Id()) + if err != nil { + return err + } return nil } +func isWaitForEGWDelete(vpcClient *vpcv1.VpcV1, d *schema.ResourceData, id string) (interface{}, error) { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "stable"}, + Target: []string{"done", ""}, + Refresh: func() (interface{}, string, error) { + getegwoptions := &vpcv1.GetEndpointGatewayOptions{ + ID: &id, + } + egw, response, err := vpcClient.GetEndpointGateway(getegwoptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + return egw, "done", nil + } + return nil, "", fmt.Errorf("[ERROR] Error Getting EGW: %s\n%s", err, response) + } + if *egw.LifecycleState == "failed" { + return egw, *egw.LifecycleState, fmt.Errorf("[ERROR] The egw %s failed to delete: %v", d.Id(), err) + } + return egw, "deleting", nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + func resourceIBMisVirtualEndpointGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) { sess, err := vpcClient(meta) if err != nil { @@ -667,3 +757,19 @@ func flattenEndpointGatewayTarget(target 
*vpcv1.EndpointGatewayTarget) interface targetSlice = append(targetSlice, targetOutput) return targetSlice } + +func resourceEGWFlattenLifecycleReasons(lifecycleReasons []vpcv1.EndpointGatewayLifecycleReason) (lifecycleReasonsList []map[string]interface{}) { + lifecycleReasonsList = make([]map[string]interface{}, 0) + for _, lr := range lifecycleReasons { + currentLR := map[string]interface{}{} + if lr.Code != nil && lr.Message != nil { + currentLR[isInstanceLifecycleReasonsCode] = *lr.Code + currentLR[isInstanceLifecycleReasonsMessage] = *lr.Message + if lr.MoreInfo != nil { + currentLR[isInstanceLifecycleReasonsMoreInfo] = *lr.MoreInfo + } + lifecycleReasonsList = append(lifecycleReasonsList, currentLR) + } + } + return lifecycleReasonsList +} diff --git a/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway_test.go b/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway_test.go index 581dee47df..278f10bbad 100644 --- a/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway_test.go +++ b/ibm/service/vpc/resource_ibm_is_virtual_endpoint_gateway_test.go @@ -36,6 +36,32 @@ func TestAccIBMISVirtualEndpointGateway_Basic(t *testing.T) { }) } +func TestAccIBMISVirtualEndpointGateway_PPSG(t *testing.T) { + var endpointGateway string + accessPolicy := "deny" + vpcname := fmt.Sprintf("tflb-vpc-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tflb-subnet-name-%d", acctest.RandIntRange(10, 100)) + lbname := fmt.Sprintf("tf-test-lb%dd", acctest.RandIntRange(10, 100)) + name1 := fmt.Sprintf("tf-test-ppsg%d", acctest.RandIntRange(10, 100)) + name := "ibm_is_virtual_endpoint_gateway.endpoint_gateway" + // targetName := fmt.Sprintf("tf-egw-target%d", acctest.RandIntRange(10, 100)) + egwName := fmt.Sprintf("tf-egw%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccCheckisVirtualEndpointGatewayConfigPPSG(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name1, egwName), + Check: resource.ComposeTestCheckFunc( + testAccCheckisVirtualEndpointGatewayExists(name, &endpointGateway), + resource.TestCheckResourceAttr(name, "name", egwName), + resource.TestCheckResourceAttr(name, "target.0.name", name1), + resource.TestCheckResourceAttr(name, "target.0.resource_type", "private_path_service_gateway"), + ), + }, + }, + }) +} func TestAccIBMISVirtualEndpointGateway_AllowDnsResolutionBinding(t *testing.T) { var endpointGateway string vpcname1 := fmt.Sprintf("tfvpngw-vpc-%d", acctest.RandIntRange(10, 100)) @@ -316,6 +342,21 @@ func testAccCheckisVirtualEndpointGatewayConfigBasic(vpcname1, subnetname1, name }`, vpcname1, subnetname1, acc.ISZoneName, acc.ISCIDR, name1) } +func testAccCheckisVirtualEndpointGatewayConfigPPSG(vpcname, subnetname, zone, cidr, lbname, accessPolicy, name, egwName string) string { + return testAccCheckIBMIsPrivatePathServiceGatewayConfigBasic(vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, lbname, accessPolicy, name) + fmt.Sprintf(` + data "ibm_resource_group" "test_acc" { + is_default=true + } + resource "ibm_is_virtual_endpoint_gateway" "endpoint_gateway" { + name = "%s" + target { + crn = ibm_is_private_path_service_gateway.is_private_path_service_gateway.crn + resource_type = "private_path_service_gateway" + } + vpc = ibm_is_vpc.testacc_vpc.id + resource_group = data.ibm_resource_group.test_acc.id + }`, egwName) +} func testAccCheckisVirtualEndpointGatewayConfigAllowDnsResolutionBinding(vpcname1, name1 string, enable_hub, allowDnsResolutionBinding bool) string { return fmt.Sprintf(` resource "ibm_is_vpc" "testacc_vpc" { diff --git a/ibm/service/vpc/resource_ibm_is_volume.go b/ibm/service/vpc/resource_ibm_is_volume.go index 0fcf79e00a..210437396b 100644 --- a/ibm/service/vpc/resource_ibm_is_volume.go +++ b/ibm/service/vpc/resource_ibm_is_volume.go @@ -124,12 +124,12 @@ func 
ResourceIBMISVolume() *schema.Resource { }, isVolumeCapacity: { - Type: schema.TypeInt, - Optional: true, - ForceNew: false, - Computed: true, - ValidateFunc: validate.InvokeValidator("ibm_is_volume", isVolumeCapacity), - Description: "Volume capacity value", + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Computed: true, + // ValidateFunc: validate.InvokeValidator("ibm_is_volume", isVolumeCapacity), + Description: "Volume capacity value", }, isVolumeSourceSnapshot: { Type: schema.TypeString, @@ -156,11 +156,11 @@ func ResourceIBMISVolume() *schema.Resource { Description: "Resource group name", }, isVolumeIops: { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validate.InvokeValidator("ibm_is_volume", isVolumeIops), - Description: "IOPS value for the Volume", + Type: schema.TypeInt, + Optional: true, + Computed: true, + // ValidateFunc: validate.InvokeValidator("ibm_is_volume", isVolumeIops), + Description: "IOPS value for the Volume", }, isVolumeCrn: { Type: schema.TypeString, @@ -231,6 +231,19 @@ func ResourceIBMISVolume() *schema.Resource { }, }, }, + // defined_performance changes + "adjustable_capacity_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable capacity for this volume.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "adjustable_iops_states": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The attachment states that support adjustable IOPS for this volume.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, isVolumeHealthReasons: { Type: schema.TypeList, Computed: true, @@ -410,22 +423,20 @@ func ResourceIBMISVolumeValidator() *validate.ResourceValidator { ValidateFunctionIdentifier: validate.ValidateAllowedStringValue, Type: validate.TypeString, Optional: true, - AllowedValues: "general-purpose, 5iops-tier, 10iops-tier, custom", + AllowedValues: "general-purpose, 5iops-tier, 10iops-tier, 
custom, sdp", }) validateSchema = append(validateSchema, validate.ValidateSchema{ Identifier: isVolumeCapacity, ValidateFunctionIdentifier: validate.IntBetween, Type: validate.TypeInt, - MinValue: "10", - MaxValue: "16000"}) + MinValue: "10"}) validateSchema = append(validateSchema, validate.ValidateSchema{ Identifier: isVolumeIops, ValidateFunctionIdentifier: validate.IntBetween, Type: validate.TypeInt, - MinValue: "100", - MaxValue: "48000"}) + MinValue: "100"}) validateSchema = append(validateSchema, validate.ValidateSchema{ Identifier: "accesstag", @@ -716,6 +727,16 @@ func volGet(d *schema.ResourceData, meta interface{}, id string) error { if err != nil { return err } + // defined_performance changes + + if err = d.Set("adjustable_capacity_states", vol.AdjustableCapacityStates); err != nil { + err = fmt.Errorf("Error setting adjustable_capacity_states: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_is_volume", "read", "set-adjustable_capacity_states") + } + if err = d.Set("adjustable_iops_states", vol.AdjustableIopsStates); err != nil { + err = fmt.Errorf("Error setting adjustable_iops_states: %s", err) + return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_is_volume", "read", "set-adjustable_iops_states") + } d.Set(flex.ResourceControllerURL, controller+"/vpc-ext/storage/storageVolumes") d.Set(flex.ResourceName, *vol.Name) d.Set(flex.ResourceCRN, *vol.CRN) @@ -786,7 +807,7 @@ func volUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasNam optionsget := &vpcv1.GetVolumeOptions{ ID: &id, } - _, response, err := sess.GetVolume(optionsget) + oldVol, response, err := sess.GetVolume(optionsget) if err != nil { if response != nil && response.StatusCode == 404 { d.SetId("") @@ -809,15 +830,39 @@ func volUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasNam return fmt.Errorf("[ERROR] Error calling asPatch for volumeNamePatch: %s", err) } options.VolumePatch = volumeNamePatch - _, _, err = 
sess.UpdateVolume(options) + _, response, err = sess.UpdateVolume(options) + if err != nil { + return err + } _, err = isWaitForVolumeAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) if err != nil { return err } + eTag = response.Headers.Get("ETag") + options.IfMatch = &eTag } // profile/ iops update - if d.HasChange(isVolumeProfileName) || d.HasChange(isVolumeIops) { + if !d.HasChange(isVolumeProfileName) && *oldVol.Profile.Name == "sdp" && d.HasChange(isVolumeIops) { + volumeProfilePatchModel := &vpcv1.VolumePatch{} + iops := int64(d.Get(isVolumeIops).(int)) + volumeProfilePatchModel.Iops = &iops + volumeProfilePatch, err := volumeProfilePatchModel.AsPatch() + if err != nil { + return fmt.Errorf("[ERROR] Error calling asPatch for VolumeProfilePatch for sdp profiles : %s", err) + } + options.VolumePatch = volumeProfilePatch + _, response, err = sess.UpdateVolume(options) + if err != nil { + return err + } + _, err = isWaitForVolumeAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + eTag = response.Headers.Get("ETag") + options.IfMatch = &eTag + } else if d.HasChange(isVolumeProfileName) || d.HasChange(isVolumeIops) { volumeProfilePatchModel := &vpcv1.VolumePatch{} volId := d.Id() getvoloptions := &vpcv1.GetVolumeOptions{ @@ -874,6 +919,11 @@ func volUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasNam } options.VolumePatch = volumeProfilePatch _, response, err = sess.UpdateVolume(options) + if err != nil { + return err + } + eTag = response.Headers.Get("ETag") + options.IfMatch = &eTag _, err = isWaitForVolumeAvailable(sess, d.Id(), d.Timeout(schema.TimeoutCreate)) if err != nil { return err @@ -894,32 +944,37 @@ func volUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasNam } return fmt.Errorf("[ERROR] Error Getting Volume (%s): %s\n%s", id, err, response) } - if vol.VolumeAttachments == nil || len(vol.VolumeAttachments) == 0 || *vol.VolumeAttachments[0].ID == "" { - return 
fmt.Errorf("[ERROR] Error volume capacity can't be updated since volume %s is not attached to any instance for VolumePatch", id) - } - insId := vol.VolumeAttachments[0].Instance.ID - getinsOptions := &vpcv1.GetInstanceOptions{ - ID: insId, - } - instance, response, err := sess.GetInstance(getinsOptions) - if err != nil || instance == nil { - return fmt.Errorf("[ERROR] Error retrieving Instance (%s) : %s\n%s", *insId, err, response) - } - if instance != nil && *instance.Status != "running" { - actiontype := "start" - createinsactoptions := &vpcv1.CreateInstanceActionOptions{ - InstanceID: insId, - Type: &actiontype, + eTag = response.Headers.Get("ETag") + options.IfMatch = &eTag + if *vol.Profile.Name != "sdp" { + if vol.VolumeAttachments == nil || len(vol.VolumeAttachments) == 0 || *vol.VolumeAttachments[0].ID == "" { + return fmt.Errorf("[ERROR] Error volume capacity can't be updated since volume %s is not attached to any instance for VolumePatch", id) } - _, response, err = sess.CreateInstanceAction(createinsactoptions) - if err != nil { - return fmt.Errorf("[ERROR] Error starting Instance (%s) : %s\n%s", *insId, err, response) + insId := vol.VolumeAttachments[0].Instance.ID + getinsOptions := &vpcv1.GetInstanceOptions{ + ID: insId, } - _, err = isWaitForInstanceAvailable(sess, *insId, d.Timeout(schema.TimeoutCreate), d) - if err != nil { - return err + instance, response, err := sess.GetInstance(getinsOptions) + if err != nil || instance == nil { + return fmt.Errorf("[ERROR] Error retrieving Instance (%s) : %s\n%s", *insId, err, response) + } + if instance != nil && *instance.Status != "running" { + actiontype := "start" + createinsactoptions := &vpcv1.CreateInstanceActionOptions{ + InstanceID: insId, + Type: &actiontype, + } + _, response, err = sess.CreateInstanceAction(createinsactoptions) + if err != nil { + return fmt.Errorf("[ERROR] Error starting Instance (%s) : %s\n%s", *insId, err, response) + } + _, err = isWaitForInstanceAvailable(sess, *insId, 
d.Timeout(schema.TimeoutCreate), d) + if err != nil { + return err + } } } + capacity = int64(d.Get(isVolumeCapacity).(int)) volumeCapacityPatchModel := &vpcv1.VolumePatch{} volumeCapacityPatchModel.Capacity = &capacity diff --git a/ibm/service/vpc/resource_ibm_is_volume_test.go b/ibm/service/vpc/resource_ibm_is_volume_test.go index da6f147ca1..df86461ad7 100644 --- a/ibm/service/vpc/resource_ibm_is_volume_test.go +++ b/ibm/service/vpc/resource_ibm_is_volume_test.go @@ -48,6 +48,106 @@ func TestAccIBMISVolume_basic(t *testing.T) { }, }) } +func TestAccIBMISVolume_sdp(t *testing.T) { + var vol string + name := fmt.Sprintf("tf-vol-%d", acctest.RandIntRange(10, 100)) + name1 := fmt.Sprintf("tf-vol-upd-%d", acctest.RandIntRange(10, 100)) + capacity1 := 16000 + capacity2 := 32000 + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVolumeSdpConfig(name, capacity1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity1)), + ), + }, + + { + Config: testAccCheckIBMISVolumeSdpConfig(name1, capacity2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity2)), + ), + }, + }, + }) +} +func TestAccIBMISVolume_sdpUpdate(t *testing.T) { + var vol string + name := fmt.Sprintf("tf-vol-%d", acctest.RandIntRange(10, 100)) + name1 := fmt.Sprintf("tf-vol-upd-%d", acctest.RandIntRange(10, 100)) + capacity1 := 16000 + capacity2 := 32000 + iops1 := 10000 + iops2 := 28000 + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVolumeSdpUpdateConfig(name, iops1, capacity1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity1)), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "iops", fmt.Sprintf("%d", iops1)), + ), + }, + { + Config: testAccCheckIBMISVolumeSdpUpdateConfig(name1, iops1, capacity1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity1)), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "iops", fmt.Sprintf("%d", iops1)), + ), + }, + { + Config: testAccCheckIBMISVolumeSdpUpdateConfig(name1, iops1, capacity2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity2)), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "iops", fmt.Sprintf("%d", iops1)), + ), + }, + + { + Config: testAccCheckIBMISVolumeSdpUpdateConfig(name1, iops2, capacity2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVolumeExists("ibm_is_volume.storage", vol), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_volume.storage", "capacity", fmt.Sprintf("%d", capacity2)), + resource.TestCheckResourceAttr( 
+ "ibm_is_volume.storage", "iops", fmt.Sprintf("%d", iops2)), + ), + }, + }, + }) +} func TestAccIBMISVolume_snapshot(t *testing.T) { var vol string @@ -373,15 +473,41 @@ func testAccCheckIBMISVolumeConfig(name string) string { return fmt.Sprintf( ` resource "ibm_is_volume" "storage"{ - name = "%s" - profile = "10iops-tier" - zone = "us-south-1" + name = "%s" + profile = "10iops-tier" + zone = "us-south-1" # capacity= 200 } `, name) } +func testAccCheckIBMISVolumeSdpConfig(name string, capacity int) string { + return fmt.Sprintf( + ` + resource "ibm_is_volume" "storage"{ + name = "%s" + profile = "sdp" + zone = "eu-gb-1" + capacity = %d + } +`, name, capacity) + +} +func testAccCheckIBMISVolumeSdpUpdateConfig(name string, iops, capacity int) string { + return fmt.Sprintf( + ` + resource "ibm_is_volume" "storage"{ + name = "%s" + profile = "sdp" + iops = %d + zone = "eu-gb-1" + capacity = %d + } +`, name, iops, capacity) + +} + func testAccCheckIBMISVolumeCustomConfig(vpcname, subnetname, sshname, publicKey, name, volName string, iops int64) string { return fmt.Sprintf( ` diff --git a/ibm/service/vpc/resource_ibm_is_vpc.go b/ibm/service/vpc/resource_ibm_is_vpc.go index 9123b356f1..27f9153c97 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc.go +++ b/ibm/service/vpc/resource_ibm_is_vpc.go @@ -34,6 +34,7 @@ const ( isVPCDefaultSecurityGroupName = "default_security_group_name" isVPCDefaultSecurityGroupCRN = "default_security_group_crn" isVPCDefaultRoutingTableName = "default_routing_table_name" + isVPCDefaultRoutingTableCRN = "default_routing_table_crn" isVPCResourceGroup = "resource_group" isVPCStatus = "status" isVPCDeleting = "deleting" @@ -356,6 +357,12 @@ func ResourceIBMISVPC() *schema.Resource { Description: "Default routing table name", }, + isVPCDefaultRoutingTableCRN: { + Type: schema.TypeString, + Computed: true, + Description: "Default routing table CRN", + }, + isVPCResourceGroup: { Type: schema.TypeString, ForceNew: true, @@ -951,6 +958,9 @@ func vpcGet(d 
*schema.ResourceData, meta interface{}, id string) error { if vpc.DefaultRoutingTable != nil { d.Set(isVPCDefaultRoutingTable, *vpc.DefaultRoutingTable.ID) d.Set(isVPCDefaultRoutingTableName, *vpc.DefaultRoutingTable.Name) + if vpc.DefaultRoutingTable.CRN != nil { + d.Set(isVPCDefaultRoutingTableCRN, *vpc.DefaultRoutingTable.CRN) + } } healthReasons := []map[string]interface{}{} if vpc.HealthReasons != nil { diff --git a/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go b/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go index 1908868ee2..198627b358 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go @@ -34,6 +34,10 @@ const ( rtNextHop = "next_hop" rtZone = "zone" rtOrigin = "origin" + rtResourceGroup = "resource_group" + rtResourceGroupHref = "href" + rtResourceGroupId = "id" + rtResourceGroupName = "name" ) func ResourceIBMISVPCRoutingTable() *schema.Resource { @@ -114,6 +118,11 @@ func ResourceIBMISVPCRoutingTable() *schema.Resource { Computed: true, Description: "The routing table identifier.", }, + rtCrn: { + Type: schema.TypeString, + Computed: true, + Description: "The routing table CRN.", + }, rtHref: { Type: schema.TypeString, Computed: true, @@ -157,6 +166,30 @@ func ResourceIBMISVPCRoutingTable() *schema.Resource { }, }, }, + rtResourceGroup: { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this routing table.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + rtResourceGroupHref: { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + rtResourceGroupId: { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + rtResourceGroupName: { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, }, } } @@ -269,6 +302,7 @@ func
resourceIBMISVPCRoutingTableRead(d *schema.ResourceData, meta interface{}) d.Set(rtVpcID, idSet[0]) d.Set(rtID, routeTable.ID) + d.Set(rtCrn, routeTable.CRN) d.Set(rtName, routeTable.Name) d.Set(rtHref, routeTable.Href) d.Set(rtLifecycleState, routeTable.LifecycleState) @@ -307,9 +341,32 @@ func resourceIBMISVPCRoutingTableRead(d *schema.ResourceData, meta interface{}) d.Set(rtSubnets, subnets) + resourceGroupList := []map[string]interface{}{} + if routeTable.ResourceGroup != nil { + resourceGroupMap := routingTableResourceGroupToMap(*routeTable.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + } + d.Set(rtResourceGroup, resourceGroupList) + return nil } +func routingTableResourceGroupToMap(resourceGroupItem vpcv1.ResourceGroupReference) (resourceGroupMap map[string]interface{}) { + resourceGroupMap = map[string]interface{}{} + + if resourceGroupItem.Href != nil { + resourceGroupMap[isVolumesResourceGroupHref] = resourceGroupItem.Href + } + if resourceGroupItem.ID != nil { + resourceGroupMap[isVolumesResourceGroupId] = resourceGroupItem.ID + } + if resourceGroupItem.Name != nil { + resourceGroupMap[isVolumesResourceGroupName] = resourceGroupItem.Name + } + + return resourceGroupMap +} + func resourceIBMISVPCRoutingTableUpdate(d *schema.ResourceData, meta interface{}) error { sess, err := vpcClient(meta) if err != nil { diff --git a/ibm/service/vpc/resource_ibm_is_vpn_gateway_connection_test.go b/ibm/service/vpc/resource_ibm_is_vpn_gateway_connection_test.go index eda71335be..85798aa4a5 100644 --- a/ibm/service/vpc/resource_ibm_is_vpn_gateway_connection_test.go +++ b/ibm/service/vpc/resource_ibm_is_vpn_gateway_connection_test.go @@ -451,6 +451,198 @@ func TestAccIBMISVPNGatewayConnection_admin_state(t *testing.T) { }, }) } + +// distribute_traffic +func TestAccIBMISVPNGatewayConnection_routeDistributeTraffic(t *testing.T) { + var VPNGatewayConnection string + vpcname1 := fmt.Sprintf("tfvpngc-vpc-%d", acctest.RandIntRange(100, 200)) + 
subnetname1 := fmt.Sprintf("tfvpngc-subnet-%d", acctest.RandIntRange(100, 200)) + vpnname1 := fmt.Sprintf("tfvpngc-vpn-%d", acctest.RandIntRange(100, 200)) + name1 := fmt.Sprintf("tfvpngc-createname-%d", acctest.RandIntRange(100, 200)) + + vpcname2 := fmt.Sprintf("tfvpngc-vpc-%d", acctest.RandIntRange(100, 200)) + subnetname2 := fmt.Sprintf("tfvpngc-subnet-%d", acctest.RandIntRange(100, 200)) + vpnname2 := fmt.Sprintf("tfvpngc-vpn-%d", acctest.RandIntRange(100, 200)) + name2 := fmt.Sprintf("tfvpngc-createname-%d", acctest.RandIntRange(100, 200)) + updname2 := fmt.Sprintf("tfvpngc-updatename-%d", acctest.RandIntRange(100, 200)) + dt := true + dt2 := false + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISVPNGatewayConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVPNGatewayConnectionRouteDistributeTrafficConfig(vpcname1, subnetname1, vpnname1, name1, vpcname2, subnetname2, vpnname2, name2, dt), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPNGatewayConnectionExists("ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", VPNGatewayConnection), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "mode", "route"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "action"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "admin_state_up"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "authentication_mode"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "created_at"), + resource.TestCheckResourceAttrSet( + 
"ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "establish_mode"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "href"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "id"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "interval"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "preshared_key"), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "resource_type", "vpn_gateway_connection"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "status"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "distribute_traffic"), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "distribute_traffic", fmt.Sprintf("%t", dt)), + ), + }, + { + Config: testAccCheckIBMISVPNGatewayConnectionRouteDistributeTrafficUpdate(vpcname1, subnetname1, vpnname1, name1, vpcname2, subnetname2, vpnname2, updname2, dt2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPNGatewayConnectionExists("ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection2", VPNGatewayConnection), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection2", "name", updname2), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection2", "mode", "route"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "action"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "admin_state_up"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", 
"authentication_mode"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "created_at"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "establish_mode"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "href"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "id"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "interval"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "preshared_key"), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "resource_type", "vpn_gateway_connection"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "status"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "distribute_traffic"), + resource.TestCheckResourceAttr( + "ibm_is_vpn_gateway_connection.testacc_VPNGatewayConnection1", "distribute_traffic", fmt.Sprintf("%t", dt2)), + ), + }, + }, + }) +} + +func testAccCheckIBMISVPNGatewayConnectionRouteDistributeTrafficConfig(vpc1, subnet1, vpnname1, name1, vpc2, subnet2, vpnname2, name2 string, distributeTraffic bool) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc1" { + name = "%s" + } + resource "ibm_is_subnet" "testacc_subnet1" { + name = "%s" + vpc = "${ibm_is_vpc.testacc_vpc1.id}" + zone = "%s" + ipv4_cidr_block = "%s" + } + resource "ibm_is_vpn_gateway" "testacc_VPNGateway1" { + name = "%s" + subnet = "${ibm_is_subnet.testacc_subnet1.id}" + mode = "route" + } + resource "ibm_is_vpn_gateway_connection" "testacc_VPNGatewayConnection1" { + name = "%s" + vpn_gateway = "${ibm_is_vpn_gateway.testacc_VPNGateway1.id}" + peer_address = 
"${ibm_is_vpn_gateway.testacc_VPNGateway1.public_ip_address}" + preshared_key = "VPNDemoPassword" + } + resource "ibm_is_vpc" "testacc_vpc2" { + name = "%s" + } + resource "ibm_is_subnet" "testacc_subnet2" { + name = "%s" + vpc = "${ibm_is_vpc.testacc_vpc2.id}" + zone = "%s" + ipv4_cidr_block = "%s" + } + resource "ibm_is_vpn_gateway" "testacc_VPNGateway2" { + name = "%s" + subnet = "${ibm_is_subnet.testacc_subnet2.id}" + mode = "route" + } + resource "ibm_is_vpn_gateway_connection" "testacc_VPNGatewayConnection2" { + name = "%s" + vpn_gateway = "${ibm_is_vpn_gateway.testacc_VPNGateway2.id}" + peer_address = "${ibm_is_vpn_gateway.testacc_VPNGateway2.public_ip_address}" + preshared_key = "VPNDemoPassword" + distribute_traffic = %t + } + `, vpc1, subnet1, acc.ISZoneName, acc.ISCIDR, vpnname1, name1, vpc2, subnet2, acc.ISZoneName, acc.ISCIDR, vpnname2, name2, distributeTraffic) + +} + +func testAccCheckIBMISVPNGatewayConnectionRouteDistributeTrafficUpdate(vpc1, subnet1, vpnname1, name1, vpc2, subnet2, vpnname2, name2 string, distributeTraffic bool) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc1" { + name = "%s" + } + resource "ibm_is_subnet" "testacc_subnet1" { + name = "%s" + vpc = "${ibm_is_vpc.testacc_vpc1.id}" + zone = "%s" + ipv4_cidr_block = "%s" + } + resource "ibm_is_vpn_gateway" "testacc_VPNGateway1" { + name = "%s" + subnet = "${ibm_is_subnet.testacc_subnet1.id}" + mode = "route" + } + resource "ibm_is_vpn_gateway_connection" "testacc_VPNGatewayConnection1" { + name = "%s" + vpn_gateway = "${ibm_is_vpn_gateway.testacc_VPNGateway1.id}" + peer_address = "${ibm_is_vpn_gateway.testacc_VPNGateway1.public_ip_address}" + preshared_key = "VPNDemoPassword" + } + resource "ibm_is_vpc" "testacc_vpc2" { + name = "%s" + } + resource "ibm_is_subnet" "testacc_subnet2" { + name = "%s" + vpc = "${ibm_is_vpc.testacc_vpc2.id}" + zone = "%s" + ipv4_cidr_block = "%s" + } + resource "ibm_is_vpn_gateway" "testacc_VPNGateway2" { + name = "%s" + subnet = 
"${ibm_is_subnet.testacc_subnet2.id}" + mode = "route" + } + resource "ibm_is_vpn_gateway_connection" "testacc_VPNGatewayConnection2" { + name = "%s" + vpn_gateway = "${ibm_is_vpn_gateway.testacc_VPNGateway2.id}" + peer_address = "${ibm_is_vpn_gateway.testacc_VPNGateway2.public_ip_address}" + preshared_key = "VPNDemoPassword" + distribute_traffic = %t + } + `, vpc1, subnet1, acc.ISZoneName, acc.ISCIDR, vpnname1, name1, vpc2, subnet2, acc.ISZoneName, acc.ISCIDR, vpnname2, name2, distributeTraffic) + +} + func TestAccIBMISVPNGatewayConnection_multiple(t *testing.T) { var VPNGatewayConnection string var VPNGatewayConnection2 string diff --git a/ibm/service/vpc/resource_ibm_is_vpn_gateway_connections.go b/ibm/service/vpc/resource_ibm_is_vpn_gateway_connections.go index 5fd2ffa895..32427bb008 100644 --- a/ibm/service/vpc/resource_ibm_is_vpn_gateway_connections.go +++ b/ibm/service/vpc/resource_ibm_is_vpn_gateway_connections.go @@ -86,6 +86,14 @@ func ResourceIBMISVPNGatewayConnection() *schema.Resource { Deprecated: "peer_address is deprecated, use peer instead", }, + // distribute traffic + "distribute_traffic": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether the traffic is distributed between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. 
If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address.", + }, + // new breaking changes "establish_mode": &schema.Schema{ Type: schema.TypeString, @@ -616,7 +624,9 @@ func vpngwconCreate(d *schema.ResourceData, meta interface{}, name, gatewayID, p } else { vpnGatewayConnectionPrototypeModel.IpsecPolicy = nil } - + if distributeTrafficOk, ok := d.GetOkExists("distribute_traffic"); ok { + vpnGatewayConnectionPrototypeModel.DistributeTraffic = core.BoolPtr(distributeTrafficOk.(bool)) + } options := &vpcv1.CreateVPNGatewayConnectionOptions{ VPNGatewayID: &gatewayID, VPNGatewayConnectionPrototype: vpnGatewayConnectionPrototypeModel, @@ -726,6 +736,11 @@ func vpngwconUpdate(d *schema.ResourceData, meta interface{}, gID, gConnID strin ID: &gConnID, } vpnGatewayConnectionPatchModel := &vpcv1.VPNGatewayConnectionPatch{} + + if d.HasChange("distribute_traffic") { + vpnGatewayConnectionPatchModel.DistributeTraffic = core.BoolPtr(d.Get("distribute_traffic").(bool)) + } + if d.HasChange(isVPNGatewayConnectionName) { name := d.Get(isVPNGatewayConnectionName).(string) vpnGatewayConnectionPatchModel.Name = &name @@ -1206,6 +1221,11 @@ func setvpnGatewayConnectionIntfResource(d *schema.ResourceData, vpn_gateway_id if err = d.Set("mode", vpnGatewayConnection.Mode); err != nil { return fmt.Errorf("[ERROR] Error setting mode: %s", err) } + if !core.IsNil(vpnGatewayConnection.DistributeTraffic) { + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("Error setting distribute_traffic: %s", err) + } + } if err = d.Set("name", vpnGatewayConnection.Name); err != nil { return fmt.Errorf("[ERROR] Error setting name: %s", err) } @@ -1282,7 +1302,11 @@ func setvpnGatewayConnectionIntfResource(d *schema.ResourceData, vpn_gateway_id if err = d.Set("created_at", flex.DateTimeToString(vpnGatewayConnection.CreatedAt)); err != nil { return fmt.Errorf("[ERROR] Error setting created_at: %s", err) } - 
+ if !core.IsNil(vpnGatewayConnection.DistributeTraffic) { + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("Error setting distribute_traffic: %s", err) + } + } if vpnGatewayConnection.DeadPeerDetection != nil { d.Set(isVPNGatewayConnectionDeadPeerDetectionAction, vpnGatewayConnection.DeadPeerDetection.Action) d.Set(isVPNGatewayConnectionDeadPeerDetectionInterval, vpnGatewayConnection.DeadPeerDetection.Interval) @@ -1378,7 +1402,11 @@ func setvpnGatewayConnectionIntfResource(d *schema.ResourceData, vpn_gateway_id if err = d.Set("created_at", flex.DateTimeToString(vpnGatewayConnection.CreatedAt)); err != nil { return fmt.Errorf("[ERROR] Error setting created_at: %s", err) } - + if !core.IsNil(vpnGatewayConnection.DistributeTraffic) { + if err = d.Set("distribute_traffic", vpnGatewayConnection.DistributeTraffic); err != nil { + return fmt.Errorf("Error setting distribute_traffic: %s", err) + } + } if vpnGatewayConnection.DeadPeerDetection != nil { d.Set(isVPNGatewayConnectionDeadPeerDetectionAction, vpnGatewayConnection.DeadPeerDetection.Action) d.Set(isVPNGatewayConnectionDeadPeerDetectionInterval, vpnGatewayConnection.DeadPeerDetection.Interval) diff --git a/version/version.go b/version/version.go index 3f91a3b5bb..87c1823393 100644 --- a/version/version.go +++ b/version/version.go @@ -5,7 +5,7 @@ import ( ) // Version is the current provider main version -const Version = "1.70.0-beta0" +const Version = "1.71.0-beta0" // GitCommit is the git commit that was compiled. This will be filled in by the compiler. 
var GitCommit string diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index 09d6aa3a2a..74a627221c 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -9,6 +9,7 @@ Cloud Foundry Cloudant Databases Code Engine Container Registry +Configuration Aggregator Context Based Restrictions CD Tekton Pipeline Direct Link Gateway diff --git a/website/docs/d/cd_tekton_pipeline.html.markdown b/website/docs/d/cd_tekton_pipeline.html.markdown index 7d6b0f15bc..69624caef9 100644 --- a/website/docs/d/cd_tekton_pipeline.html.markdown +++ b/website/docs/d/cd_tekton_pipeline.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline -Provides a read-only data source for cd_tekton_pipeline. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. +Provides a read-only data source to retrieve information about a cd_tekton_pipeline. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. ## Example Usage @@ -20,21 +20,19 @@ data "ibm_cd_tekton_pipeline" "cd_tekton_pipeline" { ## Argument Reference -Review the argument reference that you can specify for your data source. +You can specify the following arguments for this data source. * `pipeline_id` - (Required, Forces new resource, String) ID of current instance. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your data source is created. +After your data source is created, you can read values from the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline. * `build_number` - (Integer) The latest pipeline run build number. 
If this property is absent, the pipeline hasn't had any pipeline runs. * Constraints: The minimum value is `1`. - * `created_at` - (String) Standard RFC 3339 Date Time String. - * `definitions` - (List) Definition list. * Constraints: The maximum length is `128` items. The minimum length is `0` items. Nested schema for **definitions**: @@ -60,23 +58,16 @@ Nested schema for **definitions**: * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. * `type` - (String) The only supported source type is "git", indicating that the source is a git repository. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^git$/`. - * `enable_notifications` - (Boolean) Flag to enable notifications for this pipeline. If enabled, the Tekton pipeline run events will be published to all the destinations specified by the Slack and Event Notifications integrations in the parent toolchain. If omitted, this feature is disabled by default. - * `enable_partial_cloning` - (Boolean) Flag to enable partial cloning for this pipeline. When partial clone is enabled, only the files contained within the paths specified in definition repositories are read and cloned, this means that symbolic links might not work. If omitted, this feature is disabled by default. - * `enabled` - (Boolean) Flag to check if the trigger is enabled. * Constraints: The default value is `true`. - * `href` - (String) API URL for interacting with the pipeline. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. - * `name` - (String) String. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. 
The value must match regular expression `/^[a-zA-Z0-9][-0-9a-zA-Z_. ]{1,251}[a-zA-Z0-9]$/`. - * `next_build_number` - (Integer) The build number that will be used for the next pipeline run. * Constraints: The maximum value is `99999999999999`. The minimum value is `1`. - * `properties` - (List) Tekton pipeline's environment properties. * Constraints: The maximum length is `1024` items. The minimum length is `0` items. Nested schema for **properties**: @@ -99,16 +90,14 @@ Nested schema for **resource_group**: * Constraints: The maximum length is `64` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_]+$/`. * `runs_url` - (String) URL for this pipeline showing the list of pipeline runs. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. - * `status` - (String) Pipeline status. * Constraints: Allowable values are: `configured`, `configuring`. * `toolchain` - (List) Toolchain object containing references to the parent toolchain. Nested schema for **toolchain**: * `crn` - (String) The CRN for the toolchain that contains the Tekton pipeline. * Constraints: The maximum length is `512` characters. The minimum length is `9` characters. The value must match regular expression `/^crn:v[0-9](:([A-Za-z0-9-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}$/`. - * `id` - (String) UUID. + * `id` - (String) Universally Unique Identifier. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. - * `triggers` - (List) Tekton pipeline triggers list. * Constraints: The maximum length is `1024` items. The minimum length is `0` items. Nested schema for **triggers**: @@ -196,7 +185,6 @@ Nested schema for **triggers**: * Constraints: The maximum length is `253` characters. The minimum length is `1` character. 
The value must match regular expression `/^[-0-9a-zA-Z_. \\(\\)\\[\\]]{1,253}$/`. * `type` - (String) Type of the worker. Computed based on the worker ID. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. - * `updated_at` - (String) Standard RFC 3339 Date Time String. * `worker` - (List) Details of the worker used to run the pipeline. Nested schema for **worker**: diff --git a/website/docs/d/cd_tekton_pipeline_definition.html.markdown b/website/docs/d/cd_tekton_pipeline_definition.html.markdown index 1348def3eb..f375310174 100644 --- a/website/docs/d/cd_tekton_pipeline_definition.html.markdown +++ b/website/docs/d/cd_tekton_pipeline_definition.html.markdown @@ -8,20 +8,20 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_definition -Provides a read-only data source for cd_tekton_pipeline_definition. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. +Provides a read-only data source to retrieve information about a cd_tekton_pipeline_definition. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. ## Example Usage ```hcl data "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition" { - definition_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition.definition_id - pipeline_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition.pipeline_id + definition_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance.definition_id + pipeline_id = ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition_instance.pipeline_id } ``` ## Argument Reference -Review the argument reference that you can specify for your data source. +You can specify the following arguments for this data source. 
* `definition_id` - (Required, Forces new resource, String) The definition ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -30,7 +30,7 @@ Review the argument reference that you can specify for your data source. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your data source is created. +After your data source is created, you can read values from the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_definition. * `href` - (String) API URL for interacting with the definition. diff --git a/website/docs/d/cd_tekton_pipeline_property.html.markdown b/website/docs/d/cd_tekton_pipeline_property.html.markdown index 2316b7e8af..d835cf35a8 100644 --- a/website/docs/d/cd_tekton_pipeline_property.html.markdown +++ b/website/docs/d/cd_tekton_pipeline_property.html.markdown @@ -8,20 +8,20 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_property -Provides a read-only data source for cd_tekton_pipeline_property. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. +Provides a read-only data source to retrieve information about a cd_tekton_pipeline_property. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. ## Example Usage ```hcl data "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property" { - pipeline_id = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property.pipeline_id + pipeline_id = ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property_instance.pipeline_id property_name = "debug-pipeline" } ``` ## Argument Reference -Review the argument reference that you can specify for your data source. +You can specify the following arguments for this data source. 
* `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -30,26 +30,20 @@ Review the argument reference that you can specify for your data source. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your data source is created. +After your data source is created, you can read values from the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_property. * `enum` - (List) Options for `single_select` property type. Only needed when using `single_select` property type. * Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `256` items. The minimum length is `0` items. - * `href` - (String) API URL for interacting with the property. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. - * `locked` - (Boolean) When true, this property cannot be overridden by a trigger property or at runtime. Attempting to override it will result in run requests being rejected. The default is false. - * `name` - (Forces new resource, String) Property name. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. - * `path` - (String) A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^[-0-9a-zA-Z_.]*$/`. - * `type` - (Forces new resource, String) Property type. 
* Constraints: Allowable values are: `secure`, `text`, `integration`, `single_select`, `appconfig`. - * `value` - (String) Property value. Any string value is valid. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^.*$/`. diff --git a/website/docs/d/cd_tekton_pipeline_trigger.html.markdown b/website/docs/d/cd_tekton_pipeline_trigger.html.markdown index 4ad6e433cf..00ccf5ac2f 100644 --- a/website/docs/d/cd_tekton_pipeline_trigger.html.markdown +++ b/website/docs/d/cd_tekton_pipeline_trigger.html.markdown @@ -8,20 +8,20 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_trigger -Provides a read-only data source for cd_tekton_pipeline_trigger. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. +Provides a read-only data source to retrieve information about a cd_tekton_pipeline_trigger. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. ## Example Usage ```hcl data "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger" { - pipeline_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.pipeline_id - trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger.trigger_id + pipeline_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.pipeline_id + trigger_id = ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger_instance.trigger_id } ``` ## Argument Reference -Review the argument reference that you can specify for your data source. +You can specify the following arguments for this data source. * `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. 
@@ -30,35 +30,26 @@ Review the argument reference that you can specify for your data source. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your data source is created. +After your data source is created, you can read values from the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_trigger. * `cron` - (String) Only needed for timer triggers. CRON expression that indicates when this trigger will activate. Maximum frequency is every 5 minutes. The string is based on UNIX crontab syntax: minute, hour, day of month, month, day of week. Example: The CRON expression 0 *_/2 * * * - translates to - every 2 hours. * Constraints: The maximum length is `253` characters. The minimum length is `5` characters. The value must match regular expression `/^[-0-9a-zA-Z,\\*\/ ]{5,253}$/`. - * `enable_events_from_forks` - (Boolean) When enabled, pull request events from forks of the selected repository will trigger a pipeline run. * Constraints: The default value is `false`. - * `enabled` - (Boolean) Flag to check if the trigger is enabled. * Constraints: The default value is `true`. - * `event_listener` - (String) Event listener name. The name of the event listener to which the trigger is associated. The event listeners are defined in the definition repositories of the Tekton pipeline. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. - * `events` - (List) Either 'events' or 'filter' is required specifically for Git triggers. Stores a list of events that a Git trigger listens to. Choose one or more from 'push', 'pull_request', and 'pull_request_closed'. If SCM repositories use the 'merge request' term, they correspond to the generic term i.e. 'pull request'. * Constraints: Allowable list items are: `push`, `pull_request`, `pull_request_closed`. 
The maximum length is `3` items. The minimum length is `0` items. - * `favorite` - (Boolean) Mark the trigger as a favorite. * Constraints: The default value is `false`. - * `filter` - (String) Either 'events' or 'filter' can be used. Stores the CEL (Common Expression Language) expression value which is used for event filtering against the Git webhook payloads. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. - * `href` - (String) API URL for interacting with the trigger. Only included when fetching the list of pipeline triggers. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. - * `max_concurrent_runs` - (Integer) Defines the maximum number of concurrent runs for this trigger. If omitted then the concurrency limit is disabled for this trigger. - * `name` - (String) Trigger name. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^([a-zA-Z0-9]{1,2}|[a-zA-Z0-9][0-9a-zA-Z-_.: \/\\(\\)\\[\\]]{1,251}[a-zA-Z0-9])$/`. * `properties` - (List) Optional trigger properties are used to override or supplement the pipeline properties when triggering a pipeline run. @@ -108,16 +99,12 @@ Nested schema for **source**: * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. * `type` - (String) The only supported source type is "git", indicating that the source is a git repository. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^git$/`. - * `tags` - (List) Optional trigger tags array. 
* Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `128` items. The minimum length is `0` items. - * `timezone` - (String) Only used for timer triggers. Specify the timezone used for this timer trigger, which will ensure the CRON activates this trigger relative to the specified timezone. If no timezone is specified, the default timezone used is UTC. Valid timezones are those listed in the IANA timezone database, https://www.iana.org/time-zones. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z+_., \/]{1,253}$/`. - * `type` - (String) Trigger type. * Constraints: Allowable values are: `manual`, `scm`, `timer`, `generic`. - * `webhook_url` - (String) Webhook URL that can be used to trigger pipeline runs. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. * `worker` - (List) Details of the worker used to run the trigger. diff --git a/website/docs/d/cd_tekton_pipeline_trigger_property.html.markdown b/website/docs/d/cd_tekton_pipeline_trigger_property.html.markdown index fcdfffa3e9..bda05caaaa 100644 --- a/website/docs/d/cd_tekton_pipeline_trigger_property.html.markdown +++ b/website/docs/d/cd_tekton_pipeline_trigger_property.html.markdown @@ -8,21 +8,21 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_trigger_property -Provides a read-only data source for cd_tekton_pipeline_trigger_property. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. +Provides a read-only data source to retrieve information about a cd_tekton_pipeline_trigger_property. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. 
## Example Usage ```hcl data "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_property" { - pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.pipeline_id + pipeline_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.pipeline_id property_name = "debug-pipeline" - trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property.trigger_id + trigger_id = ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property_instance.trigger_id } ``` ## Argument Reference -Review the argument reference that you can specify for your data source. +You can specify the following arguments for this data source. * `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -33,26 +33,20 @@ Review the argument reference that you can specify for your data source. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your data source is created. +After your data source is created, you can read values from the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_trigger_property. * `enum` - (List) Options for `single_select` property type. Only needed for `single_select` property type. * Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `256` items. The minimum length is `0` items. - * `href` - (String) API URL for interacting with the trigger property. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. - * `locked` - (Boolean) When true, this property cannot be overridden at runtime. 
Attempting to override it will result in run requests being rejected. The default is false. - * `name` - (Forces new resource, String) Property name. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. - * `path` - (String) A dot notation path for `integration` type properties only, that selects a value from the tool integration. If left blank the full tool integration data will be used. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^[-0-9a-zA-Z_.]*$/`. - * `type` - (Forces new resource, String) Property type. * Constraints: Allowable values are: `secure`, `text`, `integration`, `single_select`, `appconfig`. - * `value` - (String) Property value. Any string value is valid. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^.*$/`. diff --git a/website/docs/d/config_aggregator_configurations.html.markdown b/website/docs/d/config_aggregator_configurations.html.markdown new file mode 100644 index 0000000000..6d7a898637 --- /dev/null +++ b/website/docs/d/config_aggregator_configurations.html.markdown @@ -0,0 +1,73 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_config_aggregator_configurations" +description: |- + Get information about config_aggregator_configurations +subcategory: "Configuration Aggregator" +--- + +# ibm_config_aggregator_configurations + +Provides a read-only data source to retrieve information about config_aggregator_configurations. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. 
+ +## Example Usage + +```hcl +data "ibm_config_aggregator_configurations" "config_aggregator_configurations" { + instance_id=var.instance_id + region=var.region +} +``` + +## Argument Reference + +You can specify the following arguments for this data source. +* `instance_id` - (Required, Forces new resource, String) The GUID of the Configuration Aggregator instance. +* `region` - (Optional, Forces new resource, String) The region of the Configuration Aggregator instance. If not provided defaults to the region defined in the IBM provider configuration. +* `config_type` - (Optional, String) The type of resource configuration that are to be retrieved. + * Constraints: The maximum length is `1024` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9 ,\\-_]+$/`. +* `location` - (Optional, String) The location or region in which the resources are created. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/^$|[a-z]-[a-z]/`. +* `resource_crn` - (Optional, String) The crn of the resource. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9.\\:\/-]+$/`. +* `resource_group_id` - (Optional, String) The resource group id of the resources. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. +* `service_name` - (Optional, String) The name of the IBM Cloud service for which resources are to be retrieved. + * Constraints: The maximum length is `1024` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9 ,\\-_]+$/`. + +## Attribute Reference + +After your data source is created, you can read values from the following attributes. + +* `id` - The unique identifier of the config_aggregator_configurations. 
+* `configs` - (List) Array of resource configurations. + * Constraints: The maximum length is `100` items. The minimum length is `0` items. +Nested schema for **configs**: + * `about` - (Map) The basic metadata fetched from the query API. + Nested schema for **about**: + * `account_id` - (String) The account ID in which the resource exists. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + * `config_type` - (String) The type of configuration of the retrieved resource. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9.\\:\/-]+$/`. + * `last_config_refresh_time` - (String) Date/time stamp identifying when the information was last collected. Must be in the RFC 3339 format. + * `location` - (String) Location of the resource specified. + * Constraints: The maximum length is `1000` characters. The minimum length is `0` characters. The value must match regular expression `/^$|[a-z]-[a-z]/`. + * `resource_crn` - (String) The unique CRN of the IBM Cloud resource. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9.\\:\/-]+$/`. + * `resource_group_id` - (String) The account ID. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + * `resource_name` - (String) User defined name of the resource. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9.\\:\/-]+$/`. + * `service_name` - (String) The name of the service to which the resources belongs. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. 
The value must match regular expression `/^[a-zA-Z0-9.\\:\/-]+$/`. + * `tags` - (List) Tags associated with the resource. + Nested schema for **tags**: + * `tag` - (String) The name of the tag. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + * `config` - (String) The configuration of the resource. + Nested schema for **config**: +* `prev` - (List) The reference to the previous page of entries. +Nested schema for **prev**: + * `href` - (String) The reference to the previous page of entries. + * `start` - (String) the start string for the query to view the page. + diff --git a/website/docs/d/config_aggregator_resource_collection_status.html.markdown b/website/docs/d/config_aggregator_resource_collection_status.html.markdown new file mode 100644 index 0000000000..4fd1fff0cf --- /dev/null +++ b/website/docs/d/config_aggregator_resource_collection_status.html.markdown @@ -0,0 +1,32 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_config_aggregator_resource_collection_status" +description: |- + Get information about config_aggregator_resource_collection_status +subcategory: "Configuration Aggregator" +--- + +# ibm_config_aggregator_resource_collection_status + +Provides a read-only data source to retrieve information about config_aggregator_resource_collection_status. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. + +## Example Usage + +```hcl +data "ibm_config_aggregator_resource_collection_status" "config_aggregator_resource_collection_status" { + instance_id=var.instance_id + region=var.region +} +``` + + +## Attribute Reference + +After your data source is created, you can read values from the following attributes. +* `instance_id` - (Required, Forces new resource, String) The GUID of the Configuration Aggregator instance. 
+* `region` - (Optional, Forces new resource, String) The region of the Configuration Aggregator instance. If not provided defaults to the region defined in the IBM provider configuration. +* `id` - The unique identifier of the config_aggregator_resource_collection_status. +* `last_config_refresh_time` - (String) The timestamp at which the configuration was last refreshed. +* `status` - (String) Status of the resource collection. + * Constraints: Allowable values are: `initiated`, `inprogress`, `complete`. + diff --git a/website/docs/d/config_aggregator_settings.html.markdown b/website/docs/d/config_aggregator_settings.html.markdown new file mode 100644 index 0000000000..c242d243ff --- /dev/null +++ b/website/docs/d/config_aggregator_settings.html.markdown @@ -0,0 +1,48 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_config_aggregator_settings" +description: |- + Get information about config_aggregator_settings +subcategory: "Configuration Aggregator" +--- + +# ibm_config_aggregator_settings + +Provides a read-only data source to retrieve information about config_aggregator_settings. You can then reference the fields of the data source in other resources within the same configuration by using interpolation syntax. + +## Example Usage + +```hcl +data "ibm_config_aggregator_settings" "config_aggregator_settings" { + instance_id=var.instance_id + region=var.region +} +``` + + +## Attribute Reference + +After your data source is created, you can read values from the following attributes. +* `instance_id` - (Required, Forces new resource, String) The GUID of the Configuration Aggregator instance. +* `region` - (Optional, Forces new resource, String) The region of the Configuration Aggregator instance. If not provided defaults to the region defined in the IBM provider configuration. +* `id` - The unique identifier of the config_aggregator_settings. +* `additional_scope` - (List) The additional scope that enables resource collection for Enterprise accounts.
+ * Constraints: The maximum length is `10` items. The minimum length is `0` items. +Nested schema for **additional_scope**: + * `enterprise_id` - (String) The Enterprise ID. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/[a-zA-Z0-9]/`. + * `profile_template` - (List) The Profile Template details applied on the enterprise account. + Nested schema for **profile_template**: + * `id` - (String) The Profile Template ID created in the enterprise account that provides access to App Configuration instance for resource collection. + * Constraints: The maximum length is `52` characters. The minimum length is `52` characters. The value must match regular expression `/[a-zA-Z0-9-]/`. + * `trusted_profile_id` - (String) The trusted profile ID that provides access to App Configuration instance to retrieve template information. + * Constraints: The maximum length is `44` characters. The minimum length is `44` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + * `type` - (String) The type of scope. Currently allowed value is Enterprise. + * Constraints: The maximum length is `64` characters. The minimum length is `0` characters. The value must match regular expression `/[a-zA-Z0-9]/`. +* `last_updated` - (String) The last time the settings was last updated. +* `resource_collection_regions` - (List) Regions for which the resource collection is enabled. + * Constraints: The list items must match regular expression `/^[a-zA-Z0-9-]*$/`. The maximum length is `10` items. The minimum length is `0` items. +* `resource_collection_enabled` - (Boolean) The field to check if the resource collection is enabled. +* `trusted_profile_id` - (String) The trusted profile ID that provides access to App Configuration instance to retrieve resource metadata. + * Constraints: The maximum length is `44` characters. The minimum length is `44` characters. 
The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + diff --git a/website/docs/d/event_streams_quota.html.markdown b/website/docs/d/event_streams_quota.html.markdown new file mode 100644 index 0000000000..fed93b90e8 --- /dev/null +++ b/website/docs/d/event_streams_quota.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "Event Streams" +layout: "ibm" +page_title: "IBM: ibm_event_streams_quota" +description: |- + Get information about a quota of an IBM Event Streams service instance. +--- + +# ibm_event_streams_quota + +Retrieve information about a quota of an Event Streams instance. Both the default quota and user quotas may be managed. Quotas are only available on Event Streams Enterprise plan service instances. For more information about Event Streams quotas, see [Setting Kafka quotas](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-enabling_kafka_quotas). + +## Example usage + +To retrieve the default quota: + +```terraform +data "ibm_resource_instance" "es_instance" { + name = "terraform-integration" + resource_group_id = data.ibm_resource_group.group.id +} + +data "ibm_event_streams_quota" "es_quota_default" { + resource_instance_id = data.ibm_resource_instance.es_instance.id + entity = "default" +} +``` + +To retrieve a user quota, for a user with the given IAM ID: + +```terraform +data "ibm_resource_instance" "es_instance" { + name = "terraform-integration" + resource_group_id = data.ibm_resource_group.group.id +} + +data "ibm_event_streams_quota" "es_quota_user" { + resource_instance_id = data.ibm_resource_instance.es_instance.id + entity = "iam-ServiceId-00001111-2222-3333-4444-555566667777" +} +``` + +## Argument reference + +You must specify the following arguments for this data source. + +- `resource_instance_id` - (Required, String) The ID or CRN of the Event Streams service instance. +- `entity` - (Required, String) Either `default` to set the default quota, or an IAM ID for a user quota.
+ +## Attribute reference + +After your data source is created, you can read values from the listed arguments and the following attributes. + +- `id` - (String) The ID of the quota in CRN format. The last field of the CRN is either `default`, or the IAM ID of the user. For example, `crn:v1:bluemix:public:messagehub:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:ffffffff-eeee-dddd-cccc-bbbbaaaa9999:quota:default`, or `crn:v1:bluemix:public:messagehub:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:ffffffff-eeee-dddd-cccc-bbbbaaaa9999:quota:iam-ServiceId-00001111-2222-3333-4444-555566667777`. +- `producer_byte_rate` - (Integer) The producer quota in bytes/second. If no producer quota is set, this is -1. +- `consumer_byte_rate` - (Integer) The consumer quota in bytes/second. If no consumer quota is set, this is -1. diff --git a/website/docs/d/is_lb.html.markdown b/website/docs/d/is_lb.html.markdown index 2471a5715a..bdea9b7fba 100644 --- a/website/docs/d/is_lb.html.markdown +++ b/website/docs/d/is_lb.html.markdown @@ -52,7 +52,9 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `access_mode` - (String) The access mode for this load balancer. One of **private**, **public**, **private_path**. - `access_tags` - (String) Access management tags associated for the load balancer. +- `availability` - (String) The availability of this load balancer - `crn` - (String) The CRN for this load balancer. - `dns` - (List) The DNS configuration for this load balancer. @@ -62,6 +64,7 @@ In addition to all argument reference list, you can access the following attribu - `hostname` - (String) Fully qualified domain name assigned to this load balancer. - `id` - (String) The ID of the load balancer. +- `instance_groups_supported` - (Boolean) Indicates whether this load balancer supports instance groups. 
- `listeners` - (String) The ID of the listeners attached to this load balancer. - `logging`- (Bool) Enable (**true**) or disable (**false**) datapath logging for this load balancer. If unspecified, datapath logging is disabled. This option is supported only for application load balancers. - `operating_status` - (String) The operating status of this load balancer. @@ -113,6 +116,7 @@ In addition to all argument reference list, you can access the following attribu - `route_mode` - (Bool) Indicates whether route mode is enabled for this load balancer. - `security_groups`- (String) A list of security groups that are used with this load balancer. This option is supported only for application load balancers. - `security_groups_supported`- (Bool) Indicates if this load balancer supports security groups. +- `source_ip_session_persistence_supported` - (Boolean) Indicates whether this load balancer supports source IP session persistence. - `subnets` - (String) The ID of the subnets to provision this load balancer. - `status` - (String) The status of load balancer. - `tags` - (String) The tags associated with the load balancer. diff --git a/website/docs/d/is_lb_profile.html.markdown b/website/docs/d/is_lb_profile.html.markdown index 1f647ba104..aa9d024e0c 100644 --- a/website/docs/d/is_lb_profile.html.markdown +++ b/website/docs/d/is_lb_profile.html.markdown @@ -37,7 +37,10 @@ Review the argument references that you can specify for your data source. ## Attribute reference You can access the following attribute references after your data source is created. - +- `access_modes` - (List) The access modes supported by a load balancer with this profile + Nested scheme for `access_modes`: + - `type` - (String) The type of access mode. + - `value` - (String) Access modes for this profile. - `family` - (String) The product family this load balancer profile belongs to. - `href` - (String) The URL for this load balancer profile.
- `id` - (String) The id(`name`) for this load balancer profile. diff --git a/website/docs/d/is_lb_profiles.html.markdown b/website/docs/d/is_lb_profiles.html.markdown index 5738d31390..ea1acd75fe 100644 --- a/website/docs/d/is_lb_profiles.html.markdown +++ b/website/docs/d/is_lb_profiles.html.markdown @@ -40,11 +40,38 @@ You can access the following attribute references after your data source is crea - `lb_profiles` - (List) List of all load balancer profiles in the IBM Cloud Infrastructure. Nested scheme for `lb_profiles`: + - `access_modes` - (List) The access modes supported by a load balancer with this profile + + Nested scheme for `access_modes`: + - `type` - (String) The type of access mode. + - `value` - (String) Access modes for this profile. + - `availability` - (List) The availability mode for a load balancer with this profile + + Nested scheme for `availability`: + - `type` - (String) The type of availability mode. One of **fixed**, **dependent** + - `value` - (String) The availability of this load balancer. Applicable only if `type` is **fixed** + + -> **Target should be one of the below:**<br/>
+ • `subnet` remains available if at least one zone that the load balancer's subnets reside in is available.
+ • `region` remains available if at least one zone in the region is available.<br/>
- `family` - (String) The product family this load balancer profile belongs to. - `href` - (String) The URL for this load balancer profile. + - `instance_groups_supported` - (List) The instance groups support for a load balancer with this profile + + Nested scheme for `instance_groups_supported`: + - `type` - (String) The instance groups support type. One of **fixed**, **dependent** + - `value` - (String) Indicates whether instance groups are supported. Applicable only if `type` is **fixed** + - `source_ip_session_persistence_supported` - (List) The source IP session persistence support for a load balancer with this profile + + Nested scheme for `source_ip_session_persistence_supported`: + - `type` - (String) The source IP session persistence support type. One of **fixed**, **dependent** + - `value` - (String) Indicates whether source IP session persistence is supported. Applicable only if `type` is **fixed** + - `name` - (String) The name for this load balancer profile. - `route_mode_supported` - (Bool) The route mode support for a load balancer with this profile. - `route_mode_type` - (String) The route mode type for this load balancer profile, one of [fixed, dependent] + - `udp_supported` - (Bool) The UDP support for a load balancer with this profile. - `udp_supported_type` - (String) The UDP support type for a load balancer with this profile, one of [fixed, dependent] diff --git a/website/docs/d/is_lbs.html.markdown b/website/docs/d/is_lbs.html.markdown index cac1c518be..e589c0df98 100644 --- a/website/docs/d/is_lbs.html.markdown +++ b/website/docs/d/is_lbs.html.markdown @@ -34,8 +34,11 @@ Review the attribute references that you can access after you retrieve your data - `load_balancers` - (List) The Collection of load balancers. Nested scheme for `load_balancers`: + - `access_mode` - (String) The access mode for this load balancer. One of **private**, **public**, **private_path**.
- `access_tags` - (String) Access management tags associated for the load balancer. + - `availability` - (String) The availability of this load balancer - `id` - (String) The unique identifier of the load balancer. + - `instance_groups_supported` - (Boolean) Indicates whether this load balancer supports instance groups. - `created_at` - (String) The date and time this load balancer was created. - `crn` - (String) The load balancer's CRN. - `dns` - (List) The DNS configuration for this load balancer. @@ -83,6 +86,7 @@ Review the attribute references that you can access after you retrieve your data - `public_ips` - (String) The public IP addresses assigned to this load balancer. - `resource_group` - (String) The resource group id, where the load balancer is created. - `route_mode` - (Bool) Indicates whether route mode is enabled for this load balancer. + - `source_ip_session_persistence_supported` - (Boolean) Indicates whether this load balancer supports source IP session persistence. - `status` - (String) The status of the load balancers. - `type` - (String) The type of the load balancer. - `tags` - (String) Tags associated with the load balancer. diff --git a/website/docs/d/is_private_path_service_gateway.html.markdown b/website/docs/d/is_private_path_service_gateway.html.markdown new file mode 100644 index 0000000000..f169c5ac27 --- /dev/null +++ b/website/docs/d/is_private_path_service_gateway.html.markdown @@ -0,0 +1,98 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway" +description: |- + Get information about PrivatePathServiceGateway +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway + +Provides a read-only data source for PrivatePathServiceGateway. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. 
+ +## Example Usage + +```hcl +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} + +resource "ibm_is_subnet" "example" { + name = "example-subnet" + vpc = ibm_is_vpc.example.id + zone = "us-south-2" + ipv4_cidr_block = "10.240.0.0/24" +} +resource "ibm_is_lb" "example" { + name = "example-lb" + subnets = [ibm_is_subnet.example.id] +} +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.example.id + zonal_affinity = true + service_endpoints = ["example-fqdn"] +} +data "ibm_is_private_path_service_gateway" "example" { + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +data "ibm_is_private_path_service_gateway" "example1" { + private_path_service_gateway_name = ibm_is_private_path_service_gateway.example.name +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your data source. + +- `private_path_service_gateway` - (Required, Forces new resource, String) The private path service gateway identifier. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `id` - The unique identifier of the PrivatePathServiceGateway. +- `created_at` - (String) The date and time that the private path service gateway was created. +- `crn` - (String) The CRN for this private path service gateway. +- `default_access_policy` - (String) The policy to use for bindings from accounts without an explicit account policy. +- `endpoint_gateway_count` - (Integer) The number of endpoint gateways using this private path service gateway. +- `endpoint_gateway_binding_auto_delete` - (Boolean) Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed. At present, this is always true, but may be modifiable in the future. 
+- `endpoint_gateway_binding_auto_delete_timeout` - (Integer) If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted. If the value is 0, abandoned endpoint gateway bindings will be deleted immediately. At present, this is always set to 0. This value may be modifiable in the future. +- `href` - (String) The URL for this private path service gateway. +- `lifecycle_state` - (String) The lifecycle state of the private path service gateway. +- `load_balancer` - (List) The load balancer for this private path service gateway. + Nested scheme for **load_balancer**: + - `crn` - (String) The load balancer's CRN. + - `deleted` - (List) If present, this property indicates the referenced resource has been deleted, and provides some supplementary information. + Nested scheme for **deleted**: + - `more_info` - (String) Link to documentation about deleted resources. + - `href` - (String) The load balancer's canonical URL. + - `id` - (String) The unique identifier for this load balancer. + - `name` - (String) The name for this load balancer. The name is unique across all load balancers in the VPC. + - `resource_type` - (String) The resource type. +- `name` - (String) The name for this private path service gateway. The name is unique across all private path service gateways in the VPC. +- `published` - (Boolean) Indicates the availability of this private path service gateway- `true`: Any account can request access to this private path service gateway.- `false`: Access is restricted to the account that created this private path service gateway. +- `region` - (List) The region served by this private path service gateway. + Nested scheme for **region**: + - `href` - (String) The URL for this region. + - `name` - (String) The globally unique name for this region. +- `resource_group` - (List) The resource group for this private path service gateway.
+ Nested scheme for **resource_group**: + - `href` - (String) The URL for this resource group. + - `id` - (String) The unique identifier for this resource group. + - `name` - (String) The name for this resource group. +- `resource_type` - (String) The resource type. +- `service_endpoints` - (List of strings) The fully qualified domain names for this private path service gateway. +- `vpc` - (List) The VPC this private path service gateway resides in. + Nested scheme for **vpc**: + - `crn` - (String) The CRN for this VPC. + - `deleted` - (List) If present, this property indicates the referenced resource has been deleted, and provides some supplementary information. + Nested scheme for **deleted**: + - `more_info` - (String) Link to documentation about deleted resources. + - `href` - (String) The URL for this VPC. + - `id` - (String) The unique identifier for this VPC. + - `name` - (String) The name for this VPC. The name is unique across all VPCs in the region. + - `resource_type` - (String) The resource type. +- `zonal_affinity` - (Boolean) Indicates whether this private path service gateway has zonal affinity.- `true`: Traffic to the service from a zone will favor service endpoints in the same zone.- `false`: Traffic to the service from a zone will be load balanced across all zones in the region the service resides in.
+ diff --git a/website/docs/d/is_private_path_service_gateway_account_policies.html.markdown b/website/docs/d/is_private_path_service_gateway_account_policies.html.markdown new file mode 100644 index 0000000000..4147530f54 --- /dev/null +++ b/website/docs/d/is_private_path_service_gateway_account_policies.html.markdown @@ -0,0 +1,70 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_account_policies" +description: |- + Get information about PrivatePathServiceGatewayAccountPolicyCollection +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_account_policies + +Provides a read-only data source for PrivatePathServiceGatewayAccountPolicyCollection. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. + +## Example Usage + +```hcl +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} + +resource "ibm_is_subnet" "example" { + name = "example-subnet" + vpc = ibm_is_vpc.example.id + zone = "us-south-2" + ipv4_cidr_block = "10.240.0.0/24" +} +resource "ibm_is_lb" "example" { + name = "example-lb" + subnets = [ibm_is_subnet.example.id] +} +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.example.id + zonal_affinity = true + service_endpoints = ["example-fqdn"] +} +resource "ibm_is_private_path_service_gateway_account_policy" "example" { + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id + access_policy = "review" + account = "fee82deba12e4c0fb69c3b09d1f12345" +} +data "ibm_is_private_path_service_gateway_account_policies" "example" { + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id + account = "fee82deba12e4c0fb69c3b09d1f12345" +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your data source. 
+ +- `private_path_service_gateway` - (Required, String) The private path service gateway identifier. +- `account` - (Optional, String) ID of the account to retrieve the policies for. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `account_policies` - (List) Collection of account policies. + Nested scheme for **account_policies**: + - `access_policy` - (String) The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewed. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered. + - `account` - (List) The account for this access policy. + Nested scheme for **account**: + - `id` - (String) + - `resource_type` - (String) The resource type. + - `created_at` - (String) The date and time that the account policy was created. + - `href` - (String) The URL for this account policy. + - `id` - (String) The unique identifier for this account policy. + - `resource_type` - (String) The resource type. + - `updated_at` - (String) The date and time that the account policy was updated.
diff --git a/website/docs/d/is_private_path_service_gateway_account_policy.html.markdown b/website/docs/d/is_private_path_service_gateway_account_policy.html.markdown new file mode 100644 index 0000000000..3a4d331fcb --- /dev/null +++ b/website/docs/d/is_private_path_service_gateway_account_policy.html.markdown @@ -0,0 +1,70 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_account_policy" +description: |- + Get information about PrivatePathServiceGatewayAccountPolicy +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_account_policy + +Provides a read-only data source for PrivatePathServiceGatewayAccountPolicy. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. + +## Example Usage + +```hcl +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} + +resource "ibm_is_subnet" "example" { + name = "example-subnet" + vpc = ibm_is_vpc.example.id + zone = "us-south-2" + ipv4_cidr_block = "10.240.0.0/24" +} +resource "ibm_is_lb" "example" { + name = "example-lb" + subnets = [ibm_is_subnet.example.id] +} +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.example.id + zonal_affinity = true + service_endpoints = ["example-fqdn"] +} +resource "ibm_is_private_path_service_gateway_account_policy" "example" { + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id + access_policy = "review" + account = "fee82deba12e4c0fb69c3b09d1f12345" +} +data "ibm_is_private_path_service_gateway_account_policy" "example" { + account_policy = ibm_is_private_path_service_gateway_account_policy.example.id + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your data source. 
+ +- `account_policy` - (Required, String) The account policy identifier. +- `private_path_service_gateway` - (Required, String) The private path service gateway identifier. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `id` - The unique identifier of the PrivatePathServiceGatewayAccountPolicy. The ID is composed of `/`. +- `access_policy` - (String) The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewedThe enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered. +- `account` - (List) The account for this access policy. + Nested scheme for **account**: + - `id` - (String) + - `resource_type` - (String) The resource type. +- `created_at` - (String) The date and time that the account policy was created. +- `href` - (String) The URL for this account policy. +- `private_path_service_gateway_account_policy` - (String) The unique identifier for this account policy. +- `resource_type` - (String) The resource type. +- `updated_at` - (String) The date and time that the account policy was updated. 
+ diff --git a/website/docs/d/is_private_path_service_gateway_endpoint_gateway_binding.html.markdown b/website/docs/d/is_private_path_service_gateway_endpoint_gateway_binding.html.markdown new file mode 100644 index 0000000000..26249c6a11 --- /dev/null +++ b/website/docs/d/is_private_path_service_gateway_endpoint_gateway_binding.html.markdown @@ -0,0 +1,69 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_endpoint_gateway_binding" +description: |- + Get information about PrivatePathServiceGatewayEndpointGatewayBinding +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_endpoint_gateway_binding + +Provides a read-only data source for PrivatePathServiceGatewayEndpointGatewayBinding. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. + +## Example Usage + +```hcl +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} + +resource "ibm_is_subnet" "example" { + name = "example-subnet" + vpc = ibm_is_vpc.example.id + zone = "us-south-2" + ipv4_cidr_block = "10.240.0.0/24" +} +resource "ibm_is_lb" "example" { + name = "example-lb" + subnets = [ibm_is_subnet.example.id] +} +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.example.id + zonal_affinity = true + service_endpoints = ["example-fqdn"] +} +data "ibm_is_private_path_service_gateway_endpoint_gateway_bindings" "example" { + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +data "ibm_is_private_path_service_gateway_endpoint_gateway_binding" "is_private_path_service_gateway_endpoint_gateway_binding" { + endpoint_gateway_binding = data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.example.endpoint_gateway_bindings.0.id + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +``` + +## Argument Reference + 
+Review the argument reference that you can specify for your data source. + +- `endpoint_gateway_binding` - (Required, String) The endpoint gateway binding identifier. +- `private_path_service_gateway` - (Required, String) The private path service gateway identifier. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `id` - The unique identifier of the PrivatePathServiceGatewayEndpointGatewayBinding. The ID is composed of `<private_path_service_gateway>/<endpoint_gateway_binding>`. +- `account` - (List) The account that created the endpoint gateway. + Nested scheme for **account**: + - `id` - (String) + - `resource_type` - (String) The resource type. +- `created_at` - (String) The date and time that the endpoint gateway binding was created. +- `expiration_at` - (String) The expiration date and time for the endpoint gateway binding. +- `href` - (String) The URL for this endpoint gateway binding. +- `lifecycle_state` - (String) The lifecycle state of the endpoint gateway binding. +- `resource_type` - (String) The resource type. +- `status` - (String) The status of the endpoint gateway binding: `denied`: endpoint gateway binding was denied; `expired`: endpoint gateway binding has expired; `pending`: endpoint gateway binding is awaiting review; `permitted`: endpoint gateway binding was permitted. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered. +- `updated_at` - (String) The date and time that the endpoint gateway binding was updated. 
+ diff --git a/website/docs/d/is_private_path_service_gateway_endpoint_gateway_bindings.html.markdown b/website/docs/d/is_private_path_service_gateway_endpoint_gateway_bindings.html.markdown new file mode 100644 index 0000000000..71bc3a624e --- /dev/null +++ b/website/docs/d/is_private_path_service_gateway_endpoint_gateway_bindings.html.markdown @@ -0,0 +1,69 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_endpoint_gateway_bindings" +description: |- + Get information about PrivatePathServiceGatewayEndpointGatewayBindingCollection +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_endpoint_gateway_bindings + +Provides a read-only data source for PrivatePathServiceGatewayEndpointGatewayBindingCollection. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. + +## Example Usage + +```hcl +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} + +resource "ibm_is_subnet" "example" { + name = "example-subnet" + vpc = ibm_is_vpc.example.id + zone = "us-south-2" + ipv4_cidr_block = "10.240.0.0/24" +} +resource "ibm_is_lb" "example" { + name = "example-lb" + subnets = [ibm_is_subnet.example.id] +} +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.example.id + zonal_affinity = true + service_endpoints = ["example-fqdn"] +} +data "ibm_is_private_path_service_gateway_endpoint_gateway_bindings" "example" { + status = "pending" + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your data source. + +- `private_path_service_gateway` - (Required, String) The private path service gateway identifier. 
+- `status` - (Optional, String) Status of the binding. +- `account` - (Optional, String) ID of the account to filter by. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `id` - The unique identifier of the PrivatePathServiceGatewayEndpointGatewayBindingCollection. +- `endpoint_gateway_bindings` - (List) Collection of endpoint gateway bindings. + Nested scheme for **endpoint_gateway_bindings**: + - `account` - (List) The account that created the endpoint gateway. + Nested scheme for **account**: + - `id` - (String) + - `resource_type` - (String) The resource type. + - `created_at` - (String) The date and time that the endpoint gateway binding was created. + - `expiration_at` - (String) The expiration date and time for the endpoint gateway binding. + - `href` - (String) The URL for this endpoint gateway binding. + - `id` - (String) The unique identifier for this endpoint gateway binding. + - `lifecycle_state` - (String) The lifecycle state of the endpoint gateway binding. + - `resource_type` - (String) The resource type. + - `status` - (String) The status of the endpoint gateway binding: `denied`: endpoint gateway binding was denied; `expired`: endpoint gateway binding has expired; `pending`: endpoint gateway binding is awaiting review; `permitted`: endpoint gateway binding was permitted. The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered. + - `updated_at` - (String) The date and time that the endpoint gateway binding was updated. 
diff --git a/website/docs/d/is_private_path_service_gateways.html.markdown b/website/docs/d/is_private_path_service_gateways.html.markdown new file mode 100644 index 0000000000..000f0f8872 --- /dev/null +++ b/website/docs/d/is_private_path_service_gateways.html.markdown @@ -0,0 +1,72 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateways" +description: |- + Get information about PrivatePathServiceGatewayCollection +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateways + +Provides a read-only data source for PrivatePathServiceGatewayCollection. You can then reference the fields of the data source in other resources within the same configuration using interpolation syntax. + +## Example Usage + +```hcl +data "ibm_is_private_path_service_gateways" "example" { +} +``` + + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your data source is created. + +- `private_path_service_gateways` - (List) Collection of private path service gateways. + Nested scheme for **private_path_service_gateways**: + - `created_at` - (String) The date and time that the private path service gateway was created. + - `crn` - (String) The CRN for this private path service gateway. + - `default_access_policy` - (String) The policy to use for bindings from accounts without an explicit account policy. + - `endpoint_gateway_count` - (Integer) The number of endpoint gateways using this private path service gateway. + - `endpoint_gateway_binding_auto_delete` - (Boolean) Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed. At present, this is always true, but may be modifiable in the future. + - `endpoint_gateway_binding_auto_delete_timeout` - (Integer) If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted. 
If the value is 0, abandoned endpoint gateway bindings will be deleted immediately. At present, this is always set to 0. This value may be modifiable in the future. + - `href` - (String) The URL for this private path service gateway. + - `id` - (String) The unique identifier for this private path service gateway. + - `lifecycle_state` - (String) The lifecycle state of the private path service gateway. + - `load_balancer` - (List) The load balancer for this private path service gateway. + Nested scheme for **load_balancer**: + - `crn` - (String) The load balancer's CRN. + - `deleted` - (List) If present, this property indicates the referenced resource has been deleted, and provides some supplementary information. + Nested scheme for **deleted**: + - `more_info` - (String) Link to documentation about deleted resources. + - `href` - (String) The load balancer's canonical URL. + - `id` - (String) The unique identifier for this load balancer. + - `name` - (String) The name for this load balancer. The name is unique across all load balancers in the VPC. + - `resource_type` - (String) The resource type. + - `name` - (String) The name for this private path service gateway. The name is unique across all private path service gateways in the VPC. + - `published` - (Boolean) Indicates the availability of this private path service gateway: `true`: Any account can request access to this private path service gateway; `false`: Access is restricted to the account that created this private path service gateway. + - `region` - (List) The region served by this private path service gateway. + Nested scheme for **region**: + - `href` - (String) The URL for this region. + - `name` - (String) The globally unique name for this region. + - `resource_group` - (List) The resource group for this private path service gateway. + Nested scheme for **resource_group**: + - `href` - (String) The URL for this resource group. + - `id` - (String) The unique identifier for this resource group. 
+ - `name` - (String) The name for this resource group. + - `resource_type` - (String) The resource type. + - `service_endpoints` - (List of strings) The fully qualified domain names for this private path service gateway. + - `vpc` - (List) The VPC this private path service gateway resides in. + Nested scheme for **vpc**: + - `crn` - (String) The CRN for this VPC. + - `deleted` - (List) If present, this property indicates the referenced resource has been deleted, and provides some supplementary information. + Nested scheme for **deleted**: + - `more_info` - (String) Link to documentation about deleted resources. + - `href` - (String) The URL for this VPC. + - `id` - (String) The unique identifier for this VPC. + - `name` - (String) The name for this VPC. The name is unique across all VPCs in the region. + - `resource_type` - (String) The resource type. + - `zonal_affinity` - (Boolean) Indicates whether this private path service gateway has zonal affinity: `true`: Traffic to the service from a zone will favor service endpoints in the same zone; `false`: Traffic to the service from a zone will be load balanced across all zones in the region the service resides in. + + + diff --git a/website/docs/d/is_subnet.html.markdown b/website/docs/d/is_subnet.html.markdown index 62cb8405b8..fb68d28b7b 100644 --- a/website/docs/d/is_subnet.html.markdown +++ b/website/docs/d/is_subnet.html.markdown @@ -83,6 +83,7 @@ In addition to all argument reference list, you can access the following attribu - `routing_table` - (List) The routing table for this subnet. Nested scheme for `routing_table`: + - `crn` - (String) The crn for this routing table. - `deleted` - (List) If present, this property indicates the referenced resource has been deleted and provides some supplementary information. 
Nested scheme for `deleted`: diff --git a/website/docs/d/is_subnets.html.markdown b/website/docs/d/is_subnets.html.markdown index 1ff061f5eb..63e0575337 100644 --- a/website/docs/d/is_subnets.html.markdown +++ b/website/docs/d/is_subnets.html.markdown @@ -92,6 +92,7 @@ You can access the following attribute references after your data source is crea - `status` - (String) The status of the subnet. - `routing_table` - (List) The routing table for this subnet. Nested scheme for `routing_table`: + - `crn` - (String) The crn for this routing table. - `deleted` - (List) If present, this property indicates the referenced resource has been deleted and provides some supplementary information. Nested scheme for `deleted`: - `more_info` - (String) Link to documentation about deleted resources. diff --git a/website/docs/d/is_virtual_endpoint_gateway.html.markdown b/website/docs/d/is_virtual_endpoint_gateway.html.markdown index 9d38883be5..f94914e725 100644 --- a/website/docs/d/is_virtual_endpoint_gateway.html.markdown +++ b/website/docs/d/is_virtual_endpoint_gateway.html.markdown @@ -54,6 +54,7 @@ In addition to the argument reference list, you can access the following attribu - `target` - (List) The endpoint gateway target. Nested scheme for `target`: + - `crn` - (String) The target CRN. - `name` - (String) The target name. - `resource_type` - (String) The resource type of the subnet reserved IP. - `vpc` - (String) The VPC ID. diff --git a/website/docs/d/is_volume.html.markdown b/website/docs/d/is_volume.html.markdown index b5389962ac..1d85cae95b 100644 --- a/website/docs/d/is_volume.html.markdown +++ b/website/docs/d/is_volume.html.markdown @@ -46,6 +46,8 @@ In addition to all argument reference list, you can access the following attribu - `access_tags` - (List) Access management tags associated for the volume. - `active` - (Boolean) Indicates whether a running virtual server instance has an attachment to this volume. 
- `attachment_state` - (Boolean) The attachment state of the volume +- `adjustable_capacity_states` - (List) The attachment states that support adjustable capacity for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. +- `adjustable_iops_states` - (List) The attachment states that support adjustable IOPS for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. - `bandwidth` - The maximum bandwidth (in megabits per second) for the volume - `busy` - (Boolean) Indicates whether this volume is performing an operation that must be serialized. This must be `false` to perform an operation that is specified to require serialization. - `capacity` - (String) The capacity of the volume in gigabytes. @@ -91,3 +93,5 @@ In addition to all argument reference list, you can access the following attribu - `message` - (String) An explanation of the status reason - `more_info` - (String) Link to documentation about this status reason - `tags` - (String) User Tags associated with the volume. (https://cloud.ibm.com/apidocs/tagging#types-of-tags) +- `unattached_capacity_update_supported` - (Boolean) Indicates whether the capacity for the volume can be changed when not attached to a running virtual server instance. +- `unattached_iops_update_supported` - (Boolean) Indicates whether the IOPS for the volume can be changed when not attached to a running virtual server instance. diff --git a/website/docs/d/is_volume_profile.html.markdown b/website/docs/d/is_volume_profile.html.markdown index 8cdf6c9683..4f098dfcbe 100644 --- a/website/docs/d/is_volume_profile.html.markdown +++ b/website/docs/d/is_volume_profile.html.markdown @@ -38,4 +38,13 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to the argument reference list, you can access the following attribute references after your data source is created. 
+- `adjustable_capacity_states` - (List) + Nested schema for **adjustable_capacity_states**: + - `type` - (String) The type for this profile field. + - `values` - (List) The attachment states that support adjustable capacity for a volume with this profile. Allowable list items are: `attached`, `unattached`, `unusable`. +- `adjustable_iops_states` - (List) + Nested schema for **adjustable_iops_states**: + - `type` - (String) The type for this profile field. + - `values` - (List) The attachment states that support adjustable IOPS for a volume with this profile. Allowable list items are: `attached`, `unattached`, `unusable`. - `family` - (String) The family of the virtual server volume profile. + diff --git a/website/docs/d/is_volume_profiles.html.markdown b/website/docs/d/is_volume_profiles.html.markdown index 77efa299f2..12c6c3e8cf 100644 --- a/website/docs/d/is_volume_profiles.html.markdown +++ b/website/docs/d/is_volume_profiles.html.markdown @@ -35,6 +35,14 @@ You can access the following attribute references after your data source is crea - `profiles` - (List) Lists all server volume profiles in the region. Nested scheme for `profiles`: + - `adjustable_capacity_states` - (List) + Nested schema for **adjustable_capacity_states**: + - `type` - (String) The type for this profile field. + - `values` - (List) The attachment states that support adjustable capacity for a volume with this profile. Allowable list items are: `attached`, `unattached`, `unusable`. + - `adjustable_iops_states` - (List) + Nested schema for **adjustable_iops_states**: + - `type` - (String) The type for this profile field. + - `values` - (List) The attachment states that support adjustable IOPS for a volume with this profile. Allowable list items are: `attached`, `unattached`, `unusable`. - `name` - (String) The name of the virtual server volume profile. - `family` - (String) The family of the virtual server volume profile. 
diff --git a/website/docs/d/is_volumes.html.markdown b/website/docs/d/is_volumes.html.markdown index 1a8dd26481..609bd268be 100644 --- a/website/docs/d/is_volumes.html.markdown +++ b/website/docs/d/is_volumes.html.markdown @@ -45,6 +45,8 @@ In addition to all argument references listed, you can access the following attr Nested scheme for **volumes**: - `access_tags` - (List) Access management tags associated for the volume. - `active` - (Boolean) Indicates whether a running virtual server instance has an attachment to this volume. + - `adjustable_capacity_states` - (List) The attachment states that support adjustable capacity for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. + - `adjustable_iops_states` - (List) The attachment states that support adjustable IOPS for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. - `attachment_state` - (Boolean) The attachment state of the volume - `bandwidth` - (Integer) The maximum bandwidth (in megabits per second) for the volume. - `busy` - (Boolean) Indicates whether this volume is performing an operation that must be serialized. This must be `false` to perform an operation that is specified to require serialization. diff --git a/website/docs/d/is_vpc_default_routing_table.html.markdown b/website/docs/d/is_vpc_default_routing_table.html.markdown index d18ecfbe37..30f612f281 100644 --- a/website/docs/d/is_vpc_default_routing_table.html.markdown +++ b/website/docs/d/is_vpc_default_routing_table.html.markdown @@ -41,6 +41,7 @@ Review the argument references that you can specify for your data source. In addition to the argument reference list, you can access the following attribute references after your data source is created. - `created_at` - (Timestamp) The date and time that the default routing table was created. +- `crn` - (String) The crn for this default routing table. - `default_routing_table` - (String) The unique identifier for this routing table. 
- `href` - (String) The routing table URL. - `id` - (String) The unique identifier for this routing table. Same as `default_routing_table`. diff --git a/website/docs/d/is_vpc_routing_table.html.markdown b/website/docs/d/is_vpc_routing_table.html.markdown index 04f6388bcd..f1e2630103 100644 --- a/website/docs/d/is_vpc_routing_table.html.markdown +++ b/website/docs/d/is_vpc_routing_table.html.markdown @@ -58,6 +58,7 @@ In addition to all argument references listed, you can access the following attr **•** `direct_link` (requires `route_direct_link_ingress` be set to `true`)
**•** `transit_gateway` (requires `route_transit_gateway_ingress` be set to `true`) - `created_at` - (String) The date and time that this routing table was created. +- `crn` - (String) The crn for this routing table. - `href` - (String) The URL for this routing table. - `id` - (String) The unique identifier of the RoutingTable. - `is_default` - (Boolean) Indicates whether this is the default routing table for this VPC. diff --git a/website/docs/d/is_vpc_routing_tables.html.markdown b/website/docs/d/is_vpc_routing_tables.html.markdown index d7b750a62d..76b3dbed46 100644 --- a/website/docs/d/is_vpc_routing_tables.html.markdown +++ b/website/docs/d/is_vpc_routing_tables.html.markdown @@ -54,6 +54,7 @@ In addition to the argument reference list, you can access the following attribu **•** `direct_link` (requires `route_direct_link_ingress` be set to `true`)
**•** `transit_gateway` (requires `route_transit_gateway_ingress` be set to `true`) - `created_at` - (Timestamp) The date and time the routing table was created. + - `crn` - (String) The crn for this routing table. - `href` - (String) The routing table URL. - `is_default` - (String) Indicates whether the default routing table. - `lifecycle_state` - (String) The lifecycle state of the routing table. diff --git a/website/docs/d/is_vpn_gateway_connection.html.markdown b/website/docs/d/is_vpn_gateway_connection.html.markdown index 00cc804950..afe9f2fd52 100644 --- a/website/docs/d/is_vpn_gateway_connection.html.markdown +++ b/website/docs/d/is_vpn_gateway_connection.html.markdown @@ -58,7 +58,7 @@ In addition to all argument references listed, you can access the following attr - `action` - (String) Dead Peer Detection actions. - `interval` - (Integer) Dead Peer Detection interval in seconds. - `timeout` - (Integer) Dead Peer Detection timeout in seconds. Must be at least the interval. - +- `distribute_traffic` - (Boolean) Indicates whether the traffic is distributed between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address. Distributing traffic across tunnels of route-based VPN gateway connections. Traffic across tunnels can be distributed with a status of up in a route-based VPN gateway connection. When creating or updating a route-based VPN gateway connection, set the distribute_traffic property to true (default is false). Existing connections will have the `distribute_traffic` property set to false. - `establish_mode` - (String) The establish mode of the VPN gateway connection:- `bidirectional`: Either side of the VPN gateway can initiate IKE protocol negotiations or rekeying processes.- `peer_only`: Only the peer can initiate IKE protocol negotiations for this VPN gateway connection. 
Additionally, the peer is responsible for initiating the rekeying process after the connection is established. If rekeying does not occur, the VPN gateway connection will be brought down after its lifetime expires. - `href` - (String) The VPN connection's canonical URL. diff --git a/website/docs/d/is_vpn_gateway_connections.html.markdown b/website/docs/d/is_vpn_gateway_connections.html.markdown index 417abf9ef0..6272e5241e 100644 --- a/website/docs/d/is_vpn_gateway_connections.html.markdown +++ b/website/docs/d/is_vpn_gateway_connections.html.markdown @@ -43,6 +43,7 @@ In addition to all argument reference list, you can access the following attribu - `admin_state_up` - (String) The VPN gateway connection admin state. Default value is **true**. - `authentication_mode` - (String) The authentication mode. - `created_at`- (Timestamp) The date and time the VPN gateway connection was created. +- `distribute_traffic` - (Boolean) Indicates whether the traffic is distributed between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address. Distributing traffic across tunnels of route-based VPN gateway connections. Traffic across tunnels can be distributed with a status of up in a route-based VPN gateway connection. When creating or updating a route-based VPN gateway connection, set the distribute_traffic property to true (default is false). Existing connections will have the `distribute_traffic` property set to false. - `establish_mode` - (String) The establish mode of the VPN gateway connection:- `bidirectional`: Either side of the VPN gateway can initiate IKE protocol negotiations or rekeying processes.- `peer_only`: Only the peer can initiate IKE protocol negotiations for this VPN gateway connection. Additionally, the peer is responsible for initiating the rekeying process after the connection is established. 
If rekeying does not occur, the VPN gateway connection will be brought down after its lifetime expires. - `id` - (String) The ID of the VPN gateway connection. - `ike_policy` - (String) The VPN gateway connection IKE Policy. diff --git a/website/docs/d/logs_alert.html.markdown b/website/docs/d/logs_alert.html.markdown index 25b2398eca..e3f26e124f 100644 --- a/website/docs/d/logs_alert.html.markdown +++ b/website/docs/d/logs_alert.html.markdown @@ -63,9 +63,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -74,7 +74,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -89,7 +89,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -117,7 +117,7 @@ Nested schema for **condition**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **values**: * `id` - (String) The alert ID. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `not` - (Boolean) The alert not. * `next_op` - (String) Operator for the alerts. * Constraints: Allowable values are: `and`, `or`. @@ -132,9 +132,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -143,7 +143,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -158,7 +158,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -177,9 +177,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -188,7 +188,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -203,7 +203,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -224,9 +224,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -235,7 +235,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -250,7 +250,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -269,9 +269,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. 
* `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -280,7 +280,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -295,7 +295,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -314,9 +314,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -325,7 +325,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -340,7 +340,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -359,9 +359,9 @@ Nested schema for **condition**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -370,7 +370,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -385,7 +385,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -412,34 +412,34 @@ Nested schema for **expiration**: * `filters` - (List) Alert filters. Nested schema for **filters**: * `alias` - (String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filter_type` - (String) The type of the filter. * Constraints: Allowable values are: `text_or_unspecified`, `template`, `ratio`, `unique_count`, `time_relative`, `metric`, `flow`. * `metadata` - (List) The metadata filters. Nested schema for **metadata**: * `applications` - (List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `subsystems` - (List) The subsystems to filter. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `ratio_alerts` - (List) The ratio alerts. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **ratio_alerts**: * `alias` - (String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applications` - (List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `group_by` - (List) The group by fields. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `severities` - (List) The severities to filter. * Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. 
* `subsystems` - (List) The subsystems to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `text` - (String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severities` - (List) The severity of the logs to filter. * Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `text` - (String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `incident_settings` - (List) Incident settings, will create the incident based on this configuration. Nested schema for **incident_settings**: @@ -455,21 +455,21 @@ Nested schema for **incident_settings**: * Constraints: The maximum length is `200` items. The minimum length is `0` items. Nested schema for **meta_labels**: * `key` - (String) The key of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `value` - (String) The value of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `meta_labels_strings` - (List) The Meta labels to add to the alert as string with ':' separator. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `name` - (String) Alert name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `notification_groups` - (List) Alert notification groups. * Constraints: The maximum length is `10` items. The minimum length is `1` item. Nested schema for **notification_groups**: * `group_by_fields` - (List) Group by fields to group the values by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `20` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `20` items. The minimum length is `0` items. * `notifications` - (List) Webhook target settings for the the notification. * Constraints: The maximum length is `20` items. The minimum length is `0` items. Nested schema for **notifications**: @@ -485,7 +485,7 @@ Nested schema for **notification_groups**: * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `notification_payload_filters` - (List) JSON keys to include in the alert notification, if left empty get the full log text in the alert notification. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `severity` - (String) Alert severity. * Constraints: Allowable values are: `info_or_unspecified`, `warning`, `critical`, `error`. diff --git a/website/docs/d/logs_alerts.html.markdown b/website/docs/d/logs_alerts.html.markdown index 951c9a32d9..e92e1c25a8 100644 --- a/website/docs/d/logs_alerts.html.markdown +++ b/website/docs/d/logs_alerts.html.markdown @@ -62,9 +62,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. 
* `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -73,7 +73,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -88,7 +88,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -116,7 +116,7 @@ Nested schema for **alerts**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **values**: * `id` - (String) The alert ID. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `not` - (Boolean) The alert not. * `next_op` - (String) Operator for the alerts. * Constraints: Allowable values are: `and`, `or`. @@ -131,9 +131,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. 
The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -142,7 +142,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -157,7 +157,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -176,9 +176,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -187,7 +187,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -202,7 +202,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -223,9 +223,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -234,7 +234,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -249,7 +249,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -268,9 +268,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -279,7 +279,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -294,7 +294,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -313,9 +313,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. 
* `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -324,7 +324,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -339,7 +339,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -358,9 +358,9 @@ Nested schema for **alerts**: * `parameters` - (List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -369,7 +369,7 @@ Nested schema for **alerts**: * `arithmetic_operator_modifier` - (Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `metric_field` - (String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Integer) Non null percentage of the evaluation. @@ -384,7 +384,7 @@ Nested schema for **alerts**: * `non_null_percentage` - (Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Boolean) Should we swap null values with zero. @@ -408,34 +408,34 @@ Nested schema for **alerts**: * `filters` - (List) Alert filters. Nested schema for **filters**: * `alias` - (String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filter_type` - (String) The type of the filter. * Constraints: Allowable values are: `text_or_unspecified`, `template`, `ratio`, `unique_count`, `time_relative`, `metric`, `flow`. * `metadata` - (List) The metadata filters. Nested schema for **metadata**: * `applications` - (List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `subsystems` - (List) The subsystems to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `ratio_alerts` - (List) The ratio alerts. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **ratio_alerts**: * `alias` - (String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applications` - (List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `group_by` - (List) The group by fields. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `severities` - (List) The severities to filter. * Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `subsystems` - (List) The subsystems to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `text` - (String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severities` - (List) The severity of the logs to filter. * Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `text` - (String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (String) Alert ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `incident_settings` - (List) Incident settings, will create the incident based on this configuration. @@ -450,18 +450,18 @@ Nested schema for **alerts**: * Constraints: The maximum length is `200` items. The minimum length is `0` items. Nested schema for **meta_labels**: * `key` - (String) The key of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `value` - (String) The value of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `meta_labels_strings` - (List) The Meta labels to add to the alert as string with ':' separator. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `4096` items. The minimum length is `0` items. * `name` - (String) Alert name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `notification_groups` - (List) Alert notification groups. * Constraints: The maximum length is `10` items. The minimum length is `1` item. Nested schema for **notification_groups**: * `group_by_fields` - (List) Group by fields to group the values by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `20` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `20` items. The minimum length is `0` items. * `notifications` - (List) Webhook target settings for the the notification. * Constraints: The maximum length is `20` items. The minimum length is `0` items. Nested schema for **notifications**: @@ -476,7 +476,7 @@ Nested schema for **alerts**: * `retriggering_period_seconds` - (Integer) Retriggering period of the alert in seconds. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `notification_payload_filters` - (List) JSON keys to include in the alert notification, if left empty get the full log text in the alert notification. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. 
* `severity` - (String) Alert severity. * Constraints: Allowable values are: `info_or_unspecified`, `warning`, `critical`, `error`. * `unique_identifier` - (String) Alert unique identifier. diff --git a/website/docs/d/logs_dashboard.html.markdown b/website/docs/d/logs_dashboard.html.markdown index a71aed65be..9819bb4d8d 100644 --- a/website/docs/d/logs_dashboard.html.markdown +++ b/website/docs/d/logs_dashboard.html.markdown @@ -28,7 +28,7 @@ You can specify the following arguments for this data source. * `instance_id` - (Required, String) Cloud Logs Instance GUID. * `region` - (Optional, String) Cloud Logs Instance Region. * `dashboard_id` - (Required, Forces new resource, String) The ID of the dashboard. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference @@ -49,7 +49,7 @@ Nested schema for **annotations**: * `id` - (String) Unique identifier within the dashboard. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) Name of the annotation. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source` - (List) Source of the annotation events. Nested schema for **source**: * `logs` - (List) Logs source. 
@@ -58,15 +58,15 @@ Nested schema for **annotations**: * Constraints: The maximum length is `10` items. The minimum length is `0` items. Nested schema for **label_fields**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The Lucene query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `message_template` - (String) Template for the annotation message. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `strategy` - (List) Strategy for turning logs data into annotations. Nested schema for **strategy**: * `duration` - (List) Event start timestamp and duration are extracted from the log entry. @@ -74,13 +74,13 @@ Nested schema for **annotations**: * `duration_field` - (List) Field to count distinct values of. 
Nested schema for **duration_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `start_timestamp_field` - (List) Field to count distinct values of. Nested schema for **start_timestamp_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `instant` - (List) Event timestamp is extracted from the log entry. @@ -88,7 +88,7 @@ Nested schema for **annotations**: * `timestamp_field` - (List) Field to count distinct values of. Nested schema for **timestamp_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. 
* `range` - (List) Event start and end timestamps are extracted from the log entry. @@ -96,32 +96,32 @@ Nested schema for **annotations**: * `end_timestamp_field` - (List) Field to count distinct values of. Nested schema for **end_timestamp_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `start_timestamp_field` - (List) Field to count distinct values of. Nested schema for **start_timestamp_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (List) Metrics source. Nested schema for **metrics**: * `labels` - (List) Labels to display in the annotation. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `message_template` - (String) Template for the annotation message. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The PromQL query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `strategy` - (List) Strategy for turning metrics data into annotations. Nested schema for **strategy**: * `start_time_metric` - (List) Take first data point and use its value as annotation timestamp (instead of point own timestamp). Nested schema for **start_time_metric**: * `description` - (String) Brief description or summary of the dashboard's purpose or content. - * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `false` - (List) Auto refresh interval is set to off. Nested schema for **false**: @@ -138,7 +138,7 @@ Nested schema for **filters**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -152,7 +152,7 @@ Nested schema for **filters**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -160,11 +160,11 @@ Nested schema for **filters**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -176,7 +176,7 @@ Nested schema for **filters**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -184,7 +184,7 @@ Nested schema for **filters**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `five_minutes` - (List) Auto refresh interval is set to five minutes. Nested schema for **five_minutes**: @@ -197,7 +197,7 @@ Nested schema for **folder_id**: * `folder_path` - (List) Path of the folder containing the dashboard. Nested schema for **folder_path**: * `segments` - (List) The segments of the folder path. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `href` - (String) Unique identifier for the dashboard. * Constraints: The maximum length is `21` characters. The minimum length is `21` characters. The value must match regular expression `/^[a-zA-Z0-9]{21}$/`. @@ -208,7 +208,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **sections**: * `href` - (String) The unique identifier of the section within the layout. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (List) Unique identifier of the folder containing the dashboard. Nested schema for **id**: * `value` - (String) The UUID value. @@ -220,7 +220,7 @@ Nested schema for **layout**: Nested schema for **appearance**: * `height` - (Integer) The height of the row. * `href` - (String) The unique identifier of the row within the layout. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (List) Unique identifier of the folder containing the dashboard. 
Nested schema for **id**: * `value` - (String) The UUID value. @@ -234,7 +234,7 @@ Nested schema for **layout**: * `bar_chart` - (List) Bar chart widget. Nested schema for **bar_chart**: * `color_scheme` - (String) Supported vaues: classic, severity, cold, negative, green, red, blue. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `colors_by` - (List) Coloring mode. Nested schema for **colors_by**: * `aggregation` - (List) Each aggregation will have different color and stack color will be derived from aggregation color. @@ -246,7 +246,7 @@ Nested schema for **layout**: * `data_mode_type` - (String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `group_name_template` - (String) Template for bar labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `max_bars_per_chart` - (Integer) Maximum number of bars to present in the chart. * `query` - (List) Data source specific query, defines from where and how to fetch the data. Nested schema for **query**: @@ -255,7 +255,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) Extra filter on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -264,7 +264,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -278,7 +278,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -286,11 +286,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. 
Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -302,7 +302,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -310,11 +310,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `stacked_group_name` - (String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `aggregation` - (List) Aggregations. @@ -324,7 +324,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. 
* `count` - (List) Count the number of entries. @@ -334,7 +334,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -342,7 +342,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -350,7 +350,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (List) Calculate percentile value of log field. @@ -358,7 +358,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -367,7 +367,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (List) Extra filter on top of Lucene query. @@ -376,7 +376,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -390,7 +390,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -398,22 +398,22 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (List) Fiel to group by. * Constraints: The maximum length is `2` items. 
The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (List) Metrics specific query. @@ -422,7 +422,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. 
Nested schema for **filters**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -434,7 +434,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -442,15 +442,15 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Labels to group by. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (String) Label to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `scale_type` - (String) Scale type. * Constraints: Allowable values are: `unspecified`, `linear`, `logarithmic`. * `sort_by` - (String) Sorting mode. @@ -459,7 +459,7 @@ Nested schema for **layout**: Nested schema for **stack_definition**: * `max_slices_per_bar` - (Integer) Maximum number of slices per bar. * `stack_name_template` - (String) Template for stack slice label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `unit` - (String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `x_axis` - (List) X axis mode. @@ -477,14 +477,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **columns**: * `field` - (String) References a field in result set. In case of aggregation, it references the aggregation identifier. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `width` - (Integer) Column width. * `data_mode_type` - (String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `order_by` - (List) Column used for ordering the results. Nested schema for **order_by**: * `field` - (String) The field to order by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order_direction` - (String) The direction of the order: ascending or descending. * Constraints: Allowable values are: `unspecified`, `asc`, `desc`. * `query` - (List) Data source specific query, defines from where and how to fetch the data. @@ -494,7 +494,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. 
Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) Extra filtering on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -503,7 +503,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -517,7 +517,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. 
Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -525,11 +525,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -541,7 +541,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. 
Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -549,7 +549,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `filters` - (List) Extra filtering on top of the Lucene query. @@ -558,7 +558,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -572,7 +572,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -580,7 +580,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `grouping` - (List) Grouping and aggregation. Nested schema for **grouping**: * `aggregations` - (List) Aggregations. @@ -593,7 +593,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (List) Count the number of entries. @@ -603,7 +603,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -611,7 +611,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -619,7 +619,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. 
* `percentile` - (List) Calculate percentile value of log field. @@ -627,7 +627,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -636,32 +636,32 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `id` - (String) Aggregation identifier, must be unique within grouping configuration. - * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `is_visible` - (Boolean) Whether the aggregation is visible. * `name` - (String) Aggregation name, used as column name. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `group_bys` - (List) Fields to group by. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **group_bys**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metrics` - (List) Metrics specific query. Nested schema for **metrics**: * `filters` - (List) Extra filtering on top of the PromQL query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (String) Label associated with the metric. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -673,7 +673,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -681,11 +681,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `results_per_page` - (Integer) Number of results per page. * `row_style` - (String) Display style for table rows. * Constraints: Allowable values are: `unspecified`, `one_line`, `two_line`, `condensed`, `json`, `list`. @@ -702,7 +702,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) Extra filters applied on top of Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -711,7 +711,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. 
@@ -725,7 +725,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -733,11 +733,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. 
@@ -749,7 +749,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -757,7 +757,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `filters` - (List) Extra filters applied on top of Lucene query. @@ -766,7 +766,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. 
* `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -780,7 +780,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -788,7 +788,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs_aggregation` - (List) Aggregations. Nested schema for **logs_aggregation**: * `average` - (List) Calculate average value of log field. @@ -796,7 +796,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (List) Count the number of entries. @@ -806,7 +806,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -814,7 +814,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -822,7 +822,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (List) Calculate percentile value of log field. @@ -830,7 +830,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -839,13 +839,13 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metrics` - (List) Metrics specific query. Nested schema for **metrics**: * `aggregation` - (String) Aggregation. When AGGREGATION_UNSPECIFIED is selected, widget uses instant query. Otherwise, it uses range query. @@ -854,7 +854,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -866,7 +866,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -874,11 +874,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `show_inner_arc` - (Boolean) Show inner arc (styling). * `show_outer_arc` - (Boolean) Show outer arc (styling). * `threshold_by` - (String) What threshold color should be applied to: value or background. @@ -887,14 +887,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **thresholds**: * `color` - (String) Color. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `from` - (Float) Value at which the color should change. * `unit` - (String) Query result value interpretation. * Constraints: Allowable values are: `unspecified`, `number`, `percent`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `horizontal_bar_chart` - (List) Horizontal bar chart widget. Nested schema for **horizontal_bar_chart**: * `color_scheme` - (String) Color scheme name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `colors_by` - (List) Coloring mode. Nested schema for **colors_by**: * `aggregation` - (List) Each aggregation will have different color and stack color will be derived from aggregation color. @@ -907,7 +907,7 @@ Nested schema for **layout**: * Constraints: Allowable values are: `high_unspecified`, `archive`. * `display_on_bar` - (Boolean) Whether to display values on the bars. * `group_name_template` - (String) Template for bar labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `max_bars_per_chart` - (Integer) Maximum number of bars to display in the chart. * `query` - (List) Data source specific query, defines from where and how to fetch the data. 
Nested schema for **query**: @@ -916,7 +916,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) Extra filter on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -925,7 +925,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -939,7 +939,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -947,11 +947,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -963,7 +963,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. 
* `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -971,11 +971,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `stacked_group_name` - (String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `aggregation` - (List) Aggregations. @@ -985,7 +985,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. 
The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (List) Count the number of entries. @@ -995,7 +995,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -1003,7 +1003,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -1011,7 +1011,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (List) Calculate percentile value of log field. @@ -1019,7 +1019,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -1028,7 +1028,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (List) Extra filter on top of the Lucene query. @@ -1037,7 +1037,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -1051,7 +1051,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1059,22 +1059,22 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (List) Fields to group by. * Constraints: The maximum length is `2` items. The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (List) Metrics specific query. @@ -1083,7 +1083,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -1095,7 +1095,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1103,15 +1103,15 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Labels to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (String) Label to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `scale_type` - (String) Scale type. * Constraints: Allowable values are: `unspecified`, `linear`, `logarithmic`. * `sort_by` - (String) Sorting mode. @@ -1120,7 +1120,7 @@ Nested schema for **layout**: Nested schema for **stack_definition**: * `max_slices_per_bar` - (Integer) Maximum number of slices per bar. * `stack_name_template` - (String) Template for stack slice label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `y_axis_view_by` - (List) Y-axis view mode. @@ -1141,14 +1141,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **query_definitions**: * `color_scheme` - (String) Color scheme for the series. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `data_mode_type` - (String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `id` - (String) Unique identifier of the query within the widget. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `is_visible` - (Boolean) Whether data for this query should be visible on the chart. * `name` - (String) Query name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `query` - (List) Data source specific query, defines from where and how to fetch the data. Nested schema for **query**: * `dataprime` - (List) Dataprime language based query. @@ -1156,7 +1156,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) Filters to be applied to query results. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -1165,7 +1165,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -1179,7 +1179,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1187,11 +1187,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -1203,7 +1203,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1211,7 +1211,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `aggregations` - (List) Aggregations. @@ -1222,7 +1222,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (List) Count the number of entries. 
@@ -1232,7 +1232,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -1240,7 +1240,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -1248,7 +1248,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (List) Calculate percentile value of log field. @@ -1256,7 +1256,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -1265,7 +1265,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (List) Extra filtering on top of the Lucene query. @@ -1274,7 +1274,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -1288,7 +1288,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1296,27 +1296,27 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (List) Group by fields (deprecated). - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_bys` - (List) Group by fields. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **group_bys**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metrics` - (List) Metrics specific query. Nested schema for **metrics**: * `filters` - (List) Filtering to be applied to query results. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -1328,7 +1328,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1336,11 +1336,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `resolution` - (List) Resolution of the data. Nested schema for **resolution**: * `buckets_presented` - (Integer) Maximum number of data points to fetch. @@ -1351,7 +1351,7 @@ Nested schema for **layout**: * `series_count_limit` - (String) Maximum number of series to display. * Constraints: The maximum length is `19` characters. The minimum length is `1` character. The value must match regular expression `/^-?\\d{1,19}$/`. * `series_name_template` - (String) Template for series name in legend and tooltip. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `stacked_line` - (String) Stacked lines. @@ -1364,17 +1364,17 @@ Nested schema for **layout**: * `markdown` - (List) Markdown widget. Nested schema for **markdown**: * `markdown_text` - (String) Markdown text to render. - * Constraints: The maximum length is `10000` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `10000` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `tooltip_text` - (String) Tooltip text on hover. - * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `pie_chart` - (List) Pie chart widget. Nested schema for **pie_chart**: * `color_scheme` - (String) Color scheme name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `data_mode_type` - (String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `group_name_template` - (String) Template for group labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `label_definition` - (List) Label settings. Nested schema for **label_definition**: * `is_visible` - (Boolean) Controls whether to show the label. @@ -1392,7 +1392,7 @@ Nested schema for **layout**: * `dataprime_query` - (List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `filters` - (List) Extra filters on top of Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -1401,7 +1401,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -1415,7 +1415,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1423,11 +1423,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. 
The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -1439,7 +1439,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1447,11 +1447,11 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `stacked_group_name` - (String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (List) Logs specific query. Nested schema for **logs**: * `aggregation` - (List) Aggregations. @@ -1461,7 +1461,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (List) Count the number of entries. @@ -1471,7 +1471,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (List) Calculate maximum value of log field. @@ -1479,7 +1479,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (List) Calculate minimum value of log field. @@ -1487,7 +1487,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (List) Calculate percentile value of log field. @@ -1495,7 +1495,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Float) Value in range (0, 100]. @@ -1504,7 +1504,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (List) Extra filters on top of Lucene query. @@ -1513,7 +1513,7 @@ Nested schema for **layout**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (List) Operator to use for filtering the logs. @@ -1527,7 +1527,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1535,22 +1535,22 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (List) Fields to group by. * Constraints: The maximum length is `2` items. The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (List) Lucene query. Nested schema for **lucene_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (List) Metrics specific query. @@ -1559,7 +1559,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (List) Equality comparison. @@ -1571,7 +1571,7 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (List) Selection criteria for the non-equality comparison. @@ -1579,37 +1579,37 @@ Nested schema for **layout**: * `list` - (List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (List) PromQL query. Nested schema for **promql_query**: * `value` - (String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `show_legend` - (Boolean) Controls whether to show the legend. * `stack_definition` - (List) Stack definition. Nested schema for **stack_definition**: * `max_slices_per_stack` - (Integer) Maximum number of slices per stack. * `stack_name_template` - (String) Template for stack labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `description` - (String) Widget description. - * Constraints: The maximum length is `200` characters. 
The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `href` - (String) Widget identifier within the dashboard. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (List) Unique identifier of the folder containing the dashboard. Nested schema for **id**: * `value` - (String) The UUID value. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `title` - (String) Widget title. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `updated_at` - (String) Last update timestamp. * `name` - (String) Display name of the dashboard. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `relative_time_frame` - (String) Relative time frame specifying a duration from the current time. * Constraints: The maximum length is `10` characters. The minimum length is `2` characters. The value must match regular expression `/^[0-9]+[smhdw]?$/`. @@ -1631,31 +1631,31 @@ Nested schema for **variables**: * `list` - (List) Specific values are selected. Nested schema for **list**: * `values` - (List) Selected values. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `source` - (List) Variable value source. Nested schema for **source**: * `constant_list` - (List) List of constant values. Nested schema for **constant_list**: * `values` - (List) List of constant values. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `logs_path` - (List) Unique values for a given logs path. Nested schema for **logs_path**: * `observation_field` - (List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metric_label` - (List) Unique values for a given metric label. Nested schema for **metric_label**: * `label` - (String) Metric label to source unique values from. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_name` - (String) Metric name to source unique values from. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `values_order_direction` - (String) The direction of the order: ascending or descending. * Constraints: Allowable values are: `unspecified`, `asc`, `desc`. * `display_name` - (String) Name used in variable UI. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `name` - (String) Name of the variable which can be used in templates. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_dashboard_folders.html.markdown b/website/docs/d/logs_dashboard_folders.html.markdown index e6beff78f4..b77e7da3d4 100644 --- a/website/docs/d/logs_dashboard_folders.html.markdown +++ b/website/docs/d/logs_dashboard_folders.html.markdown @@ -37,9 +37,9 @@ After your data source is created, you can read values from the following attrib * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **folders**: * `id` - (String) The dashboard folder ID, uuid. - * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `name` - (String) The dashboard folder name, required. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `parent_id` - (String) The dashboard folder parent ID, optional. If not set, the folder is a root folder, if set, the folder is a subfolder of the parent folder and needs to be a uuid. - * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
diff --git a/website/docs/d/logs_data_access_rules.html.markdown b/website/docs/d/logs_data_access_rules.html.markdown index d901a54f48..ba620f5910 100644 --- a/website/docs/d/logs_data_access_rules.html.markdown +++ b/website/docs/d/logs_data_access_rules.html.markdown @@ -43,7 +43,7 @@ Nested schema for **data_access_rules**: * `description` - (String) Optional Data Access Rule Description. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\-\\s]+$/`. * `display_name` - (String) Data Access Rule Display Name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (List) List of filters that the Data Access Rule is composed of. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: diff --git a/website/docs/d/logs_e2m.html.markdown b/website/docs/d/logs_e2m.html.markdown index 4b8445766e..8e97d3aaa8 100644 --- a/website/docs/d/logs_e2m.html.markdown +++ b/website/docs/d/logs_e2m.html.markdown @@ -28,7 +28,7 @@ You can specify the following arguments for this data source. * `instance_id` - (Required, String) Cloud Logs Instance GUID. * `region` - (Optional, String) Cloud Logs Instance Region. * `logs_e2m_id` - (Required, Forces new resource, String) ID of e2m to be deleted. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference @@ -36,7 +36,7 @@ After your data source is created, you can read values from the following attrib * `id` - The unique identifier of the logs_e2m. * `create_time` - (String) E2M create time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `description` - (String) Description of the E2M. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\-\\s]+$/`. @@ -46,15 +46,15 @@ After your data source is created, you can read values from the following attrib * `logs_query` - (List) E2M logs query. Nested schema for **logs_query**: * `alias` - (String) Alias. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applicationname_filters` - (List) Application name filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `lucene` - (String) Lucene query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity_filters` - (List) Severity type filters. * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `subsystemname_filters` - (List) Subsystem names filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metric_fields` - (List) E2M metric fields. * Constraints: The maximum length is `10` items. The minimum length is `0` items. @@ -74,9 +74,9 @@ Nested schema for **metric_fields**: * `sample_type` - (String) Sample type min/max. * Constraints: Allowable values are: `unspecified`, `min`, `max`. * `target_metric_name` - (String) Target metric field alias name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source_field` - (String) Source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_base_metric_name` - (String) Target metric field alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. @@ -84,12 +84,12 @@ Nested schema for **metric_fields**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **metric_labels**: * `source_field` - (String) Metric label source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_label` - (String) Metric label target alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. * `name` - (String) Name of the E2M. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `permutations` - (List) Represents the limit of the permutations and if the limit was exceeded. Nested schema for **permutations**: @@ -100,5 +100,5 @@ Nested schema for **permutations**: * Constraints: Allowable values are: `unspecified`, `logs2metrics`. * `update_time` - (String) E2M update time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_e2ms.html.markdown b/website/docs/d/logs_e2ms.html.markdown index 98316b9f08..a014fa6586 100644 --- a/website/docs/d/logs_e2ms.html.markdown +++ b/website/docs/d/logs_e2ms.html.markdown @@ -36,7 +36,7 @@ After your data source is created, you can read values from the following attrib * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **events2metrics**: * `create_time` - (String) E2M create time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `description` - (String) Description of the E2M. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\-\\s]+$/`. * `id` - (String) E2M unique ID, required on update requests. @@ -45,15 +45,15 @@ Nested schema for **events2metrics**: * `logs_query` - (List) E2M logs query. Nested schema for **logs_query**: * `alias` - (String) Alias. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applicationname_filters` - (List) Application name filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `lucene` - (String) Lucene query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity_filters` - (List) Severity type filters. * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `subsystemname_filters` - (List) Subsystem names filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metric_fields` - (List) E2M metric fields. * Constraints: The maximum length is `10` items. The minimum length is `0` items. Nested schema for **metric_fields**: @@ -72,20 +72,20 @@ Nested schema for **events2metrics**: * `sample_type` - (String) Sample type min/max. * Constraints: Allowable values are: `unspecified`, `min`, `max`. * `target_metric_name` - (String) Target metric field alias name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source_field` - (String) Source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_base_metric_name` - (String) Target metric field alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. * `metric_labels` - (List) E2M metric labels. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **metric_labels**: * `source_field` - (String) Metric label source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_label` - (String) Metric label target alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. * `name` - (String) Name of the E2M. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `permutations` - (List) Represents the limit of the permutations and if the limit was exceeded. Nested schema for **permutations**: * `has_exceeded_limit` - (Boolean) Flag to indicate if limit was exceeded. @@ -93,5 +93,5 @@ Nested schema for **events2metrics**: * `type` - (String) E2M type. * Constraints: Allowable values are: `unspecified`, `logs2metrics`. * `update_time` - (String) E2M update time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_enrichments.html.markdown b/website/docs/d/logs_enrichments.html.markdown index 7b5a9cbbd0..a76a8c8a53 100644 --- a/website/docs/d/logs_enrichments.html.markdown +++ b/website/docs/d/logs_enrichments.html.markdown @@ -46,7 +46,7 @@ Nested schema for **enrichments**: * `suspicious_ip` - (List) The suspicious ip enrichment. Nested schema for **suspicious_ip**: * `field_name` - (String) The enrichment field name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (Integer) The enrichment ID. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
diff --git a/website/docs/d/logs_outgoing_webhook.html.markdown b/website/docs/d/logs_outgoing_webhook.html.markdown index f4b4a0a9f1..05bca20040 100644 --- a/website/docs/d/logs_outgoing_webhook.html.markdown +++ b/website/docs/d/logs_outgoing_webhook.html.markdown @@ -46,12 +46,12 @@ Nested schema for **ibm_event_notifications**: * `region_id` - (String) The region ID of the selected IBM Event Notifications instance. * Constraints: The maximum length is `4096` characters. The minimum length is `4` characters. The value must match regular expression `/^[a-z]{2}-[a-z]+$/`. * `source_id` - (String) The ID of the created source in the IBM Event Notifications instance. Corresponds to the Cloud Logs instance crn. Not required when creating an Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source_name` - (String) The name of the created source in the IBM Event Notifications instance. Not required when creating an Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `name` - (String) The name of the Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `type` - (String) The type of the deployed Outbound Integrations to list. * Constraints: Allowable values are: `ibm_event_notifications`. @@ -59,5 +59,5 @@ Nested schema for **ibm_event_notifications**: * `updated_at` - (String) The update time of the Outbound Integration. * `url` - (String) The URL of the Outbound Integration. Null for IBM Event Notifications integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_outgoing_webhooks.html.markdown b/website/docs/d/logs_outgoing_webhooks.html.markdown index a1cae70f63..269ef8dbfa 100644 --- a/website/docs/d/logs_outgoing_webhooks.html.markdown +++ b/website/docs/d/logs_outgoing_webhooks.html.markdown @@ -44,8 +44,8 @@ Nested schema for **outgoing_webhooks**: * `id` - (String) The ID of the Outbound Integration. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) The name of the Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `updated_at` - (String) The update time of the Outbound Integration. * `url` - (String) The URL of the Outbound Integration. 
Null for IBM Event Notifications integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_policies.html.markdown b/website/docs/d/logs_policies.html.markdown index d195ff9077..3e1e3057a8 100644 --- a/website/docs/d/logs_policies.html.markdown +++ b/website/docs/d/logs_policies.html.markdown @@ -43,7 +43,7 @@ Nested schema for **policies**: * `application_rule` - (List) Rule for matching with application. Nested schema for **application_rule**: * `name` - (String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (String) Identifier of the rule. * Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. * `archive_retention` - (List) Archive retention definition. @@ -64,14 +64,14 @@ Nested schema for **policies**: * `severities` - (List) Source severities to match with. * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `name` - (String) Name of policy. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) Order of policy in relation to other policies. * `priority` - (String) The data pipeline sources that match the policy rules will go through. * Constraints: Allowable values are: `type_unspecified`, `type_block`, `type_low`, `type_medium`, `type_high`. * `subsystem_rule` - (List) Rule for matching with application. Nested schema for **subsystem_rule**: * `name` - (String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (String) Identifier of the rule. * Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. * `updated_at` - (String) Updated at date at utc+0. diff --git a/website/docs/d/logs_policy.html.markdown b/website/docs/d/logs_policy.html.markdown index 3200eca0a8..d5993ffdb9 100644 --- a/website/docs/d/logs_policy.html.markdown +++ b/website/docs/d/logs_policy.html.markdown @@ -38,7 +38,7 @@ After your data source is created, you can read values from the following attrib * `application_rule` - (List) Rule for matching with application. Nested schema for **application_rule**: * `name` - (String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (String) Identifier of the rule. 
* Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. @@ -65,7 +65,7 @@ Nested schema for **log_rules**: * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `name` - (String) Name of policy. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) Order of policy in relation to other policies. @@ -75,7 +75,7 @@ Nested schema for **log_rules**: * `subsystem_rule` - (List) Rule for matching with application. Nested schema for **subsystem_rule**: * `name` - (String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (String) Identifier of the rule. * Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. diff --git a/website/docs/d/logs_rule_group.html.markdown b/website/docs/d/logs_rule_group.html.markdown index 3f68821d3f..77c64071ff 100644 --- a/website/docs/d/logs_rule_group.html.markdown +++ b/website/docs/d/logs_rule_group.html.markdown @@ -36,12 +36,12 @@ After your data source is created, you can read values from the following attrib * `id` - The unique identifier of the logs_rule_group. 
* `description` - (String) A description for the rule group, should express what is the rule group purpose. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `enabled` - (Boolean) Whether or not the rule is enabled. * `name` - (String) The name of the rule group. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) // The order in which the rule group will be evaluated. The lower the order, the more priority the group will have. Not providing the order will by default create a group with the last order. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. @@ -52,7 +52,7 @@ Nested schema for **rule_matchers**: * `application_name` - (List) ApplicationName constraint. Nested schema for **application_name**: * `value` - (String) Only logs with this ApplicationName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity` - (List) Severity constraint. Nested schema for **severity**: * `value` - (String) Only logs with this severity value will match. 
@@ -60,7 +60,7 @@ Nested schema for **rule_matchers**: * `subsystem_name` - (List) SubsystemName constraint. Nested schema for **subsystem_name**: * `value` - (String) Only logs with this SubsystemName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_subgroups` - (List) Rule subgroups. Will try to execute the first rule subgroup, and if not matched will try to match the next one in order. * Constraints: The maximum length is `4096` items. The minimum length is `1` item. @@ -79,7 +79,7 @@ Nested schema for **rule_subgroups**: * `id` - (String) Unique identifier of the rule. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) Name of the rule. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) The ordering of the rule subgroup. Lower order will run first. 0 is considered as no value. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `parameters` - (List) Parameters for a rule which specifies how it should run. @@ -112,31 +112,31 @@ Nested schema for **rule_subgroups**: Nested schema for **json_parse_parameters**: * `delete_source` - (Boolean) Whether or not to delete the source field after running this rule. 
* `destination_field` - (String) Destination field under which to put the json object. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `override_dest` - (Boolean) Destination field in which to put the json stringified content. * `json_stringify_parameters` - (List) Parameters for json stringify rule. Nested schema for **json_stringify_parameters**: * `delete_source` - (Boolean) Whether or not to delete the source field after running this rule. * `destination_field` - (String) Destination field in which to put the json stringified content. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `parse_parameters` - (List) Parameters for parse rule. Nested schema for **parse_parameters**: * `destination_field` - (String) In which field to put the parsed text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule` - (String) Regex which will parse the source field and extract the json keys from it while removing the source field. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^.*$/`. * `remove_fields_parameters` - (List) Parameters for remove fields rule. Nested schema for **remove_fields_parameters**: * `fields` - (List) Json field paths to drop from the log. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `replace_parameters` - (List) Parameters for replace rule. Nested schema for **replace_parameters**: * `destination_field` - (String) In which field to put the modified text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `replace_new_val` - (String) The value to replace the matched text with. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `rule` - (String) Regex which will match parts in the text to replace. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `source_field` - (String) A field on which value to execute the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
diff --git a/website/docs/d/logs_rule_groups.html.markdown b/website/docs/d/logs_rule_groups.html.markdown index 0090cef0c8..0bfab8fcaa 100644 --- a/website/docs/d/logs_rule_groups.html.markdown +++ b/website/docs/d/logs_rule_groups.html.markdown @@ -36,12 +36,12 @@ After your data source is created, you can read values from the following attrib * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **rulegroups**: * `description` - (String) A description for the rule group, should express what is the rule group purpose. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `enabled` - (Boolean) Whether or not the rule is enabled. * `id` - (String) The ID of the rule group. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) The name of the rule group. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) // The order in which the rule group will be evaluated. The lower the order, the more priority the group will have. Not providing the order will by default create a group with the last order. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `rule_matchers` - (List) // Optional rule matchers which if matched will make the rule go through the rule group. @@ -50,7 +50,7 @@ Nested schema for **rulegroups**: * `application_name` - (List) ApplicationName constraint. Nested schema for **application_name**: * `value` - (String) Only logs with this ApplicationName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity` - (List) Severity constraint. Nested schema for **severity**: * `value` - (String) Only logs with this severity value will match. @@ -58,7 +58,7 @@ Nested schema for **rulegroups**: * `subsystem_name` - (List) SubsystemName constraint. Nested schema for **subsystem_name**: * `value` - (String) Only logs with this SubsystemName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_subgroups` - (List) Rule subgroups. Will try to execute the first rule subgroup, and if not matched will try to match the next one in order. * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **rule_subgroups**: @@ -76,7 +76,7 @@ Nested schema for **rulegroups**: * `id` - (String) Unique identifier of the rule. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. 
The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) Name of the rule. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Integer) The ordering of the rule subgroup. Lower order will run first. 0 is considered as no value. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `parameters` - (List) Parameters for a rule which specifies how it should run. @@ -109,31 +109,31 @@ Nested schema for **rulegroups**: Nested schema for **json_parse_parameters**: * `delete_source` - (Boolean) Whether or not to delete the source field after running this rule. * `destination_field` - (String) Destination field under which to put the json object. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `override_dest` - (Boolean) Destination field in which to put the json stringified content. * `json_stringify_parameters` - (List) Parameters for json stringify rule. Nested schema for **json_stringify_parameters**: * `delete_source` - (Boolean) Whether or not to delete the source field after running this rule. * `destination_field` - (String) Destination field in which to put the json stringified content. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `parse_parameters` - (List) Parameters for parse rule. Nested schema for **parse_parameters**: * `destination_field` - (String) In which field to put the parsed text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule` - (String) Regex which will parse the source field and extract the json keys from it while removing the source field. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `remove_fields_parameters` - (List) Parameters for remove fields rule. Nested schema for **remove_fields_parameters**: * `fields` - (List) Json field paths to drop from the log. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `replace_parameters` - (List) Parameters for replace rule. Nested schema for **replace_parameters**: * `destination_field` - (String) In which field to put the modified text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `replace_new_val` - (String) The value to replace the matched text with. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `rule` - (String) Regex which will match parts in the text to replace. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `source_field` - (String) A field on which value to execute the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_view.html.markdown b/website/docs/d/logs_view.html.markdown index dc55c6c3e2..a32bf0fd3b 100644 --- a/website/docs/d/logs_view.html.markdown +++ b/website/docs/d/logs_view.html.markdown @@ -40,19 +40,19 @@ Nested schema for **filters**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **filters**: * `name` - (String) Filter name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `selected_values` - (Map) Filter selected values. * `folder_id` - (String) View folder ID. * Constraints: The maximum length is `36` characters. 
The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) View name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `search_query` - (List) View search query. Nested schema for **search_query**: * `query` - (String) View search query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `time_selection` - (List) View time selection. Nested schema for **time_selection**: @@ -63,7 +63,7 @@ Nested schema for **time_selection**: * `quick_selection` - (List) Quick time selection. Nested schema for **quick_selection**: * `caption` - (String) Quick time selection caption. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `seconds` - (Integer) Quick time selection amount of seconds. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
diff --git a/website/docs/d/logs_view_folder.html.markdown b/website/docs/d/logs_view_folder.html.markdown index 58faf27aac..bf200a485f 100644 --- a/website/docs/d/logs_view_folder.html.markdown +++ b/website/docs/d/logs_view_folder.html.markdown @@ -36,5 +36,5 @@ After your data source is created, you can read values from the following attrib * `id` - The unique identifier of the logs_view_folder. * `name` - (String) Folder name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. diff --git a/website/docs/d/logs_view_folders.html.markdown b/website/docs/d/logs_view_folders.html.markdown index 9d7d731497..15b40d80d8 100644 --- a/website/docs/d/logs_view_folders.html.markdown +++ b/website/docs/d/logs_view_folders.html.markdown @@ -38,5 +38,5 @@ Nested schema for **view_folders**: * `id` - (String) Folder ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (String) Folder name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
diff --git a/website/docs/d/logs_views.html.markdown b/website/docs/d/logs_views.html.markdown index 6591e61da8..8c3cf6e234 100644 --- a/website/docs/d/logs_views.html.markdown +++ b/website/docs/d/logs_views.html.markdown @@ -41,17 +41,17 @@ Nested schema for **views**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **filters**: * `name` - (String) Filter name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `selected_values` - (Map) Filter selected values. * `folder_id` - (String) View folder ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `id` - (Integer) View ID. * `name` - (String) View name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `search_query` - (List) View search query. Nested schema for **search_query**: * `query` - (String) View search query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `time_selection` - (List) View time selection. Nested schema for **time_selection**: * `custom_selection` - (List) Custom time selection. @@ -61,7 +61,7 @@ Nested schema for **views**: * `quick_selection` - (List) Quick time selection. Nested schema for **quick_selection**: * `caption` - (String) Quick time selection caption. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `seconds` - (Integer) Quick time selection amount of seconds. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. diff --git a/website/docs/d/pi_catalog_images.html.markdown b/website/docs/d/pi_catalog_images.html.markdown index 291bf88579..0775b6a751 100644 --- a/website/docs/d/pi_catalog_images.html.markdown +++ b/website/docs/d/pi_catalog_images.html.markdown @@ -48,6 +48,7 @@ In addition to the argument reference list, you can access the following attribu - `architecture` - (String) The CPU architecture that the image is designed for. - `container_format` - (String) The container format. - `creation_date` - (String) Date of image creation. + - `crn` - (String) The CRN of this resource. - `description` - (String) The description of an image. - `disk_format` - (String) The disk format. - `endianness` - (String) The `Endianness` order. 
diff --git a/website/docs/d/pi_cloud_instance.html.markdown b/website/docs/d/pi_cloud_instance.html.markdown index 32dcbb43ba..957ce3fdac 100644 --- a/website/docs/d/pi_cloud_instance.html.markdown +++ b/website/docs/d/pi_cloud_instance.html.markdown @@ -45,11 +45,13 @@ In addition to the argument reference list, you can access the following attribu Nested scheme for `pvm_instances`: - `creation_date` - (String) Date of PVM instance creation. + - `crn` - (String) The CRN of this resource. - `href` - (String) Link to Cloud Instance resource. - `id` - (String) PVM Instance ID. - `name` - (String) Name of the server. - `status` - (String) The status of the instance. - `systype` - (string) System type used to host the instance. + - `user_tags` - (List) List of user tags attached to the resource. - `region` - (String) The region the cloud instance lives. - `tenant_id` - (String) The tenant ID that owns this cloud instance. - `total_instances` - (String) The count of lpars that belong to this specific cloud instance. diff --git a/website/docs/d/pi_datacenter.html.markdown b/website/docs/d/pi_datacenter.html.markdown index 9ce0e5a12a..1aefd065ec 100644 --- a/website/docs/d/pi_datacenter.html.markdown +++ b/website/docs/d/pi_datacenter.html.markdown @@ -7,22 +7,26 @@ description: |- --- # ibm_pi_datacenter + Retrieve information about a Power Systems Datacenter. ## Example usage + ```terraform data "ibm_pi_datacenter" "datacenter" { pi_datacenter_zone= "dal12" } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: + ```terraform provider "ibm" { region = "lon" @@ -31,13 +35,49 @@ Example usage: ``` ## Argument reference + Review the argument references that you can specify for your data source. 
- `pi_datacenter_zone` - (Optional, String) Datacenter zone you want to retrieve. If no value is supplied, the `zone` configured within the IBM provider will be utilized. ## Attribute reference + In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `capability_details` - (List) Additional Datacenter Capability Details. + + Nested schema for `capability_details`: + - `disaster_recovery` - (List) Disaster Recovery Information. + + Nested schema for `disaster_recovery`: + - `replication_services`- (List) Replication services. + + Nested schema for `replication_services`: + - `asynchronous_replication` - (List) Asynchronous Replication Target Information. + + Nested schema for `asynchronous_replication`: + - `enabled` - (Boolean) Service Enabled. + - `target_locations` - (List) List of all replication targets. + + Nested schema for `target_locations`: + - `region` - (String) regionZone of replication site. + - `status` - (String) the replication site is `active` or `down`. + - `synchronous_replication` - (List) Synchronous Replication Target Information. + + Nested schema for `synchronous_replication`: + - `enabled` - (Boolean) Service Enabled. + - `target_locations` - (List) List of all replication targets. + + Nested schema for `target_locations`: + - `region` - (String) regionZone of replication site. + - `status` - (String) the replication site is `active` or `down`. + + - `supported_systems` - (List) Datacenter System Types Information. + + Nested schema for `supported_systems`: + - `dedicated` - (List) List of all available dedicated host types. + - `general` - (List) List of all available host types. + - `pi_datacenter_capabilities` - (Map) Datacenter Capabilities. Capabilities are `true` or `false`. 
Some of `pi_datacenter_capabilities` are: diff --git a/website/docs/d/pi_datacenters.html.markdown b/website/docs/d/pi_datacenters.html.markdown index 6efbda05e9..cbaa14f0ee 100644 --- a/website/docs/d/pi_datacenters.html.markdown +++ b/website/docs/d/pi_datacenters.html.markdown @@ -36,17 +36,49 @@ In addition to all argument reference list, you can access the following attribu - `datacenters` - (List) List of Datacenters Nested schema for `datacenters` + - `capability_details` - (List) Additional Datacenter Capability Details. + + Nested schema for `capability_details`: + - `disaster_recovery` - (List) Disaster Recovery Information. + + Nested schema for `disaster_recovery`: + - `replication_services`- (List) Replication services. + + Nested schema for `replication_services`: + - `asynchronous_replication` - (List) Asynchronous Replication Target Information. + + Nested schema for `asynchronous_replication`: + - `enabled` - (Boolean) Service Enabled. + - `target_locations` - (List) List of all replication targets. + + Nested schema for `target_locations`: + - `region` - (String) regionZone of replication site. + - `status` - (String) the replication site is `active` or `down`. + - `synchronous_replication` - (List) Synchronous Replication Target Information. + + Nested schema for `synchronous_replication`: + - `enabled` - (Boolean) Service Enabled. + - `target_locations` - (List) List of all replication targets. + + Nested schema for `target_locations`: + - `region` - (String) regionZone of replication site. + - `status` - (String) the replication site is `active` or `down`. + + - `supported_systems` - (List) Datacenter System Types Information. + + Nested schema for `supported_systems`: + - `dedicated` - (List) List of all available dedicated host types. + - `general` - (List) List of all available host types. - `pi_datacenter_capabilities` - (Map) Datacenter Capabilities. Capabilities are `true` or `false`. 
- Some of `pi_datacenter_capabilities` are: - - `cloud-connections`, `disaster-recovery-site`, `metrics`, `power-edge-router`, `power-vpn-connections` - + Some of `pi_datacenter_capabilities` are: + - `cloud-connections`, `disaster-recovery-site`, `metrics`, `power-edge-router`, `power-vpn-connections` - `pi_datacenter_href` - (String) Datacenter href. - `pi_datacenter_location` - (Map) Datacenter location. - Nested schema for `pi_datacenter_location`: - - `region` - (String) Datacenter location region zone. - - `type` - (String) Datacenter location region type. - - `url`- (String) Datacenter location region url. + Nested schema for `pi_datacenter_location`: + - `region` - (String) Datacenter location region zone. + - `type` - (String) Datacenter location region type. + - `url`- (String) Datacenter location region url. - `pi_datacenter_status` - (String) Datacenter status, `active`,`maintenance` or `down`. - `pi_datacenter_type` - (String) Datacenter type, `off-premises` or `on-premises`. diff --git a/website/docs/d/pi_disaster_recovery_location.html.markdown b/website/docs/d/pi_disaster_recovery_location.html.markdown index 60e7415568..e65e876926 100644 --- a/website/docs/d/pi_disaster_recovery_location.html.markdown +++ b/website/docs/d/pi_disaster_recovery_location.html.markdown @@ -46,3 +46,8 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `replication_sites`: - `is_active` - (Boolean) Indicates the location is active or not, `true` if location is active , otherwise it is `false`. - `location` - (String) The region zone of the location. + - `replication_pool_map` - (List) List of replication pool map. + + Nested scheme for `replication_pool_map`: + - `remote_pool` - (String) Remote pool. + - `volume_pool` - (String) Volume pool. 
diff --git a/website/docs/d/pi_disaster_recovery_locations.html.markdown b/website/docs/d/pi_disaster_recovery_locations.html.markdown index 6ed0e43817..ab32d04a68 100644 --- a/website/docs/d/pi_disaster_recovery_locations.html.markdown +++ b/website/docs/d/pi_disaster_recovery_locations.html.markdown @@ -38,7 +38,12 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `disaster_recovery_locations`: - `location` - (String) The region zone of a site. - `replication_sites` - List of Replication Sites. - + Nested scheme for `replication_sites`: - `is_active` - (Boolean) Indicates the location is active or not, `true` if location is active, otherwise it is `false`. - `location` - (String) The region zone of the location. + - `replication_pool_map` - (List) List of replication pool maps. + + Nested scheme for `replication_pool_map`: + - `remote_pool` - (String) Remote pool. + - `volume_pool` - (String) Volume pool. diff --git a/website/docs/d/pi_image.html.markdown b/website/docs/d/pi_image.html.markdown index 2fcb988d4c..5dccdac60d 100644 --- a/website/docs/d/pi_image.html.markdown +++ b/website/docs/d/pi_image.html.markdown @@ -11,6 +11,7 @@ description: |- Import the details of an existing IBM Power Virtual Server Cloud image as a read-only data source. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage + ```terraform data "ibm_pi_image" "ds_image" { pi_image_name = "7200-03-03" @@ -18,13 +19,15 @@ data "ibm_pi_image" "ds_image" { } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: + ```terraform provider "ibm" { region = "lon" @@ -33,20 +36,25 @@ Example usage: ``` ## Argument reference -Review the argument references that you can specify for your data source. -- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +Review the argument references that you can specify for your data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_image_name` - (Required, String) The ID of the image. To find supported images, run the `ibmcloud pi images` command. ## Attribute reference -In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `architecture` - (String) The CPU architecture that the image is designed for. +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `architecture` - (String) The CPU architecture that the image is designed for. +- `crn` - (String) The CRN of this resource. - `hypervisor` - (String) Hypervisor type. - `id` - (String) The unique identifier of the image. - `image_type` - (String) The identifier of this image type. - `operating_system` - (String) The operating system that is installed with the image. - `size` - (String) The size of the image in megabytes. -- `state` - (String) The state for this image. -- `storage_type` - (String) The storage type for this image. +- `source_checksum` - (String) Checksum of the image. +- `state` - (String) The state for this image. - `storage_pool` - (String) Storage pool where image resides. +- `storage_type` - (String) The storage type for this image. +- `user_tags` - (List) List of user tags attached to the resource. 
diff --git a/website/docs/d/pi_images.html.markdown b/website/docs/d/pi_images.html.markdown index 617207e159..b8824fc4b9 100644 --- a/website/docs/d/pi_images.html.markdown +++ b/website/docs/d/pi_images.html.markdown @@ -7,10 +7,12 @@ description: |- --- # ibm_pi_images + Retrieve a list of supported images that you can use in your Power Systems Virtual Server instance. The image represents the version of the operation system that is installed in your Power Systems Virtual Server instance. For more information, about power instance images, see [capturing and exporting a virtual machine (VM)](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-capturing-exporting-vm). ## Example usage -The following example retrieves all images for a cloud instance ID. + +The following example retrieves all images for a cloud instance ID. ```terraform data "ibm_pi_images" "ds_images" { @@ -18,13 +20,15 @@ data "ibm_pi_images" "ds_images" { } ``` -**Notes:** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: + ```terraform provider "ibm" { region = "lon" @@ -33,20 +37,25 @@ Example usage: ``` ## Argument reference -Review the argument references that you can specify for your data source. + +Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference + In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `image_info` - (List) List of all supported images. +- `image_info` - (List) List of all supported images. Nested scheme for `image_info`: + - `crn` - (String) The CRN of this resource. - `href` - (String) The hyper link of an image. 
- `id` - (String) The unique identifier of an image. - `image_type` - (String) The identifier of this image type. - `name`- (String) The name of an image. + - `source_checksum` - (String) Checksum of the image. - `state` - (String) The state of an image. - `storage_pool` - (String) Storage pool where image resides. - `storage_type` - (String) The storage type of an image. + - `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_instance.html.markdown b/website/docs/d/pi_instance.html.markdown index dfc470596e..1b5785f36f 100644 --- a/website/docs/d/pi_instance.html.markdown +++ b/website/docs/d/pi_instance.html.markdown @@ -46,6 +46,7 @@ Review the argument references that you can specify for your data source. In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `crn` - (String) The CRN of this resource. - `deployment_type` - (String) The custom deployment type. - `fault` - (Map) Fault information, if any. @@ -91,8 +92,10 @@ In addition to all argument reference list, you can access the following attribu - `shared_processor_pool`- (String) The name of the shared processor pool for the instance. - `shared_processor_pool_id` - (String) The ID of the shared processor pool for the instance. - `status` - (String) The status of the instance. +- `storage_connection` - (String) The storage connection type for the instance. - `storage_pool` - (String) The storage Pool where server is deployed. - `storage_pool_affinity` - (Boolean) Indicates if all volumes attached to the server must reside in the same storage pool. - `storage_type` - (String) The storage type where server is deployed. +- `user_tags` - (List) List of user tags attached to the resource. - `virtual_cores_assigned` - (Integer) The virtual cores that are assigned to the instance. - `volumes` - (List) List of volume IDs that are attached to the instance. 
diff --git a/website/docs/d/pi_instance_snapshot.html.markdown b/website/docs/d/pi_instance_snapshot.html.markdown index ff1f7469d0..7c4412228b 100644 --- a/website/docs/d/pi_instance_snapshot.html.markdown +++ b/website/docs/d/pi_instance_snapshot.html.markdown @@ -42,9 +42,11 @@ In addition to all argument reference list, you can access the following attribu - `action` - (String) Action performed on the instance snapshot. - `creation_date` - (String) Date of snapshot creation. +- `crn` - (String) The CRN of this resource. - `description` - (String) The description of the snapshot. - `last_updated_date` - (String) Date of last update. - `name` - (String) The name of the Power Systems Virtual Machine instance snapshot. - `percent_complete` - (Integer) The snapshot completion percentage. - `status` - (String) The status of the Power Virtual Machine instance snapshot. +- `user_tags` - (List) List of user tags attached to the resource. - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. diff --git a/website/docs/d/pi_instance_snapshots.html.markdown b/website/docs/d/pi_instance_snapshots.html.markdown index 7d39c0d0d5..5c05a5039b 100644 --- a/website/docs/d/pi_instance_snapshots.html.markdown +++ b/website/docs/d/pi_instance_snapshots.html.markdown @@ -43,10 +43,12 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `instance_snapshots`: - `action` - (String) Action performed on the instance snapshot. - `creation_date` - (String) Date of snapshot creation. + - `crn` - (String) The CRN of this resource. - `description` - (String) The description of the snapshot. - `id` - (String) The unique identifier of the Power Systems Virtual Machine instance snapshot. - `last_updated_date` - (String) Date of last update. - `name` - (String) The name of the Power Systems Virtual Machine instance snapshot. - `percent_complete` - (Integer) The snapshot completion percentage. 
- `status` - (String) The status of the Power Virtual Machine instance snapshot. + - `user_tags` - (List) List of user tags attached to the resource. - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. diff --git a/website/docs/d/pi_instance_volumes.html.markdown b/website/docs/d/pi_instance_volumes.html.markdown index 16f17cfdb0..6d04358e7b 100644 --- a/website/docs/d/pi_instance_volumes.html.markdown +++ b/website/docs/d/pi_instance_volumes.html.markdown @@ -53,11 +53,18 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `instance_volumes`: - `bootable`- (Boolean) Indicates if the volume is boot capable. + - `creation_date` - (String) Date of volume creation. + - `crn` - (String) The CRN of this resource. + - `freeze_time` - (String) Time of remote copy relationship. - `href` - (String) The hyper link of the volume. - `id` - (String) The unique identifier of the volume. + - `last_update_date` - (String) The date when the volume last updated. - `name` - (String) The name of the volume. - `pool` - (String) Volume pool, name of storage pool where the volume is located. + - `replication_enabled` - (Boolean) Indicates whether replication is enabled on the volume. + - `replication_sites` - (List) List of replication sites for volume replication. - `shareable` - (Boolean) Indicates if the volume is shareable between VMs. - `size` - (Integer) The size of this volume in GB. - `state` - (String) The state of the volume. - `type` - (String) The disk type that is used for this volume. + - `user_tags` - (List) List of user tags attached to the resource. 
diff --git a/website/docs/d/pi_instances.html.markdown b/website/docs/d/pi_instances.html.markdown index 0f987e6d0a..0897d28cf3 100644 --- a/website/docs/d/pi_instances.html.markdown +++ b/website/docs/d/pi_instances.html.markdown @@ -47,6 +47,7 @@ In addition to all argument reference list, you can access the following attribu - `pvm_instances` - (List) List of power virtual server instances for the respective cloud instance. Nested scheme for `pvm_instances`: + - `crn` - (String) The CRN of this resource. - `fault` - (Map) Fault information, if any. Nested scheme for `fault`: @@ -84,7 +85,9 @@ In addition to all argument reference list, you can access the following attribu - `shared_processor_pool`- (String) The name of the shared processor pool for the instance. - `shared_processor_pool_id` - (String) The ID of the shared processor pool for the instance. - `status` - (String) The status of the instance. + - `storage_connection` - (String) The storage connection type for the instance - `storage_pool` - (String) The storage Pool where server is deployed. - `storage_pool_affinity` - (Boolean) Indicates if all volumes attached to the server must reside in the same storage pool. - `storage_type` - (String) The storage type where server is deployed. + - `user_tags` - (List) List of user tags attached to the resource. - `virtual_cores_assigned` - (Integer) The virtual cores that are assigned to the instance. diff --git a/website/docs/d/pi_network.html.markdown b/website/docs/d/pi_network.html.markdown index 30635fd315..befcce7a1c 100644 --- a/website/docs/d/pi_network.html.markdown +++ b/website/docs/d/pi_network.html.markdown @@ -43,6 +43,7 @@ In addition to all argument reference list, you can access the following attribu - `access_config` - (String) The network communication configuration option of the network (for satellite locations only). - `available_ip_count` - (Float) The total number of IP addresses that you have in your network. 
- `cidr` - (String) The CIDR of the network. +- `crn` - (String) The CRN of this resource. - `dns`- (Set) The DNS Servers for the network. - `gateway` - (String) The network gateway that is attached to your network. - `id` - (String) The ID of the network. @@ -51,4 +52,5 @@ In addition to all argument reference list, you can access the following attribu - `type` - (String) The type of network. - `used_ip_count` - (Float) The number of used IP addresses. - `used_ip_percent` - (Float) The percentage of IP addresses used. +- `user_tags` - (List) List of user tags attached to the resource. - `vlan_id` - (String) The VLAN ID that the network is connected to. diff --git a/website/docs/d/pi_network_address_group.html.markdown b/website/docs/d/pi_network_address_group.html.markdown new file mode 100644 index 0000000000..8cb69018ed --- /dev/null +++ b/website/docs/d/pi_network_address_group.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_address_group" +description: |- + Get information about pi_network_address_group +subcategory: "Power Systems" +--- + +# ibm_pi_network_address_group + +Retrieves information about a network address group. + +## Example Usage + +```terraform + data "ibm_pi_network_address_group" "network_address_group" { + pi_cloud_instance_id = "" + pi_network_address_group_id = "" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +You can specify the following arguments for this data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. 
+- `pi_network_address_group_id` - (Required, String) The network address group id. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `crn` - (String) The network address group's crn. +- `id` - The unique identifier of the network address group. +- `members` - (List) The list of IP addresses in CIDR notation in the network address group. + + Nested schema for `members`: + - `cidr` - (String) The IP addresses in CIDR notation. + - `id` - (String) The id of the network address group member IP addresses. +- `name` - (String) The name of the network address group. +- `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_network_address_groups.html.markdown b/website/docs/d/pi_network_address_groups.html.markdown new file mode 100644 index 0000000000..9426ccdf0b --- /dev/null +++ b/website/docs/d/pi_network_address_groups.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_address_groups" +description: |- + Get information about pi_network_address_groups +subcategory: "Power Systems" +--- + +# ibm_pi_network_address_groups + +Retrieves information about network address groups. + +## Example Usage + +```terraform + data "ibm_pi_network_address_groups" "network_address_groups" { + pi_cloud_instance_id = "" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument reference + +Review the argument references that you can specify for your data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
+ +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `network_address_groups` - (List) List of network address groups. + + Nested schema for `network_address_groups`: + - `crn` - (String) The network address group's crn. + - `id` - (String) The id of the network address group. + - `members` - (List) The list of IP addresses in CIDR notation in the network address group. + + Nested schema for `members`: + - `cidr` - (String) The IP addresses in CIDR notation. + - `id` - (String) The id of the network address group member IP addresses. + - `name` - (String) The name of the network address group. + - `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_network_interface.html.markdown b/website/docs/d/pi_network_interface.html.markdown new file mode 100644 index 0000000000..069aca031f --- /dev/null +++ b/website/docs/d/pi_network_interface.html.markdown @@ -0,0 +1,63 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_interface" +description: |- + Get information about pi_network_interface +subcategory: "Power Systems" +--- + +# ibm_pi_network_interface + +Retrieves information about a network interface. + +## Example Usage + +```terraform + data "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "" + pi_network_id = "network_id" + pi_network_interface_id = "network_interface_id" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +You can specify the following arguments for this data source. 
+
+- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
+- `pi_network_id` - (Required, String) network id.
+- `pi_network_interface_id` - (Required, String) network interface id.
+
+## Attribute Reference
+
+In addition to all argument reference list, you can access the following attribute references after your data source is created.
+
+- `crn` - (String) The network interface's crn.
+- `id` - (String) The unique identifier of the network interface resource. The id is composed of `<network_id>/<network_interface_id>`.
+- `instance` - (List) The attached instance to this Network Interface.
+
+  Nested scheme for `instance`:
+  - `href` - (String) Link to instance resource.
+  - `instance_id` - (String) The attached instance id.
+- `ip_address` - (String) The ip address of this network interface.
+- `mac_address` - (String) The mac address of the network interface.
+- `name` - (String) Name of the network interface (not unique or indexable).
+- `network_interface_id` - (String) The unique identifier of the network interface.
+- `status` - (String) The status of the network interface.
+- `user_tags` - (List) List of user tags attached to the resource.
diff --git a/website/docs/d/pi_network_interfaces.html.markdown b/website/docs/d/pi_network_interfaces.html.markdown
new file mode 100644
index 0000000000..f2112ab545
--- /dev/null
+++ b/website/docs/d/pi_network_interfaces.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "ibm"
+page_title: "IBM : ibm_pi_network_interfaces"
+description: |-
+  Get information about pi_network_interfaces
+subcategory: "Power Systems"
+---
+
+# ibm_pi_network_interfaces
+
+Retrieve information about network interfaces.
+
+## Example Usage
+
+```terraform
+  data "ibm_pi_network_interfaces" "network_interfaces" {
+    pi_cloud_instance_id = ""
+    pi_network_id = "network_id"
+  }
+```
+
+### Notes
+
+- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints.
+- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows:
+  - `region` - `lon`
+  - `zone` - `lon04`
+
+Example usage:
+
+  ```terraform
+    provider "ibm" {
+      region = "lon"
+      zone = "lon04"
+    }
+  ```
+
+## Argument Reference
+
+Review the argument references that you can specify for your data source.
+
+- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
+
+- `pi_network_id` - (Required, String) network id.
+
+## Attribute Reference
+
+In addition to all argument reference listed, you can access the following attribute references after your data source is created.
+
+- `id` - (String) The unique identifier of the pi_network_interfaces.
+- `interfaces` - (List) Network interfaces.
+
+  Nested scheme for `interfaces`:
+  - `crn` - (String) The network interface's crn.
+  - `id` - (String) The unique network interface id.
+  - `instance` - (List) The attached instance to this Network Interface.
+
+    Nested scheme for `instance`:
+    - `href` - (String) Link to instance resource.
+    - `instance_id` - (String) The attached instance id.
+  - `ip_address` - (String) The ip address of this network interface.
+  - `mac_address` - (String) The mac address of the network interface.
+  - `name` - (String) Name of the network interface (not unique or indexable).
+  - `status` - (String) The status of the network interface.
+  - `user_tags` - (List) List of user tags attached to the resource.
diff --git a/website/docs/d/pi_network_security_group.html.markdown b/website/docs/d/pi_network_security_group.html.markdown new file mode 100644 index 0000000000..f3d285275d --- /dev/null +++ b/website/docs/d/pi_network_security_group.html.markdown @@ -0,0 +1,89 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_group" +description: |- + Get information about pi_network_security_group +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_group + +Retrieves information about a network security group. + +## Example Usage + +```terraform + data "ibm_pi_network_security_group" "network_security_group" { + pi_cloud_instance_id = "" + pi_network_security_group_id = "" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +You can specify the following arguments for this data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_network_security_group_id` - (Required, String) network security group id or name. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `crn` - (String) The network security group's crn. + +- `members` - (List) The list of IPv4 addresses and\or network interfaces in the network security group. + + Nested schema for `members`: + - `id` - (String) The id of the member in a network security group. + - `mac_address` - (String) The mac address of a network interface included if the type is `network-interface`. 
+  - `target` - (String) If `ipv4-address` type, then IPv4 address or if `network-interface` type, then network interface id.
+  - `type` - (String) The type of member. Supported values are: `ipv4-address`, `network-interface`.
+
+- `name` - (String) The name of the network security group.
+
+- `rules` - (List) The list of rules in the network security group.
+
+  Nested schema for `rules`:
+  - `action` - (String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`.
+  - `destination_port` - (List) The list of destination port.
+
+    Nested schema for `destination_port`:
+    - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number.
+    - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number.
+  - `id` - (String) The id of the rule in a network security group.
+  - `protocol` - (List) The list of protocol.
+
+    Nested schema for `protocol`:
+    - `icmp_type` - (String) If icmp type, an ICMP packet type affected by ICMP rules and if not present then all types are matched.
+    - `tcp_flags` - (List) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`.
+      Nested schema for `tcp_flags`:
+      - `flags` - (String) TCP flag.
+    - `type` - (String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`.
+  - `remote` - (List) List of remote.
+
+    Nested schema for `remote`:
+    - `id` - (String) The id of the remote network address group or network security group the rules apply to. Not required for default-network-address-group.
+    - `type` - (String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`.
+ - `source_port` - (List) List of source port. + + Nested schema for `source_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. +- `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_network_security_groups.html.markdown b/website/docs/d/pi_network_security_groups.html.markdown new file mode 100644 index 0000000000..27f720fbad --- /dev/null +++ b/website/docs/d/pi_network_security_groups.html.markdown @@ -0,0 +1,87 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_groups" +description: |- + Get information about pi_network_security_groups +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_groups + +Retrieves information about network security groups. + +## Example Usage + +```terraform + data "ibm_pi_network_security_groups" "network_security_groups" { + pi_cloud_instance_id = "" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument reference + +Review the argument references that you can specify for your data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. + +## Attribute Reference + +After your data source is created, you can read values from the following attributes. + +- `network_security_groups` - (List) list of network security Groups. 
+ + Nested schema for `network_security_groups`: + - `crn` - (String) The network security group's crn. + - `id` - (String) The id of the network security group. + - `members` - (List) The list of IPv4 addresses and\or network Interfaces in the network security group. + + Nested schema for `members`: + - `id` - (String) The id of the member in a network security group. + - `mac_address` - (String) The mac address of a network Interface included if the type is `network-interface`. + - `target` - (String) If `ipv4-address` type, then IPv4 address or if `network-interface` type, then network interface id. + - `type` - (String) The type of member. Supported values are: `ipv4-address`, `network-interface`. + - `name` - (String) The name of the network security group. + - `rules` - (List) The list of rules in the network security group. + + Nested schema for `rules`: + - `action` - (String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`. + - `destination_port` - (List) List of destination port. + + Nested schema for `destination_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + + - `id` - (String) The id of the rule in a network security group. + - `protocol` - (List) List of protocol. + + Nested schema for `protocol`: + - `icmp_type` - (String) If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched. + - `tcp_flags` - (String) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`. + - `type` - (String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`. 
+ - `remote` - (List) List of remote. + + Nested schema for `remote`: + - `id` - (String) The id of the remote network Address group or network security group the rules apply to. Not required for default-network-address-group. + - `type` - (String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`. + - `source_port` - (List) List of source port. + + Nested schema for `source_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + - `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_networks.html.markdown b/website/docs/d/pi_networks.html.markdown index 4461cacd9a..2b030d6165 100644 --- a/website/docs/d/pi_networks.html.markdown +++ b/website/docs/d/pi_networks.html.markdown @@ -42,10 +42,12 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `networks`: - `access_config` - (String) The network communication configuration option of the network (for satellite locations only). + - `crn` - (String) The CRN of this resource. - `dhcp_managed` - (Boolean) Indicates if the network DHCP Managed. - `href` - (String) The hyper link of a network. - `mtu` - (Boolean) Maximum Transmission Unit option of the network. - `name` - (String) The name of a network. - `network_id` - (String) The ID of the network. - `type` - (String) The type of network. + - `user_tags` - (List) List of user tags attached to the resource. - `vlan_id` - (String) The VLAN ID that the network is connected to. 
diff --git a/website/docs/d/pi_public_network.html.markdown b/website/docs/d/pi_public_network.html.markdown index 097d94ce6a..f1ecb789ac 100644 --- a/website/docs/d/pi_public_network.html.markdown +++ b/website/docs/d/pi_public_network.html.markdown @@ -39,6 +39,7 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `crn` - (String) The CRN of this resource. - `id` - (String) The ID of the network. - `name` - (String) The name of the network. - `type` - (String) The type of VLAN that the network is connected to. diff --git a/website/docs/d/pi_pvm_snapshots.html.markdown b/website/docs/d/pi_pvm_snapshots.html.markdown index 3a5fa6571a..b8b42531ac 100644 --- a/website/docs/d/pi_pvm_snapshots.html.markdown +++ b/website/docs/d/pi_pvm_snapshots.html.markdown @@ -45,10 +45,12 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `pvm_snapshots`: - `action` - (String) Action performed on the instance snapshot. - `creation_date` - (String) Date of snapshot creation. + - `crn` - (String) The CRN of this resource. - `description` - (String) The description of the snapshot. - `id` - (String) The unique identifier of the Power Virtual Machine instance snapshot. - `last_updated_date` - (String) Date of last update. - `name` - (String) The name of the Power Virtual Machine instance snapshot. - `percent_complete` - (Integer) The snapshot completion percentage. - `status` - (String) The status of the Power Virtual Machine instance snapshot. + - `user_tags` - (List) List of user tags attached to the resource. - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. 
diff --git a/website/docs/d/pi_sap_profile.html.markdown b/website/docs/d/pi_sap_profile.html.markdown index 32717c17f7..d94339198f 100644 --- a/website/docs/d/pi_sap_profile.html.markdown +++ b/website/docs/d/pi_sap_profile.html.markdown @@ -7,9 +7,11 @@ description: |- --- # ibm_pi_sap_profile + Retrieve information about a SAP profile. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage + ```terraform data "ibm_pi_sap_profile" "example" { pi_cloud_instance_id = "" @@ -17,30 +19,39 @@ data "ibm_pi_sap_profile" "example" { } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` -Example usage: - ```terraform - provider "ibm" { - region = "lon" - zone = "lon04" - } - ``` + Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` ## Argument reference + Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_sap_profile_id` - (Required, String) SAP Profile ID. ## Attribute reference + In addition to all argument reference list, you can access the following attribute references after your data source is created. - `certified` - (Boolean) Has certification been performed on profile. - `cores` - (Integer) Amount of cores. +- `full_system_profile` - (Boolean) Requires full system for deployment. - `memory` - (Integer) Amount of memory (in GB). +- `saps` - (Integer) SAP application performance standard. +- `supported_systems` - (List) List of supported systems. - `type` - (String) Type of profile. +- `workload_type` - (List) List of workload types. 
+ \ No newline at end of file diff --git a/website/docs/d/pi_sap_profiles.html.markdown b/website/docs/d/pi_sap_profiles.html.markdown index 630242ee68..9c1c124f97 100644 --- a/website/docs/d/pi_sap_profiles.html.markdown +++ b/website/docs/d/pi_sap_profiles.html.markdown @@ -7,35 +7,41 @@ description: |- --- # ibm_pi_sap_profiles + Retrieve information about all SAP profiles. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage + ```terraform data "ibm_pi_sap_profiles" "example" { pi_cloud_instance_id = "" } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` -Example usage: - ```terraform - provider "ibm" { - region = "lon" - zone = "lon04" - } - ``` + Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` ## Argument reference + Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference + In addition to all argument reference list, you can access the following attribute references after your data source is created. - `profiles` - (List) List of all the SAP Profiles. @@ -43,6 +49,10 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `profiles`: - `certified` - (Boolean) Has certification been performed on profile. - `cores` - (Integer) Amount of cores. + - `full_system_profile` - (Boolean) Requires full system for deployment. - `memory` - (Integer) Amount of memory (in GB). - `profile_id` - (String) SAP Profile ID. + - `saps` - (Integer) SAP application performance standard. 
+ - `supported_systems` - (List) List of supported systems. - `type` - (String) Type of profile. + - `workload_type` - (List) List of workload types. diff --git a/website/docs/d/pi_shared_processor_pool.html.markdown b/website/docs/d/pi_shared_processor_pool.html.markdown index 7918b46199..f40e6ef8c3 100644 --- a/website/docs/d/pi_shared_processor_pool.html.markdown +++ b/website/docs/d/pi_shared_processor_pool.html.markdown @@ -42,6 +42,7 @@ In addition to all argument reference list, you can access the following attribu - `allocated_cores` - (Float) The allocated cores in the shared processor pool. - `available_cores` - (Integer) The available cores in the shared processor pool. +- `crn` - (String) The CRN of this resource. - `host_id` - (Integer) The host ID where the shared processor pool resides. - `id` - (String) The shared processor pool's unique ID. - `instances` - (List) List of server instances deployed in the shared processor pool. @@ -59,3 +60,4 @@ In addition to all argument reference list, you can access the following attribu - `reserved_cores` - (Integer) The amount of reserved cores for the shared processor pool. - `status` - (String) The status of the shared processor pool. - `status_detail` - (String) The status details of the shared processor pool. +- `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_shared_processor_pools.html.markdown b/website/docs/d/pi_shared_processor_pools.html.markdown index 3586e0437c..5402ae6b4e 100644 --- a/website/docs/d/pi_shared_processor_pools.html.markdown +++ b/website/docs/d/pi_shared_processor_pools.html.markdown @@ -43,9 +43,11 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `shared_processor_pools`: - `allocated_cores` - (Float) The allocated cores in the shared processor pool. - `available_cores` - (Integer) The available cores in the shared processor pool. + - `crn` - (String) The CRN of this resource. 
- `host_id` - (Integer) The host ID where the shared processor pool resides. - `name` - (String) The name of the shared processor pool. - `reserved_cores` - (Integer) The amount of reserved cores for the shared processor pool. - `shared_processor_pool_id` - (String) The shared processor pool's unique ID. - `status` - (String) The status of the shared processor pool. - `status_detail` - (String) The status details of the shared processor pool. + - `user_tags` - (List) List of user tags attached to the resource. diff --git a/website/docs/d/pi_storage_tiers.html.markdown b/website/docs/d/pi_storage_tiers.html.markdown new file mode 100644 index 0000000000..675a0f8af2 --- /dev/null +++ b/website/docs/d/pi_storage_tiers.html.markdown @@ -0,0 +1,53 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_storage_tiers" +description: |- + Get information about a storage tiers for a pi cloud instance. +subcategory: "Power Systems" +--- + +# ibm_pi_storage_tiers + +Retrieve information about supported storage tiers for a pi cloud instance. For more information, see [storage tiers docs](https://cloud.ibm.com/apidocs/power-cloud#pcloud-cloudinstances-storagetiers-getall). + +## Example Usage + +```terraform + data "ibm_pi_storage_tiers" "pi_storage_tiers" { + pi_cloud_instance_id = "" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +Review the argument references that you can specify for your data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. 
+ +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `id` - (String) The unique identifier of the storage tiers. +- `region_storage_tiers` - (List) An array of storage tiers supported in a region. + + Nested schema for `region_storage_tiers`: + - `description` - (String) Description of the storage tier label. + - `name` - (String) Name of the storage tier. + - `state` - (String) State of the storage tier, `active` or `inactive`. diff --git a/website/docs/d/pi_volume.html.markdown b/website/docs/d/pi_volume.html.markdown index 2eac4979a9..e830e322c0 100644 --- a/website/docs/d/pi_volume.html.markdown +++ b/website/docs/d/pi_volume.html.markdown @@ -48,22 +48,29 @@ Review the argument references that you can specify for your data source. In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `auxiliary` - (Boolean) Indicates if the volume is auxiliary or not. +- `auxiliary` - (Boolean) Indicates if the volume is auxiliary. - `auxiliary_volume_name` - (String) The auxiliary volume name. - `bootable` - (Boolean) Indicates if the volume is boot capable. - `consistency_group_name` - (String) Consistency group name if volume is a part of volume group. +- `creation_date` - (String) Date of volume creation. +- `crn` - (String) The CRN of this resource. - `disk_type` - (String) The disk type that is used for the volume. +- `freeze_time` - (String) Time of remote copy relationship. - `group_id` - (String) The volume group id in which the volume belongs. - `id` - (String) The unique identifier of the volume. - `io_throttle_rate` - (String) Amount of iops assigned to the volume. +- `last_update_date` - (String) The date when the volume last updated. - `master_volume_name` - (String) The master volume name. - `mirroring_state` - (String) Mirroring state for replication enabled volume. 
- `primary_role` - (String) Indicates whether `master`/`auxiliary` volume is playing the primary role. - `replication_enabled` - (Boolean) Indicates if the volume should be replication enabled or not. -- `replication_status` - (String) The replication status of the volume. +- `replication_sites` - (List) List of replication sites for volume replication. +- `replication_status` - (String) The replication status of the volume. - `replication_type` - (String) The replication type of the volume, `metro` or `global`. - `shareable` - (String) Indicates if the volume is shareable between VMs. - `size` - (Integer) The size of the volume in GB. - `state` - (String) The state of the volume. +- `user_tags` - (List) List of user tags attached to the resource. - `volume_pool` - (String) Volume pool, name of storage pool where the volume is located. - `wwn` - (String) The world wide name of the volume. + diff --git a/website/docs/d/pi_volume_group_details.html.markdown b/website/docs/d/pi_volume_group_details.html.markdown index 1f7a6619a0..0d1aa1f632 100644 --- a/website/docs/d/pi_volume_group_details.html.markdown +++ b/website/docs/d/pi_volume_group_details.html.markdown @@ -42,9 +42,11 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `auxiliary` - (Boolean) Indicates if the volume group is auxiliary. - `consistency_group_name` - (String) The name of consistency group at storage controller level. - `id` - (String) The unique identifier of the volume group. - `replication_status` - (String) The replication status of volume group. +- `replication_sites` - (List) Indicates the replication sites of the volume group. - `status` - (String) The status of the volume group. - `status_description_errors` - (List) The status details of the volume group. 
@@ -52,5 +54,6 @@ In addition to all argument reference list, you can access the following attribu - `key` - (String) The volume group error key. - `message` - (String) The failure message providing more details about the error key. - `volume_ids` - (List) List of volume IDs, which failed to be added/removed to/from the volume group, with the given error. +- `storage_pool` - (String) Storage pool of the volume group. - `volume_group_name` - (String) The name of the volume group. - `volume_ids` - (List) List of volume IDs, member of volume group. diff --git a/website/docs/d/pi_volume_groups.html.markdown b/website/docs/d/pi_volume_groups.html.markdown index 4b660bfbd8..00e55e61ed 100644 --- a/website/docs/d/pi_volume_groups.html.markdown +++ b/website/docs/d/pi_volume_groups.html.markdown @@ -43,9 +43,11 @@ In addition to all argument reference list, you can access the following attribu - `volume_groups`: List of all volume groups. Nested scheme for `volume_groups`: + - `auxiliary` - (Boolean) Indicates if the volume group is auxiliary. - `consistency_group_name` - (String) The name of consistency group at storage controller level. - `id` - (String) The unique identifier of the volume group. - `replication_status` - (String) The replication status of volume group. + - `replication_sites` - (List) Indicates the replication sites of the volume group. - `status` - (String) The status of the volume group. - `status_description_errors` - (List) The status details of the volume group. @@ -53,4 +55,5 @@ In addition to all argument reference list, you can access the following attribu - `key` - (String) The volume group error key. - `message` - (String) The failure message providing more details about the error key. - `volume_ids` - (List) List of volume IDs, which failed to be added/removed to/from the volume group, with the given error. + - `storage_pool` - (String) Storage pool of the volume group. - `volume_group_name` - (String) The name of the volume group. 
\ No newline at end of file diff --git a/website/docs/d/pi_volume_groups_details.html.markdown b/website/docs/d/pi_volume_groups_details.html.markdown index 69f6f90b63..d690cdd2bb 100644 --- a/website/docs/d/pi_volume_groups_details.html.markdown +++ b/website/docs/d/pi_volume_groups_details.html.markdown @@ -43,9 +43,11 @@ In addition to all argument reference list, you can access the following attribu - `volume_groups` - (List) List of all volume group. Nested scheme for `volume_groups`: + - `auxiliary` - (Boolean) Indicates if the volume group is auxiliary. - `consistency_group_name` - (String) The name of consistency group at storage controller level. - `id` - (String) The unique identifier of the volume group. - `replication_status` - (String) The replication status of volume group. + - `replication_sites` - (List) Indicates the replication sites of the volume group. - `status` - (String) The status of the volume group. - `status_description_errors` - (List) The status details of the volume group. @@ -53,5 +55,6 @@ In addition to all argument reference list, you can access the following attribu - `key` - (String) The volume group error key. - `message` - (String) The failure message providing more details about the error key. - `volume_ids` - (List) List of volume IDs, which failed to be added/removed to/from the volume group, with the given error. + - `storage_pool` - (String) Storage pool of the volume group. - `volume_group_name` - (String) The name of the volume group. - - `volume_ids` - (List) List of volume IDs, member of volume group. + - `volume_ids` - (List) List of volume IDs, member of volume group. 
diff --git a/website/docs/d/pi_volume_snapshot.html.markdown b/website/docs/d/pi_volume_snapshot.html.markdown new file mode 100644 index 0000000000..32730a7d12 --- /dev/null +++ b/website/docs/d/pi_volume_snapshot.html.markdown @@ -0,0 +1,56 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_volume_snapshot" +description: |- + Get information about a volume snapshot in Power Virtual Server. +subcategory: "Power Systems" +--- + +# ibm_pi_volume_snapshot + +Retrieve information about a volume snapshot. + +## Example Usage + +```terraform +data "ibm_pi_volume_snapshot" "snapshot" { + pi_cloud_instance_id = "" + pi_snapshot_id = "snapshot_id" +} +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +You can specify the following arguments for this data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_snapshot_id` - (Required, String) The volume snapshot id. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `creation_date` - (String) The date and time when the volume snapshot was created. +- `crn` - (String) The CRN for this resource. +- `id` - (String) The unique identifier of the volume snapshot. +- `name` - (String) The volume snapshot name. +- `size` - (Float) The size of the volume snapshot, in gibibytes (GiB). +- `status` - (String) The status for the volume snapshot. +- `updated_date` - (String) The date and time when the volume snapshot was last updated. +- `volume_id` - (String) The volume UUID associated with the snapshot. 
diff --git a/website/docs/d/pi_volume_snapshots.html.markdown b/website/docs/d/pi_volume_snapshots.html.markdown new file mode 100644 index 0000000000..2f24a14f7d --- /dev/null +++ b/website/docs/d/pi_volume_snapshots.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_volume_snapshots" +description: |- + Get information about all your volume snapshots in Power Virtual Server. +subcategory: "Power Systems" +--- + +# ibm_pi_volume_snapshots + +Retrieve information about your volume snapshots. + +## Example Usage + +```terraform +data "ibm_pi_volume_snapshots" "snapshots" { + pi_cloud_instance_id = "" +} +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +You can specify the following arguments for this data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute references after your data source is created. + +- `volume_snapshots` - (List) The list of volume snapshots. + + Nested schema for `volume_snapshots`: + - `creation_date` - (String) The date and time when the volume snapshot was created. + - `crn` - (String) The CRN of the volume snapshot. + - `id` - (String) The volume snapshot UUID. + - `name` - (String) The volume snapshot name. + - `size` - (Float) The size of the volume snapshot, in gibibytes (GiB). + - `status` - (String) The status for the volume snapshot. + - `updated_date` - (String) The date and time when the volume snapshot was last updated. 
+ - `volume_id` - (String) The volume UUID associated with the snapshot. diff --git a/website/docs/d/pi_workspace.html.markdown b/website/docs/d/pi_workspace.html.markdown index ad02cb7d2a..4d7ce72dad 100644 --- a/website/docs/d/pi_workspace.html.markdown +++ b/website/docs/d/pi_workspace.html.markdown @@ -7,22 +7,26 @@ description: |- --- # ibm_pi_workspace + Retrieve information about your Power Systems account workspace. ## Example usage + ```terraform data "ibm_pi_workspace" "workspace" { pi_cloud_instance_id = "99fba9c9-66f9-99bc-9999-aca999ee9d9b" } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: + ```terraform provider "ibm" { region = "lon" @@ -31,24 +35,30 @@ Example usage: ``` ## Argument reference + Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) Cloud Instance ID of a PCloud Instance under your account. ## Attribute reference + In addition to all argument reference listed, you can access the following attribute references after your data source is created. - `id` - (String) Workspace ID. - `pi_workspace_capabilities` - (Map) Workspace Capabilities. Capabilities are `true` or `false`. Some of `pi_workspace_capabilities` are: - - `cloud-connections`, `power-edge-router`, `power-vpn-connections`, `transit-gateway-connection` + - `cloud-connections`, `power-edge-router`, `power-vpn-connections`, `transit-gateway-connection` - `pi_workspace_details` - (List) Workspace information. Nested schema for `pi_workspace_details`: - `creation_date` - (String) Date of workspace creation. - `crn` - (String) Workspace crn. + - `network_security_groups` - (List) Network security groups configuration. 
+ + Nested schema for `network_security_groups`: + - `state` - (String) The state of a network security groups configuration. - `power_edge_router` - (List) Power Edge Router information. Nested schema for `power_edge_router`: diff --git a/website/docs/d/pi_workspaces.html.markdown b/website/docs/d/pi_workspaces.html.markdown index 268a6bc2dd..af344ceb37 100644 --- a/website/docs/d/pi_workspaces.html.markdown +++ b/website/docs/d/pi_workspaces.html.markdown @@ -7,22 +7,26 @@ description: |- --- # ibm_pi_workspaces + Retrieve information about Power Systems workspaces. ## Example usage + ```terraform data "ibm_pi_workspaces" "workspaces" { pi_cloud_instance_id = "99fba9c9-66f9-99bc-9999-aca999ee9d9b" } ``` -**Notes** +### Notes + - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: + ```terraform provider "ibm" { region = "lon" @@ -31,11 +35,13 @@ Example usage: ``` ## Argument reference + Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference + In addition to all argument reference listed, you can access the following attribute references after your data source is created. - `workspaces` - (List) List of all Workspaces. @@ -50,19 +56,23 @@ In addition to all argument reference listed, you can access the following attri Nested schema for `pi_workspace_details`: - `creation_date` - (String) Date of workspace creation. - `crn` - (String) Workspace crn. - - `power_edge_router` - (List) Power Edge Router information. + - `network_security_groups` - (List) Network security groups configuration. + + Nested schema for `network_security_groups`: + - `state` - (String) The state of a network security groups configuration. 
+ - `power_edge_router` - (List) Power Edge Router information. - Nested schema for `power_edge_router`: - - `migration_status` - (String) The migration status of a Power Edge Router. - - `status` - (String) The state of a Power Edge Router. - - `type` - (String) The Power Edge Router type. + Nested schema for `power_edge_router`: + - `migration_status` - (String) The migration status of a Power Edge Router. + - `status` - (String) The state of a Power Edge Router. + - `type` - (String) The Power Edge Router type. - `pi_workspace_id` - (String) Workspace ID. - `pi_workspace_location` - (Map) Workspace location. - Nested schema for `Workspace location`: - - `region` - (String) Workspace location region zone. - - `type` - (String) Workspace location region type. - - `url`- (String) Workspace location region url. + Nested schema for `Workspace location`: + - `region` - (String) Workspace location region zone. + - `type` - (String) Workspace location region type. + - `url`- (String) Workspace location region url. - `pi_workspace_name` - (String) Workspace name. - `pi_workspace_status` - (String) Workspace status, `active`, `critical`, `failed`, `provisioning`. - `pi_workspace_type` - (String) Workspace type, `off-premises` or `on-premises`. diff --git a/website/docs/r/cd_tekton_pipeline.html.markdown b/website/docs/r/cd_tekton_pipeline.html.markdown index d590c73d07..06df3abfe3 100644 --- a/website/docs/r/cd_tekton_pipeline.html.markdown +++ b/website/docs/r/cd_tekton_pipeline.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline -Provides a resource for cd_tekton_pipeline. This allows cd_tekton_pipeline to be created, updated and deleted. +Create, update, and delete cd_tekton_pipelines with this resource. ## Example Usage @@ -23,24 +23,26 @@ resource "ibm_cd_tekton_pipeline" "cd_tekton_pipeline_instance" { ## Argument Reference -Review the argument reference that you can specify for your resource. 
+You can specify the following arguments for this resource. * `pipeline_id` - (Required, String) ID of the pipeline tool in your toolchain. Can be referenced from your `ibm_cd_toolchain_tool_pipeline` resource, e.g. `pipeline_id = ibm_cd_toolchain_tool_pipeline.my_pipeline.tool_id` * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. -* `enable_notifications` - (Optional, Boolean) Flag to enable notifications for this pipeline. If enabled, the Tekton pipeline run events will be published to all the destinations specified by the Slack and Event Notifications integrations in the parent toolchain. - * Constraints: The default value is `false`. -* `enable_partial_cloning` - (Optional, Boolean) Flag to enable partial cloning for this pipeline. When partial clone is enabled, only the files contained within the paths specified in definition repositories are read and cloned, this means that symbolic links might not work. - * Constraints: The default value is `false`. -* `next_build_number` - (Optional, Integer) Specify the build number that will be used for the next pipeline run. Build numbers can be any positive whole number between 0 and 100000000000000. +* `enable_notifications` - (Optional, Boolean) Flag to enable notifications for this pipeline. If enabled, the Tekton pipeline run events will be published to all the destinations specified by the Slack and Event Notifications integrations in the parent toolchain. If omitted, this feature is disabled by default. +* `enable_partial_cloning` - (Optional, Boolean) Flag to enable partial cloning for this pipeline. When partial clone is enabled, only the files contained within the paths specified in definition repositories are read and cloned, this means that symbolic links might not work. If omitted, this feature is disabled by default. 
+* `next_build_number` - (Optional, Integer) The build number that will be used for the next pipeline run. * Constraints: The maximum value is `99999999999999`. The minimum value is `1`. -* `worker` - (Optional, List) Specify the worker that is to be used to run the trigger, indicated by a worker object with only the worker ID. If not specified or set as `worker: { id: 'public' }`, the IBM Managed shared workers are used. +* `worker` - (Optional, List) Details of the worker used to run the pipeline. Nested schema for **worker**: * `id` - (Required, String) ID of the worker. - * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z]{1,253}$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z]{1,36}$/`. + * `name` - (Computed, String) Name of the worker. Computed based on the worker ID. + * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_. \\(\\)\\[\\]]{1,253}$/`. + * `type` - (Computed, String) Type of the worker. Computed based on the worker ID. + * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your resource is created. +After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline. * `build_number` - (Integer) The latest pipeline run build number. If this property is absent, the pipeline hasn't had any pipeline runs. 
@@ -105,7 +107,7 @@ Nested schema for **resource_group**: Nested schema for **toolchain**: * `crn` - (String) The CRN for the toolchain that contains the Tekton pipeline. * Constraints: The maximum length is `512` characters. The minimum length is `9` characters. The value must match regular expression `/^crn:v[0-9](:([A-Za-z0-9-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}$/`. - * `id` - (String) UUID. + * `id` - (String) Universally Unique Identifier. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. * `triggers` - (List) Tekton pipeline triggers list. * Constraints: The maximum length is `1024` items. The minimum length is `0` items. @@ -199,9 +201,9 @@ Nested schema for **triggers**: ## Import -You can import the `ibm_cd_tekton_pipeline` resource by using `id`. UUID. +You can import the `ibm_cd_tekton_pipeline` resource by using `id`. Universally Unique Identifier. # Syntax -``` -$ terraform import ibm_cd_tekton_pipeline.cd_tekton_pipeline -``` +
+$ terraform import ibm_cd_tekton_pipeline.cd_tekton_pipeline <id>
+
diff --git a/website/docs/r/cd_tekton_pipeline_definition.html.markdown b/website/docs/r/cd_tekton_pipeline_definition.html.markdown index 12d0be686d..8d3d9bc0d4 100644 --- a/website/docs/r/cd_tekton_pipeline_definition.html.markdown +++ b/website/docs/r/cd_tekton_pipeline_definition.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_definition -Provides a resource for cd_tekton_pipeline_definition. This allows cd_tekton_pipeline_definition to be created, updated and deleted. +Create, update, and delete cd_tekton_pipeline_definitions with this resource. ## Example Usage @@ -32,7 +32,7 @@ resource "ibm_cd_tekton_pipeline_definition" "cd_tekton_pipeline_definition_inst ## Argument Reference -Review the argument reference that you can specify for your resource. +You can specify the following arguments for this resource. * `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -57,7 +57,7 @@ Nested schema for **source**: ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your resource is created. +After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_definition. * `definition_id` - (String) The aggregated definition ID. @@ -75,9 +75,9 @@ The `id` property can be formed from `pipeline_id`, and `definition_id` in the f <pipeline_id>/<definition_id> * `pipeline_id`: A string in the format `94619026-912b-4d92-8f51-6c74f0692d90`. The Tekton pipeline ID. -* `definition_id`: A string in the format `94299034-d45f-4e9a-8ed5-6bd5c7bb7ada`. The definition ID. +* `definition_id`: A string. The aggregated definition ID. 
# Syntax -``` -$ terraform import ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition / -``` +
+$ terraform import ibm_cd_tekton_pipeline_definition.cd_tekton_pipeline_definition <pipeline_id>/<definition_id>
+
diff --git a/website/docs/r/cd_tekton_pipeline_property.html.markdown b/website/docs/r/cd_tekton_pipeline_property.html.markdown index fbc85ec9d0..41372a07fc 100644 --- a/website/docs/r/cd_tekton_pipeline_property.html.markdown +++ b/website/docs/r/cd_tekton_pipeline_property.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_property -Provides a resource for cd_tekton_pipeline_property. This allows cd_tekton_pipeline_property to be created, updated and deleted. +Create, update, and delete cd_tekton_pipeline_property instances with this resource. ## Example Usage @@ -23,15 +23,14 @@ resource "ibm_cd_tekton_pipeline_property" "cd_tekton_pipeline_property_instance ## Argument Reference -Review the argument reference that you can specify for your resource. +You can specify the following arguments for this resource. * `enum` - (Optional, List) Options for `single_select` property type. Only needed when using `single_select` property type. * Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `256` items. The minimum length is `0` items. * `locked` - (Optional, Boolean) When true, this property cannot be overridden by a trigger property or at runtime. Attempting to override it will result in run requests being rejected. The default is false. - * Constraints: The default value is `false`. * `name` - (Required, Forces new resource, String) Property name. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. -* `path` - (Optional, String) A dot notation path for `integration` type properties only, to select a value from the tool integration. If left blank the full tool integration data will be used. +* `path` - (Optional, String) A dot notation path for `integration` type properties only, that selects a value from the tool integration.
If left blank the full tool integration data will be used. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^[-0-9a-zA-Z_.]*$/`. * `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -42,7 +41,7 @@ Review the argument reference that you can specify for your resource. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your resource is created. +After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_property. * `href` - (String) API URL for interacting with the property. @@ -52,15 +51,15 @@ In addition to all argument references listed, you can access the following attr ## Import You can import the `ibm_cd_tekton_pipeline_property` resource by using `name`. -The `name` property can be formed from `pipeline_id`, and `property_name` in the following format: +The `name` property can be formed from `pipeline_id`, and `name` in the following format:
-<pipeline_id>/<property_name>
+<pipeline_id>/<name>
 
* `pipeline_id`: A string in the format `94619026-912b-4d92-8f51-6c74f0692d90`. The Tekton pipeline ID. -* `property_name`: A string in the format `debug-pipeline`. The property name. +* `name`: A string in the format `prop1`. Property name. # Syntax -``` -$ terraform import ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property / -``` +
+$ terraform import ibm_cd_tekton_pipeline_property.cd_tekton_pipeline_property <pipeline_id>/<name>
+
diff --git a/website/docs/r/cd_tekton_pipeline_trigger.html.markdown b/website/docs/r/cd_tekton_pipeline_trigger.html.markdown index 433e285537..1a907a2eff 100644 --- a/website/docs/r/cd_tekton_pipeline_trigger.html.markdown +++ b/website/docs/r/cd_tekton_pipeline_trigger.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_trigger -Provides a resource for cd_tekton_pipeline_trigger. This allows cd_tekton_pipeline_trigger to be created, updated and deleted. +Create, update, and delete cd_tekton_pipeline_triggers with this resource. ## Example Usage @@ -27,13 +27,13 @@ resource "ibm_cd_tekton_pipeline_trigger" "cd_tekton_pipeline_trigger_instance" ## Argument Reference -Review the argument reference that you can specify for your resource. +You can specify the following arguments for this resource. * `cron` - (Optional, String) Only needed for timer triggers. CRON expression that indicates when this trigger will activate. Maximum frequency is every 5 minutes. The string is based on UNIX crontab syntax: minute, hour, day of month, month, day of week. Example: The CRON expression 0 *_/2 * * * - translates to - every 2 hours. * Constraints: The maximum length is `253` characters. The minimum length is `5` characters. The value must match regular expression `/^[-0-9a-zA-Z,\\*\/ ]{5,253}$/`. -* `enable_events_from_forks` - (Optional, Boolean) Only used for SCM triggers. When enabled, pull request events from forks of the selected repository will trigger a pipeline run. +* `enable_events_from_forks` - (Optional, Boolean) When enabled, pull request events from forks of the selected repository will trigger a pipeline run. * Constraints: The default value is `false`. -* `enabled` - (Optional, Boolean) Flag to check if the trigger is enabled. If omitted the trigger is enabled by default. +* `enabled` - (Optional, Boolean) Flag to check if the trigger is enabled. * Constraints: The default value is `true`. 
* `event_listener` - (Required, String) Event listener name. The name of the event listener to which the trigger is associated. The event listeners are defined in the definition repositories of the Tekton pipeline. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. @@ -64,28 +64,39 @@ Nested schema for **secret**: Nested schema for **source**: * `properties` - (Required, List) Properties of the source, which define the URL of the repository and a branch or pattern. Nested schema for **properties**: + * `blind_connection` - (Computed, Boolean) True if the repository server is not addressable on the public internet. IBM Cloud will not be able to validate the connection details you provide. * `branch` - (Optional, String) Name of a branch from the repo. Only one of branch, pattern, or filter should be specified. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. + * `hook_id` - (Computed, String) Repository webhook ID. It is generated upon trigger creation. + * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. * `pattern` - (Optional, String) The pattern of Git branch or tag. You can specify a glob pattern such as '!test' or '*master' to match against multiple tags or branches in the repository.The glob pattern used must conform to Bash 4.3 specifications, see bash documentation for more info: https://www.gnu.org/software/bash/manual/bash.html#Pattern-Matching. Only one of branch, pattern, or filter should be specified. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.:@=$&^\/\\?\\!\\*\\+\\[\\]\\(\\)\\{\\}\\|\\\\]*$/`. 
+ * `tool` - (Required, List) Reference to the repository tool in the parent toolchain. + Nested schema for **tool**: + * `id` - (Computed, String) ID of the repository tool instance in the parent toolchain. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. * `url` - (Required, Forces new resource, String) URL of the repository to which the trigger is listening. * Constraints: The maximum length is `2048` characters. The minimum length is `10` characters. The value must match regular expression `/^http(s)?:\/\/([^\/?#]*)([^?#]*)(\\?([^#]*))?(#(.*))?$/`. * `type` - (Required, String) The only supported source type is "git", indicating that the source is a git repository. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^git$/`. -* `tags` - (Optional, List) Trigger tags array. +* `tags` - (Optional, List) Optional trigger tags array. * Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `128` items. The minimum length is `0` items. * `timezone` - (Optional, String) Only used for timer triggers. Specify the timezone used for this timer trigger, which will ensure the CRON activates this trigger relative to the specified timezone. If no timezone is specified, the default timezone used is UTC. Valid timezones are those listed in the IANA timezone database, https://www.iana.org/time-zones. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z+_., \/]{1,253}$/`. * `type` - (Required, String) Trigger type. - * Constraints: Allowable values are: `manual`, `scm`, `timer`, `generic`. -* `worker` - (Optional, List) Specify the worker used to run the trigger. Use `worker: { id: 'public' }` to use the IBM Managed workers. 
The default is to inherit the worker set in the pipeline settings, which can also be explicitly set using `worker: { id: 'inherit' }`. + * Constraints: Allowable values are: `manual`, `scm`, `timer`, `generic`. +* `worker` - (Optional, List) Details of the worker used to run the trigger. Nested schema for **worker**: * `id` - (Required, String) ID of the worker. - * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z]{1,253}$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z]{1,36}$/`. + * `name` - (Computed, String) Name of the worker. Computed based on the worker ID. + * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_. \\(\\)\\[\\]]{1,253}$/`. + * `type` - (Computed, String) Type of the worker. Computed based on the worker ID. + * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your resource is created. +After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_trigger. * `href` - (String) API URL for interacting with the trigger. Only included when fetching the list of pipeline triggers. @@ -121,9 +132,9 @@ The `id` property can be formed from `pipeline_id`, and `trigger_id` in the foll <pipeline_id>/<trigger_id> * `pipeline_id`: A string in the format `94619026-912b-4d92-8f51-6c74f0692d90`. The Tekton pipeline ID. -* `trigger_id`: A string in the format `1bb892a1-2e04-4768-a369-b1159eace147`. The trigger ID. +* `trigger_id`: A string. The Trigger ID.
# Syntax -``` -$ terraform import ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger / -``` +
+```
+$ terraform import ibm_cd_tekton_pipeline_trigger.cd_tekton_pipeline_trigger <pipeline_id>/<trigger_id>
+```
diff --git a/website/docs/r/cd_tekton_pipeline_trigger_property.html.markdown b/website/docs/r/cd_tekton_pipeline_trigger_property.html.markdown index 0947a3d79b..4d99239b73 100644 --- a/website/docs/r/cd_tekton_pipeline_trigger_property.html.markdown +++ b/website/docs/r/cd_tekton_pipeline_trigger_property.html.markdown @@ -8,7 +8,7 @@ subcategory: "Continuous Delivery" # ibm_cd_tekton_pipeline_trigger_property -Provides a resource for cd_tekton_pipeline_trigger_property. This allows cd_tekton_pipeline_trigger_property to be created, updated and deleted. +Create, update, and delete cd_tekton_pipeline_trigger_propertys with this resource. ## Example Usage @@ -24,15 +24,14 @@ resource "ibm_cd_tekton_pipeline_trigger_property" "cd_tekton_pipeline_trigger_p ## Argument Reference -Review the argument reference that you can specify for your resource. +You can specify the following arguments for this resource. * `enum` - (Optional, List) Options for `single_select` property type. Only needed for `single_select` property type. * Constraints: The list items must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. The maximum length is `256` items. The minimum length is `0` items. * `locked` - (Optional, Boolean) When true, this property cannot be overridden at runtime. Attempting to override it will result in run requests being rejected. The default is false. - * Constraints: The default value is `false`. * `name` - (Required, Forces new resource, String) Property name. * Constraints: The maximum length is `253` characters. The minimum length is `1` character. The value must match regular expression `/^[-0-9a-zA-Z_.]{1,253}$/`. -* `path` - (Optional, String) A dot notation path for `integration` type properties only, to select a value from the tool integration. If left blank the full tool integration data will be used. +* `path` - (Optional, String) A dot notation path for `integration` type properties only, that selects a value from the tool integration. 
If left blank the full tool integration data will be used. * Constraints: The maximum length is `4096` characters. The minimum length is `0` characters. The value must match regular expression `/^[-0-9a-zA-Z_.]*$/`. * `pipeline_id` - (Required, Forces new resource, String) The Tekton pipeline ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[-0-9a-z]+$/`. @@ -45,7 +44,7 @@ Review the argument reference that you can specify for your resource. ## Attribute Reference -In addition to all argument references listed, you can access the following attribute references after your resource is created. +After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the cd_tekton_pipeline_trigger_property. * `href` - (String) API URL for interacting with the trigger property. @@ -55,16 +54,16 @@ In addition to all argument references listed, you can access the following attr ## Import You can import the `ibm_cd_tekton_pipeline_trigger_property` resource by using `name`. -The `name` property can be formed from `pipeline_id`, `trigger_id`, and `property_name` in the following format: +The `name` property can be formed from `pipeline_id`, `trigger_id`, and `name` in the following format:
-<pipeline_id>/<trigger_id>/<property_name>
+<pipeline_id>/<trigger_id>/<name>
 
* `pipeline_id`: A string in the format `94619026-912b-4d92-8f51-6c74f0692d90`. The Tekton pipeline ID. * `trigger_id`: A string in the format `1bb892a1-2e04-4768-a369-b1159eace147`. The trigger ID. -* `property_name`: A string in the format `debug-pipeline`. The property name. +* `name`: A string in the format `prop1`. Property name. # Syntax -``` -$ terraform import ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property // -``` +
+```
+$ terraform import ibm_cd_tekton_pipeline_trigger_property.cd_tekton_pipeline_trigger_property <pipeline_id>/<trigger_id>/<name>
+```
diff --git a/website/docs/r/config_aggregator_settings.html.markdown b/website/docs/r/config_aggregator_settings.html.markdown new file mode 100644 index 0000000000..11fc00c3fb --- /dev/null +++ b/website/docs/r/config_aggregator_settings.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_config_aggregator_settings" +description: |- + Manages config_aggregator_settings. +subcategory: "Configuration Aggregator" +--- + +# ibm_config_aggregator_settings + +Create, update, and delete config_aggregator_settingss with this resource. + +## Example Usage + +```hcl +resource "ibm_config_aggregator_settings" "config_aggregator_settings_instance" { + instance_id=var.instance_id + region=var.region + additional_scope { + type = "Enterprise" + enterprise_id = "enterprise_id" + profile_template { + id = "ProfileTemplate-adb55769-ae22-4c60-aead-bd1f84f93c57" + trusted_profile_id = "Profile-6bb60124-8fc3-4d18-b63d-0b99560865d3" + } + } + resource_collection_regions = us-south + resource_collection_enabled = true + trusted_profile_id = "Profile-1260aec2-f2fc-44e2-8697-2cc15a447560" +} +``` + +## Argument Reference + +You can specify the following arguments for this resource. +* `instance_id` - (Required, Forces new resource, String) The GUID of the Configuration Aggregator instance. +* `resource_collection_regions` - (Optional, Forces new resource, String) The region of the Configuration Aggregator instance. If not provided defaults to the region defined in the IBM provider configuration. +* `additional_scope` - (Optional, Forces new resource, List) The additional scope that enables resource collection for Enterprise acccounts. + * Constraints: The maximum length is `10` items. The minimum length is `0` items. +Nested schema for **additional_scope**: + * `enterprise_id` - (Optional, String) The Enterprise ID. + * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/[a-zA-Z0-9]/`. 
+ * `profile_template` - (Optional, List) The Profile Template details applied on the enterprise account. + Nested schema for **profile_template**: + * `id` - (Optional, String) The Profile Template ID created in the enterprise account that provides access to App Configuration instance for resource collection. + * Constraints: The maximum length is `52` characters. The minimum length is `52` characters. The value must match regular expression `/[a-zA-Z0-9-]/`. + * `trusted_profile_id` - (Optional, String) The trusted profile ID that provides access to App Configuration instance to retrieve template information. + * Constraints: The maximum length is `44` characters. The minimum length is `44` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + * `type` - (Optional, String) The type of scope. Currently allowed value is Enterprise. + * Constraints: The maximum length is `64` characters. The minimum length is `0` characters. The value must match regular expression `/[a-zA-Z0-9]/`. +* `regions` - (Optional, Forces new resource, List) The list of regions across which the resource collection is enabled. + * Constraints: The list items must match regular expression `/^[a-zA-Z0-9-]*$/`. The maximum length is `10` items. The minimum length is `0` items. +* `resource_collection_enabled` - (Optional, Forces new resource, Boolean) The field denoting if the resource collection is enabled. +* `trusted_profile_id` - (Optional, Forces new resource, String) The trusted profile id that provides Reader access to the App Configuration instance to collect resource metadata. + * Constraints: The maximum length is `44` characters. The minimum length is `44` characters. The value must match regular expression `/^[a-zA-Z0-9-]*$/`. + +## Attribute Reference + +After your resource is created, you can read values from the listed arguments and the following attributes. + +* `id` - The unique identifier of the config_aggregator_settings. 
+ + +## Import + +You can import the `ibm_config_aggregator_settings` resource by using `region` and `instance_id`. +# Syntax +
+```
+$ terraform import ibm_config_aggregator_settings.config_aggregator_settings <region>/<instance_id>
+```
+ +# Example +``` +$ terraform import ibm_config_aggregator_settings.config_aggregator_settings us-south/23243-3223-2323-333 +``` diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 01423f0e36..c8739395ea 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -105,6 +105,7 @@ Review the argument references that you can specify for your resource. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. - `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. The argument allows the user to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. - `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. ## Attribute reference diff --git a/website/docs/r/container_worker_pool.html.markdown b/website/docs/r/container_worker_pool.html.markdown index b2fc5f333f..6a427b4bce 100644 --- a/website/docs/r/container_worker_pool.html.markdown +++ b/website/docs/r/container_worker_pool.html.markdown @@ -77,6 +77,8 @@ Review the argument references that you can specify for your resource. - `key` - (Required, String) Key for taint. - `value` - (Required, String) Value for taint. - `effect` - (Required, String) Effect for taint. Accepted values are `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. +- `import_on_create` - (Optional, Bool) Import an existing WorkerPool from the cluster, instead of creating a new. +- `orphan_on_delete` - (Optional, Bool) Orphan the Worker Pool resource, instead of deleting it. 
The argument allows the user to remove the worker pool from the state, without deleting the actual cloud resource. The worker pool can be re-imported into the state using the `import_on_create` argument. **Deprecated reference** diff --git a/website/docs/r/database.html.markdown b/website/docs/r/database.html.markdown index d8cec79f7c..83d8a80b8f 100644 --- a/website/docs/r/database.html.markdown +++ b/website/docs/r/database.html.markdown @@ -686,7 +686,7 @@ Review the argument reference that you can specify for your resource. - `service_endpoints` - (Required, String) Specify whether you want to enable the public, private, or both service endpoints. Supported values are `public`, `private`, or `public-and-private`. - `tags` (Optional, Array of Strings) A list of tags that you want to add to your instance. - `version` - (Optional, Forces new resource, String) The version of the database to be provisioned. If omitted, the database is created with the most recent major and minor version. -- `deletion_protection` - (Optional, Boolean) If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. +- `deletion_protection` - (Optional, Boolean) If the DB instance should have deletion protection within terraform enabled. This is not a property of the resource and does not prevent deletion outside of terraform. The database can't be deleted by terraform when this value is set to `true`. The default is `false`. - `users` - (Optional, List of Objects) A list of users that you want to create on the database. Multiple blocks are allowed. 
Nested scheme for `users`: diff --git a/website/docs/r/event_streams_quota.html.markdown b/website/docs/r/event_streams_quota.html.markdown new file mode 100644 index 0000000000..d1aa40b38a --- /dev/null +++ b/website/docs/r/event_streams_quota.html.markdown @@ -0,0 +1,96 @@ +--- +subcategory: "Event Streams" +layout: "ibm" +page_title: "IBM: event_streams_quota" +description: |- + Manages a quota of an IBM Event Streams service instance. +--- + +# ibm_event_streams_quota + +Create, update or delete a quota of an Event Streams service instance. Both the default quota and user quotas may be managed. Quotas are only available on Event Streams Enterprise plan service instances. For more information about Event Streams quotas, see [Setting Kafka quotas](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-enabling_kafka_quotas). + +## Example usage + +### Sample 1: Create an Event Streams service instance and apply a default quota + +Using `entity = default` in the quota resource sets the default quota, which applies to all users for which no user quota has been set. + +```terraform +resource "ibm_resource_instance" "es_instance_1" { + name = "terraform-integration-1" + service = "messagehub" + plan = "enterprise-3nodes-2tb" + location = "us-south" + resource_group_id = data.ibm_resource_group.group.id + + timeouts { + create = "3h" + update = "1h" + delete = "15m" + } +} + +resource "ibm_event_streams_quota" "es_quota_1" { + resource_instance_id = ibm_resource_instance.es_instance_1.id + entity = "default" + producer_byte_rate = 16384 + consumer_byte_rate = 32768 +} + +``` + +### Sample 2: Create a user quota on an existing Event Streams instance + +The quota is set for the user with the given IAM ID. The producer rate is limited, the consumer rate is unlimited (-1). 
+ +```terraform +data "ibm_resource_instance" "es_instance_2" { + name = "terraform-integration-2" + resource_group_id = data.ibm_resource_group.group.id +} + +resource "ibm_event_streams_quota" "es_quota_2" { + resource_instance_id = ibm_resource_instance.es_instance_2.id + entity = "iam-ServiceId-00001111-2222-3333-4444-555566667777" + producer_byte_rate = 16384 + consumer_byte_rate = -1 +} + +``` + +## Argument reference + +You must specify the following arguments for this resource. + +- `resource_instance_id` - (Required, String) The ID or the CRN of the Event Streams service instance. +- `entity` - (Required, String) Either `default` to set the default quota, or an IAM ID for a user quota. +- `producer_byte_rate` - (Required, Integer) The producer quota in bytes/second. Use -1 for no quota. +- `consumer_byte_rate` - (Required, Integer) The consumer quota in bytes/second. Use -1 for no quota. + +## Attribute reference + +After your resource is created, you can read values from the listed arguments and the following attributes. + +- `id` - (String) The ID of the quota in CRN format. The last field of the CRN is either `default`, or the IAM ID of the user. See the examples in the import section. + +## Import + +The `ibm_event_streams_quota` resource can be imported by using the ID in CRN format. 
The three colon-separated parameters of the `CRN` are: + - instance CRN = CRN of the Event Streams instance + - resource type = quota + - quota entity = `default` or the IAM ID of the user + +**Syntax** + +``` +$ terraform import ibm_event_streams_quota.es_quota + +``` + +**Examples** + +``` +$ terraform import ibm_event_streams_quota.es_quota_default crn:v1:bluemix:public:messagehub:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:ffffffff-eeee-dddd-cccc-bbbbaaaa9999:quota:default +$ terraform import ibm_event_streams_quota.es_quota_user crn:v1:bluemix:public:messagehub:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:ffffffff-eeee-dddd-cccc-bbbbaaaa9999:quota:iam-ServiceId-00001111-2222-3333-4444-555566667777 +``` diff --git a/website/docs/r/is_lb.html.markdown b/website/docs/r/is_lb.html.markdown index 5d24ffe531..1b68591c00 100644 --- a/website/docs/r/is_lb.html.markdown +++ b/website/docs/r/is_lb.html.markdown @@ -57,6 +57,15 @@ resource "ibm_is_lb" "example" { } } +``` +## An example to create a private path load balancer. +```terraform +resource "ibm_is_lb" "example" { + name = "example-load-balancer" + subnets = [ibm_is_subnet.example.id] + profile = "network-private-path" + type = "private_path" +} ``` ## Timeouts @@ -85,7 +94,7 @@ Review the argument references that you can specify for your resource. - `logging`- (Optional, Bool) Enable or disable datapath logging for the load balancer. This is applicable only for application load balancer. Supported values are **true** or **false**. Default value is **false**. - `name` - (Required, String) The name of the VPC load balancer. -- `profile` - (Optional, Forces new resource, String) For a Network Load Balancer, this attribute is required and should be set to `network-fixed`. For Application Load Balancer, profile is not a required attribute. +- `profile` - (Optional, Forces new resource, String) For a Network Load Balancer, this attribute is required for network and private path load balancers. 
Should be set to `network-private-path` for private path load balancers and `network-fixed` for a network load balancer. For Application Load Balancer, profile is not a required attribute. - `resource_group` - (Optional, Forces new resource, String) The resource group where the load balancer to be created. - `route_mode` - (Optional, Forces new resource, Bool) Indicates whether route mode is enabled for this load balancer. @@ -97,14 +106,18 @@ Review the argument references that you can specify for your resource. The subnets must be in the same `VPC`. The load balancer's `availability` will depend on the availability of the `zones` the specified subnets reside in. The load balancer must be in the `application` family for `updating subnets`. Load balancers in the `network` family allow only `one subnet` to be specified. - `tags` (Optional, Array of Strings) A list of tags that you want to add to your load balancer. Tags can help you find the load balancer more easily later. -- `type` - (Optional, Forces new resource, String) The type of the load balancer. Default value is `public`. Supported values are `public` and `private`. +- `type` - (Optional, Forces new resource, String) The type of the load balancer. Default value is `public`. Supported values are `public`, `private` and `private_path`. + ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `access_mode` - (String) The access mode for this load balancer. One of **private**, **public**, **private_path**. +- `availability` - (String) The availability of this load balancer - `crn` - (String) The CRN for this load balancer. - `hostname` - (String) The fully qualified domain name assigned to this load balancer. - `id` - (String) The unique identifier of the load balancer. +- `instance_groups_supported` - (Boolean) Indicates whether this load balancer supports instance groups. 
- `operating_status` - (String) The operating status of this load balancer. - `public_ips` - (String) The public IP addresses assigned to this load balancer. - `private_ip` - (List) The Reserved IP address reference assigned to this load balancer. @@ -118,6 +131,7 @@ In addition to all argument reference list, you can access the following attribu - `private_ips` - (String) The private IP addresses (Reserved IP address reference) assigned to this load balancer. - `status` - (String) The status of the load balancer. - `security_groups_supported`- (Bool) Indicates if this load balancer supports security groups. +- `source_ip_session_persistence_supported` - (Boolean) Indicates whether this load balancer supports source IP session persistence. - `udp_supported`- (Bool) Indicates whether this load balancer supports UDP. diff --git a/website/docs/r/is_private_path_service_gateway.html.markdown b/website/docs/r/is_private_path_service_gateway.html.markdown new file mode 100644 index 0000000000..dd57798f1f --- /dev/null +++ b/website/docs/r/is_private_path_service_gateway.html.markdown @@ -0,0 +1,77 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway" +description: |- + Manages PrivatePathServiceGateway. +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway + +Provides a resource for PrivatePathServiceGateway. This allows PrivatePathServiceGateway to be created, updated and deleted. + +**NOTE:** +Private path service gateway is a select availability feature. + +## Example Usage + +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "permit" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your resource. 
+ +- `default_access_policy` - (Optional, String) The policy to use for bindings from accounts without an explicit account policy. `permit`: access will be permitted. `deny`: access will be denied. `review`: access will be manually reviewed. Allowable values are: `deny`, `permit`, `review`. +- `load_balancer` - (Required, String) The ID of the load balancer for this private path service gateway. This load balancer must be in the same VPC as the private path service gateway and must have is_private_path set to true. +- `service_endpoints` - (Required, List of Strings) The fully qualified domain names for this private path service gateway. +- `name` - (Optional, String) The name for this private path service gateway. The name must not be used by another private path service gateway in the VPC. +- `resource_group` - (Optional, String) ID of the resource group to use. +- `zonal_affinity` - (Optional, String) Indicates whether this private path service gateway has zonal affinity. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your resource is created. + + +- `created_at` - (String) The date and time that the private path service gateway was created. +- `crn` - (String) The CRN for this private path service gateway. +- `default_access_policy` - (String) The policy to use for bindings from accounts without an explicit account policy. +- `endpoint_gateway_count` - (Integer) The number of endpoint gateways using this private path service gateway. +- `endpoint_gateway_binding_auto_delete` - (Boolean) Indicates whether endpoint gateway bindings will be automatically deleted after endpoint_gateway_binding_auto_delete_timeout hours have passed. At present, this is always true, but may be modifiable in the future. 
+- `endpoint_gateway_binding_auto_delete_timeout` - (Integer) If endpoint_gateway_binding_auto_delete is true, the hours after which endpoint gateway bindings will be automatically deleted. If the value is 0, abandoned endpoint gateway bindings will be deleted immediately. At present, this is always set to 0. This value may be modifiable in the future. +- `href` - (String) The URL for this private path service gateway. +- `id` - The unique identifier of the PrivatePathServiceGateway +- `lifecycle_state` - (String) The lifecycle state of the private path service gateway. +- `load_balancer` - (String) The load balancer for this private path service gateway. +- `name` - (String) The name for this private path service gateway. The name is unique across all private path service gateways in the VPC. +- `published` - (Boolean) Indicates the availability of this private path service gateway- `true`: Any account can request access to this private path service gateway.- `false`: Access is restricted to the account that created this private path service gateway. +- `resource_group` - (String) The resource group for this private path service gateway. +- `resource_type` - (String) The resource type. +- `service_endpoints` - (List of strings) The fully qualified domain names for this private path service gateway. +- `vpc` - (String) The VPC this private path service gateway resides in. +- `zonal_affinity` - (Boolean) Indicates whether this private path service gateway has zonal affinity.- `true`: Traffic to the service from a zone will favor service endpoints in the same zone.- `false`: Traffic to the service from a zone will be load balanced across all zones in the region the service resides in. + + +## Import + +You can import the `ibm_is_private_path_service_gateway` resource by using `id`. + + +``` + +``` +- `private_path_service_gateway`: A string. The private path service gateway identifier. 
+ + +# Syntax +``` +$ terraform import ibm_is_private_path_service_gateway.example +``` diff --git a/website/docs/r/is_private_path_service_gateway_account_policy.html.markdown b/website/docs/r/is_private_path_service_gateway_account_policy.html.markdown new file mode 100644 index 0000000000..d35d52b185 --- /dev/null +++ b/website/docs/r/is_private_path_service_gateway_account_policy.html.markdown @@ -0,0 +1,67 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_account_policy" +description: |- + Manages PrivatePathServiceGatewayAccountPolicy. +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_account_policy + +Provides a resource for PrivatePathServiceGatewayAccountPolicy. This allows PrivatePathServiceGatewayAccountPolicy to be created, updated and deleted. + +**NOTE:** +Private path service gateway is a select availability feature. + +## Example Usage + +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "deny" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} +resource "ibm_is_private_path_service_gateway_account_policy" "example" { + access_policy = "deny" + account = "fee82deba12e4c0fb69c3b09d1f12345" + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your resource. + +- `access_policy` - (Required, String) The access policy for the account:- permit: access will be permitted- deny: access will be denied- review: access will be manually reviewed. Allowable values are: `deny`, `permit`, `review`. +- `account` - (Required, Forces new resource, String) The ID of the account for this access policy. +- `private_path_service_gateway` - (Required, Forces new resource, String) The private path service gateway identifier. 
+ +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your resource is created. + +- `id` - The unique identifier of the PrivatePathServiceGatewayAccountPolicy. The ID is composed of `/`. +- `created_at` - (String) The date and time that the account policy was created. +- `href` - (String) The URL for this account policy. +- `account_policy` - (String) The unique identifier for this account policy. +- `resource_type` - (String) The resource type. +- `updated_at` - (String) The date and time that the account policy was updated. + + +## Import + +You can import the `ibm_is_private_path_service_gateway_account_policy` resource by using `id`. +The `id` property can be formed from `private_path_service_gateway`, and `id` in the following format: + +``` +/ +``` +- `private_path_service_gateway`: A string. The private path service gateway identifier. +- `id`: A string. The account policy identifier. + +# Syntax +``` +$ terraform import ibm_is_private_path_service_gateway_account_policy.example / +``` diff --git a/website/docs/r/is_private_path_service_gateway_endpoint_gateway_binding_operations.html.markdown b/website/docs/r/is_private_path_service_gateway_endpoint_gateway_binding_operations.html.markdown new file mode 100644 index 0000000000..746e69a5e1 --- /dev/null +++ b/website/docs/r/is_private_path_service_gateway_endpoint_gateway_binding_operations.html.markdown @@ -0,0 +1,56 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations" +description: |- + Manages PrivatePathServiceGateway endpoint gateway bindings. +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations + +Provides a resource for ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations. This allows permitting or denying endpoint gateway bindings. 
+ +**NOTE:** +Private path service gateway is a select availability feature. + +## Example Usage. Permit all the pending endpoint gateway bindings + +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "review" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} +data "ibm_is_private_path_service_gateway_endpoint_gateway_bindings" "bindings" { + account = "7f75c7b025e54bc5635f754b2f888665" + status = "pending" + private_path_service_gateway = ibm_is_private_path_service_gateway.ppsg.id +} +resource "ibm_is_private_path_service_gateway_endpoint_gateway_binding_operations" "policy" { + count = length(data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.bindings.endpoint_gateway_bindings) + access_policy = "permit" + endpoint_gateway_binding = data.ibm_is_private_path_service_gateway_endpoint_gateway_bindings.bindings.endpoint_gateway_bindings[count.index].id + private_path_service_gateway = ibm_is_private_path_service_gateway.ppsg.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your resource. + +- `access_policy` - (Required, String) The access policy for the endpoint gateway binding:- permit: access will be permitted- deny: access will be denied. Allowable values are: `deny`, `permit`. +- `private_path_service_gateway` - (Required, Forces new resource, String) The private path service gateway +identifier. +- `endpoint_gateway_binding` - (Required, Forces new resource, String) ID of the endpoint gateway binding + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your resource is created. + +- `access_policy` - (String) The access policy for the endpoint gateway binding:- permit: access will be permitted- deny: access will be denied. Allowable values are: `deny`, `permit`. 
+- `private_path_service_gateway` - (String) The private path service gateway +identifier. +- `endpoint_gateway_binding` - (String) ID of the endpoint gateway binding + diff --git a/website/docs/r/is_private_path_service_gateway_operations.html.markdown b/website/docs/r/is_private_path_service_gateway_operations.html.markdown new file mode 100644 index 0000000000..41d38f2e5a --- /dev/null +++ b/website/docs/r/is_private_path_service_gateway_operations.html.markdown @@ -0,0 +1,62 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_operations" +description: |- + Manages PrivatePathServiceGateway publish and unpublish. +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_operations + +Provides a resource for ibm_is_private_path_service_gateway_operations. This allows publishing or unpublishing the PPSG. + +**NOTE:** +Private path service gateway is a select availability feature. + +## Example Usage. Publish a PPSG. + +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "permit" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} +resource "ibm_is_private_path_service_gateway_operations" "publish" { + published = true + private_path_service_gateway = ibm_is_private_path_service_gateway.ppsg.id +} +``` +## Example Usage. Unpublish a PPSG. + +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "permit" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} +resource "ibm_is_private_path_service_gateway_operations" "publish" { + published = false + private_path_service_gateway = ibm_is_private_path_service_gateway.ppsg.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your resource. 
+ +- `published` - (Required, Boolean) Boolean to specify whether to publish or unpublish the PPSG. +- `private_path_service_gateway` - (Required, Forces new resource, String) The private path service gateway +identifier. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your resource is created. + +- `published` - (Boolean) Boolean to specify whether to publish or unpublish the PPSG. +- `private_path_service_gateway` - (String) The private path service gateway +identifier. + diff --git a/website/docs/r/is_private_path_service_gateway_revoke_account.html.markdown b/website/docs/r/is_private_path_service_gateway_revoke_account.html.markdown new file mode 100644 index 0000000000..6c72d77bd5 --- /dev/null +++ b/website/docs/r/is_private_path_service_gateway_revoke_account.html.markdown @@ -0,0 +1,46 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_is_private_path_service_gateway_revoke_account" +description: |- + Manages PrivatePathServiceGateway revoke account. +subcategory: "VPC infrastructure" +--- + +# ibm_is_private_path_service_gateway_revoke_account + +Provides a resource for ibm_is_private_path_service_gateway_revoke_account. This revokes the access to provided account. + +**NOTE:** +Private path service gateway is a select availability feature. + +## Example Usage. +```hcl +resource "ibm_is_private_path_service_gateway" "example" { + default_access_policy = "permit" + name = "my-example-ppsg" + load_balancer = ibm_is_lb.testacc_LB.id + zonal_affinity = true + service_endpoints = ["myexamplefqdn"] +} + resource "ibm_is_private_path_service_gateway_revoke_account" "example" { + account = "7f75c7b025e54bc5635f754b2f888665" + private_path_service_gateway = ibm_is_private_path_service_gateway.example.id +} +``` + +## Argument Reference + +Review the argument reference that you can specify for your resource. + +- `account` - (Required, String) Account ID to revoke. 
+- `private_path_service_gateway` - (Required, Forces new resource, String) The private path service gateway +identifier. + +## Attribute Reference + +In addition to all argument references listed, you can access the following attribute references after your resource is created. + +- `account` - (String) Account ID to revoke. +- `private_path_service_gateway` - (String) The private path service gateway +identifier. + diff --git a/website/docs/r/is_subnet.html.markdown b/website/docs/r/is_subnet.html.markdown index c592676aa1..c52035eb5e 100644 --- a/website/docs/r/is_subnet.html.markdown +++ b/website/docs/r/is_subnet.html.markdown @@ -104,6 +104,9 @@ Review the argument references that you can specify for your resource. - `public_gateway` - (Optional, String) The ID of the public gateway for the subnet that you want to attach to the subnet. You create the public gateway with the [`ibm_is_public_gateway` resource](#provider-public-gateway). - `resource_group` - (Optional, Forces new resource, String) The ID of the resource group where you want to create the subnet. - `routing_table` - (Optional, String) The routing table ID associated with the subnet. +- `routing_table_crn` - (Optional, String) The routing table crn associated with the subnet. + ~> **Note** + `routing_table` and `routing_table_crn` are mutually exclusive. - `tags` - (Optional, List of Strings) The tags associated with the subnet. - `total_ipv4_address_count` - (Optional, Forces new resource, String) The total number of IPv4 addresses. Either `ipv4_cidr_block` or `total_pv4_address_count` input parameters must be provided in the resource. 
diff --git a/website/docs/r/is_subnet_routing_table_attachment.html.markdown b/website/docs/r/is_subnet_routing_table_attachment.html.markdown index feaa3bc11a..85a3e6f84d 100644 --- a/website/docs/r/is_subnet_routing_table_attachment.html.markdown +++ b/website/docs/r/is_subnet_routing_table_attachment.html.markdown @@ -49,6 +49,9 @@ resource "ibm_is_subnet_routing_table_attachment" "example" { Review the argument references that you can specify for your resource. - `routing_table` - (Required, String) The routing table identity. +- `routing_table_crn` - (Optional, String) The routing table crn associated with the subnet. + ~> **Note** + `routing_table` and `routing_table_crn` are mutually exclusive. - `subnet` - (Required, Forces new resource, String) The subnet identifier. diff --git a/website/docs/r/is_virtual_endpoint_gateway.html.markdown b/website/docs/r/is_virtual_endpoint_gateway.html.markdown index bec8e804c7..d15a8aefa5 100644 --- a/website/docs/r/is_virtual_endpoint_gateway.html.markdown +++ b/website/docs/r/is_virtual_endpoint_gateway.html.markdown @@ -77,6 +77,18 @@ resource "ibm_is_virtual_endpoint_gateway" "example4" { resource_group = data.ibm_resource_group.example.id security_groups = [ibm_is_security_group.example.id] } + +// Create endpoint gateway with target as private path service gateway +resource "ibm_is_virtual_endpoint_gateway" "example5" { + name = "example-endpoint-gateway-4" + target { + crn = "crn:v1:bluemix:public:is:us-south:a/123456::private-path-service-gateway:r134-fb880975-db45-4459-8548-64e3995ac213" + resource_type = "private_path_service_gateway" + } + vpc = ibm_is_vpc.example.id + resource_group = data.ibm_resource_group.example.id + security_groups = [ibm_is_security_group.example.id] +} ``` ## Argument reference @@ -113,7 +125,9 @@ Review the argument references that you can specify for your resource. - `name` - (Optional, Forces new resource, String) The endpoint gateway target name. 
-> **NOTE:** If `name` is not specified, `crn` must be specified. - - `resource_type` - (Required, String) The endpoint gateway target resource type. The possible values are `provider_cloud_service`, `provider_infrastructure_service`. + - `resource_type` - (Required, String) The endpoint gateway target resource type. The possible values are `provider_cloud_service`, `provider_infrastructure_service` and `private_path_service_gateway`. + + ~> **NOTE** The option `private_path_service_gateway` for the argument `target.resource_type` is a select availability feature. - `vpc` - (Required, Forces new resource, String) The VPC ID. ~> **NOTE:** `ips` configured inline in this resource are not modifiable. Prefer using `ibm_is_virtual_endpoint_gateway_ip` resource to bind/unbind new reserved IPs to endpoint gateways and use the resource `ibm_is_subnet_reserved_ip` to create new reserved IP. diff --git a/website/docs/r/is_volume.html.markdown b/website/docs/r/is_volume.html.markdown index 1b909fe257..cc26dc7eb6 100644 --- a/website/docs/r/is_volume.html.markdown +++ b/website/docs/r/is_volume.html.markdown @@ -71,6 +71,8 @@ Review the argument references that you can specify for your resource. **•** For more information, about creating access tags, see [working with tags](https://cloud.ibm.com/docs/account?topic=account-tag&interface=ui#create-access-console).
**•** You must have the access listed in the [Granting users access to tag resources](https://cloud.ibm.com/docs/account?topic=account-access) for `access_tags`
**•** `access_tags` must be in the format `key:value`. +- `adjustable_capacity_states` - (List) The attachment states that support adjustable capacity for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. +- `adjustable_iops_states` - (List) The attachment states that support adjustable IOPS for this volume. Allowable list items are: `attached`, `unattached`, `unusable`. - `capacity` - (Optional, Integer) (The capacity of the volume in gigabytes. This defaults to `100`, minimum to `10 ` and maximum to `16000`. ~> **NOTE:** Supports only expansion on update (must be attached to a running instance and must not be less than the current volume capacity). Can be updated only if volume is attached to an running virtual server instance. Stopped instance will be started on update of capacity of the volume.If `source_snapshot` is provided `capacity` must be at least the snapshot's minimum_capacity. The maximum value may increase in the future and If unspecified, the capacity will be the source snapshot's minimum_capacity. diff --git a/website/docs/r/is_vpc.html.markdown b/website/docs/r/is_vpc.html.markdown index 0fb1d11ba8..d4fc1e0784 100644 --- a/website/docs/r/is_vpc.html.markdown +++ b/website/docs/r/is_vpc.html.markdown @@ -188,6 +188,7 @@ In addition to all argument reference list, you can access the following attribu - `default_network_acl_crn`- (String) CRN of the default network ACL ID created and attached to the VPC. - `default_network_acl`- (String) The default network ACL ID created and attached to the VPC. - `default_routing_table`- (String) The unique identifier of the VPC default routing table. +- `default_routing_table_crn`- (String) CRN of the default routing table. - `health_reasons` - (List) The reasons for the current `health_state` (if any).The enumerated reason code values for this property will expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected reason code was encountered. Nested schema for **health_reasons**: - `code` - (String) A snake case string succinctly identifying the reason for this health state. diff --git a/website/docs/r/is_vpc_routing_table.html.markdown b/website/docs/r/is_vpc_routing_table.html.markdown index ace815208c..917c9f1173 100644 --- a/website/docs/r/is_vpc_routing_table.html.markdown +++ b/website/docs/r/is_vpc_routing_table.html.markdown @@ -90,6 +90,7 @@ Review the argument references that you can specify for your resource. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `crn`- (String) CRN of the routing table. - `href` - (String) The routing table URL. - `id` - (String) The unique identifier of the routing table. The ID is composed of `/`. - `is_default` - (String) Indicates the default routing table for this VPC. diff --git a/website/docs/r/is_vpn_gateway_connection.html.markdown b/website/docs/r/is_vpn_gateway_connection.html.markdown index 9231e6ddcf..92c84c6ade 100644 --- a/website/docs/r/is_vpn_gateway_connection.html.markdown +++ b/website/docs/r/is_vpn_gateway_connection.html.markdown @@ -107,6 +107,7 @@ Review the argument references that you can specify for your resource. - `action` - (Optional, String) Dead peer detection actions. Supported values are **restart**, **clear**, **hold**, or **none**. Default value is `restart`. - `admin_state_up` - (Optional, Bool) The VPN gateway connection status. Default value is **false**. If set to false, the VPN gateway connection is shut down. +- `distribute_traffic` - (Optional, Bool) Indicates whether the traffic is distributed between the `up` tunnels of the VPN gateway connection when the VPC route's next hop is a VPN connection. 
If `false`, the traffic is only routed through the `up` tunnel with the lower `public_ip` address. Distributing traffic across tunnels of route-based VPN gateway connections. Traffic across tunnels can be distributed with a status of up in a route-based VPN gateway connection. When creating or updating a route-based VPN gateway connection, set the distribute_traffic property to true (default is false). Existing connections will have the `distribute_traffic` property set to false. - `establish_mode` - (Optional, String) The establish mode of the VPN gateway connection:- `bidirectional`: Either side of the VPN gateway can initiate IKE protocol negotiations or rekeying processes.- `peer_only`: Only the peer can initiate IKE protocol negotiations for this VPN gateway connection. Additionally, the peer is responsible for initiating the rekeying process after the connection is established. If rekeying does not occur, the VPN gateway connection will be brought down after its lifetime expires. - `ike_policy` - (Optional, String) The ID of the IKE policy. Updating value from ID to `""` or making it `null` or removing it will remove the existing policy. - `interval` - (Optional, Integer) Dead peer detection interval in seconds. Default value is 2. diff --git a/website/docs/r/logs_alert.html.markdown b/website/docs/r/logs_alert.html.markdown index 0237ae3017..5b19c2b95f 100644 --- a/website/docs/r/logs_alert.html.markdown +++ b/website/docs/r/logs_alert.html.markdown @@ -84,9 +84,9 @@ Nested schema for **condition**: * `parameters` - (Optional, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -95,7 +95,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -110,7 +110,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -138,7 +138,7 @@ Nested schema for **condition**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **values**: * `id` - (Optional, String) The alert ID. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `not` - (Optional, Boolean) The alert not. * `next_op` - (Optional, String) Operator for the alerts. * Constraints: Allowable values are: `and`, `or`. @@ -153,9 +153,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -164,7 +164,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -179,7 +179,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -198,9 +198,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. 
Nested schema for **metric_alert_parameters**: @@ -209,7 +209,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -224,7 +224,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. 
@@ -245,9 +245,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -256,7 +256,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -271,7 +271,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -290,9 +290,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. 
The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -301,7 +301,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -316,7 +316,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -335,9 +335,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -346,7 +346,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -361,7 +361,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -380,9 +380,9 @@ Nested schema for **condition**: * `parameters` - (Required, List) The Less than alert condition parameters. Nested schema for **parameters**: * `cardinality_fields` - (Optional, List) Cardinality fields for unique count alert. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields for the alert condition. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `3` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `3` items. The minimum length is `0` items. * `ignore_infinity` - (Optional, Boolean) Should the evaluation ignore infinity value. * `metric_alert_parameters` - (Optional, List) The lucene metric alert parameters if it is a lucene metric alert. Nested schema for **metric_alert_parameters**: @@ -391,7 +391,7 @@ Nested schema for **condition**: * `arithmetic_operator_modifier` - (Optional, Integer) The arithmetic operator modifier of the metric promql alert. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `metric_field` - (Required, String) The metric field of the metric alert. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_source` - (Required, String) The metric source of the metric alert. * Constraints: Allowable values are: `logs2metrics_or_unspecified`, `prometheus`. * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. @@ -406,7 +406,7 @@ Nested schema for **condition**: * `non_null_percentage` - (Optional, Integer) Non null percentage of the evaluation. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. 
* `promql_text` - (Required, String) The promql text of the metric alert by fields for the alert condition. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `sample_threshold_percentage` - (Required, Integer) The threshold percentage. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `swap_null_values` - (Optional, Boolean) Should we swap null values with zero. @@ -430,34 +430,34 @@ Nested schema for **expiration**: * `filters` - (Required, List) Alert filters. Nested schema for **filters**: * `alias` - (Optional, String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filter_type` - (Optional, String) The type of the filter. * Constraints: Allowable values are: `text_or_unspecified`, `template`, `ratio`, `unique_count`, `time_relative`, `metric`, `flow`. * `metadata` - (Optional, List) The metadata filters. Nested schema for **metadata**: * `applications` - (Optional, List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. 
* `subsystems` - (Optional, List) The subsystems to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `ratio_alerts` - (Optional, List) The ratio alerts. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **ratio_alerts**: * `alias` - (Required, String) The alias of the filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applications` - (Optional, List) The applications to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `group_by` - (Optional, List) The group by fields. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `severities` - (Optional, List) The severities to filter. 
* Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `subsystems` - (Optional, List) The subsystems to filter. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `text` - (Optional, String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severities` - (Optional, List) The severity of the logs to filter. * Constraints: Allowable list items are: `debug_or_unspecified`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `text` - (Optional, String) The text to filter. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `incident_settings` - (Optional, List) Incident settings, will create the incident based on this configuration. Nested schema for **incident_settings**: * `notify_on` - (Optional, String) Notify on setting. 
@@ -470,18 +470,18 @@ Nested schema for **incident_settings**: * Constraints: The maximum length is `200` items. The minimum length is `0` items. Nested schema for **meta_labels**: * `key` - (Optional, String) The key of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `value` - (Optional, String) The value of the label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `meta_labels_strings` - (Optional, List) The Meta labels to add to the alert as string with ':' separator. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `name` - (Required, String) Alert name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `notification_groups` - (Required, List) Alert notification groups. 
* Constraints: The maximum length is `10` items. The minimum length is `1` item. Nested schema for **notification_groups**: * `group_by_fields` - (Optional, List) Group by fields to group the values by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `20` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `20` items. The minimum length is `0` items. * `notifications` - (Optional, List) Webhook target settings for the the notification. * Constraints: The maximum length is `20` items. The minimum length is `0` items. Nested schema for **notifications**: @@ -496,7 +496,7 @@ Nested schema for **notification_groups**: * `retriggering_period_seconds` - (Optional, Integer) Retriggering period of the alert in seconds. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `notification_payload_filters` - (Optional, List) JSON keys to include in the alert notification, if left empty get the full log text in the alert notification. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `100` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `100` items. The minimum length is `0` items. * `severity` - (Required, String) Alert severity. * Constraints: Allowable values are: `info_or_unspecified`, `warning`, `critical`, `error`. 
diff --git a/website/docs/r/logs_dashboard.html.markdown b/website/docs/r/logs_dashboard.html.markdown index 3d87455344..9afb379e4e 100644 --- a/website/docs/r/logs_dashboard.html.markdown +++ b/website/docs/r/logs_dashboard.html.markdown @@ -149,7 +149,7 @@ Nested schema for **annotations**: * `id` - (Required, String) Unique identifier within the dashboard. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (Required, String) Name of the annotation. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source` - (Required, List) Source of the annotation events. Nested schema for **source**: * `logs` - (Optional, List) Logs source. @@ -158,15 +158,15 @@ Nested schema for **annotations**: * Constraints: The maximum length is `10` items. The minimum length is `0` items. Nested schema for **label_fields**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Required, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The Lucene query string. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `message_template` - (Optional, String) Template for the annotation message. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `strategy` - (Required, List) Strategy for turning logs data into annotations. Nested schema for **strategy**: * `duration` - (Optional, List) Event start timestamp and duration are extracted from the log entry. @@ -174,13 +174,13 @@ Nested schema for **annotations**: * `duration_field` - (Required, List) Field to count distinct values of. Nested schema for **duration_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `start_timestamp_field` - (Required, List) Field to count distinct values of. Nested schema for **start_timestamp_field**: * `keypath` - (Optional, List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `instant` - (Optional, List) Event timestamp is extracted from the log entry. @@ -188,7 +188,7 @@ Nested schema for **annotations**: * `timestamp_field` - (Required, List) Field to count distinct values of. Nested schema for **timestamp_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `range` - (Optional, List) Event start and end timestamps are extracted from the log entry. @@ -196,31 +196,31 @@ Nested schema for **annotations**: * `end_timestamp_field` - (Required, List) Field to count distinct values of. Nested schema for **end_timestamp_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. 
* `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `start_timestamp_field` - (Required, List) Field to count distinct values of. Nested schema for **start_timestamp_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (Optional, List) Metrics source. Nested schema for **metrics**: * `labels` - (Optional, List) Labels to display in the annotation. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `message_template` - (Optional, String) Template for the annotation message. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `promql_query` - (Optional, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The PromQL query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `strategy` - (Optional, List) Strategy for turning metrics data into annotations. Nested schema for **strategy**: * `start_time_metric` - (Optional, List) Take first data point and use its value as annotation timestamp (instead of point own timestamp). Nested schema for **start_time_metric**: * `description` - (Optional, String) Brief description or summary of the dashboard's purpose or content. - * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `false` - (Optional, List) Auto refresh interval is set to off. Nested schema for **false**: * `filters` - (Optional, List) List of filters that can be applied to the dashboard's data. @@ -235,7 +235,7 @@ Nested schema for **filters**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. 
* `operator` - (Optional, List) Operator to use for filtering the logs. @@ -249,7 +249,7 @@ Nested schema for **filters**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -257,11 +257,11 @@ Nested schema for **filters**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -273,7 +273,7 @@ Nested schema for **filters**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -281,7 +281,7 @@ Nested schema for **filters**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `five_minutes` - (Optional, List) Auto refresh interval is set to five minutes. Nested schema for **five_minutes**: * `folder_id` - (Optional, List) Unique identifier of the folder containing the dashboard. @@ -291,7 +291,7 @@ Nested schema for **folder_id**: * `folder_path` - (Optional, List) Path of the folder containing the dashboard. Nested schema for **folder_path**: * `segments` - (Optional, List) The segments of the folder path. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `href` - (Optional, String) Unique identifier for the dashboard. * Constraints: The maximum length is `21` characters. The minimum length is `21` characters. The value must match regular expression `/^[a-zA-Z0-9]{21}$/`. * `layout` - (Required, List) Layout configuration for the dashboard's visual elements. @@ -300,7 +300,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **sections**: * `href` - (Optional, String) The unique identifier of the section within the layout. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (Required, List) Unique identifier of the folder containing the dashboard. Nested schema for **id**: * `value` - (Required, String) The UUID value. @@ -312,7 +312,7 @@ Nested schema for **layout**: Nested schema for **appearance**: * `height` - (Required, Integer) The height of the row. * `href` - (Optional, String) The unique identifier of the row within the layout. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (Required, List) Unique identifier of the folder containing the dashboard. Nested schema for **id**: * `value` - (Required, String) The UUID value. @@ -326,7 +326,7 @@ Nested schema for **layout**: * `bar_chart` - (Optional, List) Bar chart widget. Nested schema for **bar_chart**: * `color_scheme` - (Required, String) Supported vaues: classic, severity, cold, negative, green, red, blue. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `colors_by` - (Required, List) Coloring mode. Nested schema for **colors_by**: * `aggregation` - (Optional, List) Each aggregation will have different color and stack color will be derived from aggregation color. @@ -338,7 +338,7 @@ Nested schema for **layout**: * `data_mode_type` - (Optional, String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `group_name_template` - (Required, String) Template for bar labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `max_bars_per_chart` - (Required, Integer) Maximum number of bars to present in the chart. * `query` - (Required, List) Data source specific query, defines from where and how to fetch the data. 
Nested schema for **query**: @@ -347,7 +347,7 @@ Nested schema for **layout**: * `dataprime_query` - (Required, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Extra filter on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -356,7 +356,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -370,7 +370,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -378,11 +378,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -394,7 +394,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. 
The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -402,11 +402,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `stacked_group_name` - (Optional, String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `aggregation` - (Required, List) Aggregations. 
@@ -416,7 +416,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -426,7 +426,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (Optional, List) Calculate maximum value of log field. @@ -434,7 +434,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -442,7 +442,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. @@ -450,7 +450,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. 
@@ -459,7 +459,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (Optional, List) Extra filter on top of Lucene query. @@ -468,7 +468,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -482,7 +482,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -490,22 +490,22 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (Optional, List) Fiel to group by. * Constraints: The maximum length is `2` items. The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (Optional, List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (Optional, List) Metrics specific query. @@ -514,7 +514,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -526,7 +526,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -534,15 +534,15 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Labels to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (Optional, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (Optional, String) Label to stack by. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `scale_type` - (Required, String) Scale type. * Constraints: Allowable values are: `unspecified`, `linear`, `logarithmic`. * `sort_by` - (Required, String) Sorting mode. @@ -551,7 +551,7 @@ Nested schema for **layout**: Nested schema for **stack_definition**: * `max_slices_per_bar` - (Optional, Integer) Maximum number of slices per bar. * `stack_name_template` - (Optional, String) Template for stack slice label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (Required, String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `x_axis` - (Required, List) X axis mode. @@ -569,14 +569,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **columns**: * `field` - (Required, String) References a field in result set. In case of aggregation, it references the aggregation identifier. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `width` - (Optional, Integer) Column width. * `data_mode_type` - (Optional, String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `order_by` - (Optional, List) Column used for ordering the results. Nested schema for **order_by**: * `field` - (Optional, String) The field to order by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order_direction` - (Optional, String) The direction of the order: ascending or descending. * Constraints: Allowable values are: `unspecified`, `asc`, `desc`. * `query` - (Required, List) Data source specific query, defines from where and how to fetch the data. @@ -586,7 +586,7 @@ Nested schema for **layout**: * `dataprime_query` - (Required, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Extra filtering on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -595,7 +595,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -609,7 +609,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -617,11 +617,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. 
* `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -633,7 +633,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -641,7 +641,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. 
The minimum length is `0` items. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `filters` - (Optional, List) Extra filtering on top of the Lucene query. @@ -650,7 +650,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -664,7 +664,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -672,7 +672,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `grouping` - (Optional, List) Grouping and aggregation. Nested schema for **grouping**: * `aggregations` - (Optional, List) Aggregations. @@ -685,7 +685,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -695,7 +695,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. 
* `max` - (Optional, List) Calculate maximum value of log field. @@ -703,7 +703,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -711,7 +711,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. @@ -719,7 +719,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. @@ -728,32 +728,32 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `id` - (Required, String) Aggregation identifier, must be unique within grouping configuration. - * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `is_visible` - (Required, Boolean) Whether the aggregation is visible. * `name` - (Required, String) Aggregation name, used as column name. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `group_bys` - (Optional, List) Fields to group by. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **group_bys**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metrics` - (Optional, List) Metrics specific query. Nested schema for **metrics**: * `filters` - (Optional, List) Extra filtering on top of the PromQL query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -765,7 +765,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -773,11 +773,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (Required, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. 
The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `results_per_page` - (Required, Integer) Number of results per page. * `row_style` - (Required, String) Display style for table rows. * Constraints: Allowable values are: `unspecified`, `one_line`, `two_line`, `condensed`, `json`, `list`. @@ -794,7 +794,7 @@ Nested schema for **layout**: * `dataprime_query` - (Required, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Extra filters applied on top of Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -803,7 +803,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. 
@@ -817,7 +817,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -825,11 +825,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. 
Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -841,7 +841,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -849,7 +849,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `filters` - (Optional, List) Extra filters applied on top of Lucene query. @@ -858,7 +858,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -872,7 +872,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -880,7 +880,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs_aggregation` - (Optional, List) Aggregations. Nested schema for **logs_aggregation**: * `average` - (Optional, List) Calculate average value of log field. @@ -888,7 +888,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -898,7 +898,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (Optional, List) Calculate maximum value of log field. @@ -906,7 +906,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. 
The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -914,7 +914,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. @@ -922,7 +922,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. @@ -931,13 +931,13 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. 
Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metrics` - (Optional, List) Metrics specific query. Nested schema for **metrics**: * `aggregation` - (Required, String) Aggregation. When AGGREGATION_UNSPECIFIED is selected, widget uses instant query. Otherwise, it uses range query. @@ -946,7 +946,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. 
Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -958,7 +958,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -966,11 +966,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (Required, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `show_inner_arc` - (Required, Boolean) Show inner arc (styling). 
* `show_outer_arc` - (Required, Boolean) Show outer arc (styling). * `threshold_by` - (Required, String) What threshold color should be applied to: value or background. @@ -979,14 +979,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **thresholds**: * `color` - (Required, String) Color. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `from` - (Required, Float) Value at which the color should change. * `unit` - (Required, String) Query result value interpretation. * Constraints: Allowable values are: `unspecified`, `number`, `percent`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `horizontal_bar_chart` - (Optional, List) Horizontal bar chart widget. Nested schema for **horizontal_bar_chart**: * `color_scheme` - (Required, String) Color scheme name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `colors_by` - (Optional, List) Coloring mode. Nested schema for **colors_by**: * `aggregation` - (Optional, List) Each aggregation will have different color and stack color will be derived from aggregation color. @@ -999,7 +999,7 @@ Nested schema for **layout**: * Constraints: Allowable values are: `high_unspecified`, `archive`. 
* `display_on_bar` - (Optional, Boolean) Whether to display values on the bars. * `group_name_template` - (Optional, String) Template for bar labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `max_bars_per_chart` - (Optional, Integer) Maximum number of bars to display in the chart. * `query` - (Optional, List) Data source specific query, defines from where and how to fetch the data. Nested schema for **query**: @@ -1008,7 +1008,7 @@ Nested schema for **layout**: * `dataprime_query` - (Optional, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Extra filter on top of the Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -1017,7 +1017,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -1031,7 +1031,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1039,11 +1039,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -1055,7 +1055,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1063,11 +1063,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `stacked_group_name` - (Optional, String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `aggregation` - (Optional, List) Aggregations. @@ -1077,7 +1077,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -1087,7 +1087,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (Optional, List) Calculate maximum value of log field. @@ -1095,7 +1095,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -1103,7 +1103,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. 
@@ -1111,7 +1111,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. @@ -1120,7 +1120,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (Optional, List) Extra filter on top of the Lucene query. @@ -1129,7 +1129,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -1143,7 +1143,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1151,22 +1151,22 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (Optional, List) Fields to group by. * Constraints: The maximum length is `2` items. The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (Optional, List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (Optional, List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (Optional, List) Metrics specific query. @@ -1175,7 +1175,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -1187,7 +1187,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1195,15 +1195,15 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Labels to group by. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (Optional, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (Optional, String) Label to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `scale_type` - (Optional, String) Scale type. * Constraints: Allowable values are: `unspecified`, `linear`, `logarithmic`. * `sort_by` - (Optional, String) Sorting mode. @@ -1212,7 +1212,7 @@ Nested schema for **layout**: Nested schema for **stack_definition**: * `max_slices_per_bar` - (Optional, Integer) Maximum number of slices per bar. * `stack_name_template` - (Optional, String) Template for stack slice label. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (Optional, String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `y_axis_view_by` - (Optional, List) Y-axis view mode. @@ -1233,14 +1233,14 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **query_definitions**: * `color_scheme` - (Optional, String) Color scheme for the series. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `data_mode_type` - (Optional, String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `id` - (Required, String) Unique identifier of the query within the widget. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `is_visible` - (Required, Boolean) Whether data for this query should be visible on the chart. * `name` - (Optional, String) Query name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `query` - (Required, List) Data source specific query, defines from where and how to fetch the data. Nested schema for **query**: * `dataprime` - (Optional, List) Dataprime language based query. @@ -1248,7 +1248,7 @@ Nested schema for **layout**: * `dataprime_query` - (Required, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Filters to be applied to query results. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -1257,7 +1257,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -1271,7 +1271,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1279,11 +1279,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -1295,7 +1295,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. 
Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1303,7 +1303,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `aggregations` - (Optional, List) Aggregations. @@ -1314,7 +1314,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -1324,7 +1324,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (Optional, List) Calculate maximum value of log field. @@ -1332,7 +1332,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -1340,7 +1340,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. @@ -1348,7 +1348,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. @@ -1357,7 +1357,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (Optional, List) Extra filtering on top of the Lucene query. @@ -1366,7 +1366,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -1380,7 +1380,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1388,27 +1388,27 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. 
- * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_by` - (Optional, List) Group by fields (deprecated). - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_bys` - (Optional, List) Group by fields. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **group_bys**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `metrics` - (Optional, List) Metrics specific query. Nested schema for **metrics**: * `filters` - (Optional, List) Filtering to be applied to query results. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -1420,7 +1420,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1428,11 +1428,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. 
The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `promql_query` - (Optional, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `resolution` - (Required, List) Resolution of the data. Nested schema for **resolution**: * `buckets_presented` - (Optional, Integer) Maximum number of data points to fetch. @@ -1443,7 +1443,7 @@ Nested schema for **layout**: * `series_count_limit` - (Optional, String) Maximum number of series to display. * Constraints: The maximum length is `19` characters. The minimum length is `1` character. The value must match regular expression `/^-?\\d{1,19}$/`. * `series_name_template` - (Optional, String) Template for series name in legend and tooltip. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (Optional, String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `stacked_line` - (Optional, String) Stacked lines. 
@@ -1456,17 +1456,17 @@ Nested schema for **layout**: * `markdown` - (Optional, List) Markdown widget. Nested schema for **markdown**: * `markdown_text` - (Required, String) Markdown text to render. - * Constraints: The maximum length is `10000` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `10000` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `tooltip_text` - (Optional, String) Tooltip text on hover. - * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `1000` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `pie_chart` - (Optional, List) Pie chart widget. Nested schema for **pie_chart**: * `color_scheme` - (Required, String) Color scheme name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `data_mode_type` - (Optional, String) Data mode type. * Constraints: Allowable values are: `high_unspecified`, `archive`. * `group_name_template` - (Optional, String) Template for group labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `label_definition` - (Required, List) Label settings. Nested schema for **label_definition**: * `is_visible` - (Optional, Boolean) Controls whether to show the label. @@ -1484,7 +1484,7 @@ Nested schema for **layout**: * `dataprime_query` - (Required, List) Dataprime query. Nested schema for **dataprime_query**: * `text` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Optional, List) Extra filters on top of Dataprime query. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: @@ -1493,7 +1493,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. @@ -1507,7 +1507,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. 
Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1515,11 +1515,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metrics` - (Optional, List) Filtering to be applied to query results. Nested schema for **metrics**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. 
@@ -1531,7 +1531,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1539,11 +1539,11 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `stacked_group_name` - (Optional, String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `logs` - (Optional, List) Logs specific query. Nested schema for **logs**: * `aggregation` - (Required, List) Aggregations. @@ -1553,7 +1553,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `count` - (Optional, List) Count the number of entries. @@ -1563,7 +1563,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `max` - (Optional, List) Calculate maximum value of log field. 
@@ -1571,7 +1571,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `min` - (Optional, List) Calculate minimum value of log field. @@ -1579,7 +1579,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percentile` - (Optional, List) Calculate percentile value of log field. @@ -1587,7 +1587,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. 
+ * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `percent` - (Required, Float) Value in range (0, 100]. @@ -1596,7 +1596,7 @@ Nested schema for **layout**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `filters` - (Optional, List) Extra filters on top of Lucene query. @@ -1605,7 +1605,7 @@ Nested schema for **layout**: * `observation_field` - (Optional, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `operator` - (Optional, List) Operator to use for filtering the logs. 
@@ -1619,7 +1619,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1627,22 +1627,22 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names_fields` - (Optional, List) Fields to group by. * Constraints: The maximum length is `2` items. The minimum length is `1` item. Nested schema for **group_names_fields**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. 
* Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `lucene_query` - (Optional, List) Lucene query. Nested schema for **lucene_query**: * `value` - (Optional, String) The query string. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name_field` - (Optional, List) Field to count distinct values of. Nested schema for **stacked_group_name_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metrics` - (Optional, List) Metrics specific query. @@ -1651,7 +1651,7 @@ Nested schema for **layout**: * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: * `label` - (Optional, String) Label associated with the metric. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `operator` - (Optional, List) Operator to use for filtering the logs. 
Nested schema for **operator**: * `equals` - (Optional, List) Equality comparison. @@ -1663,7 +1663,7 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `not_equals` - (Optional, List) Non-equality comparison. Nested schema for **not_equals**: * `selection` - (Optional, List) Selection criteria for the non-equality comparison. @@ -1671,36 +1671,36 @@ Nested schema for **layout**: * `list` - (Optional, List) Represents a selection from a list of values. Nested schema for **list**: * `values` - (Optional, List) List of values for the selection. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `group_names` - (Optional, List) Fields to group by. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `2` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `2` items. The minimum length is `1` item. * `promql_query` - (Required, List) PromQL query. Nested schema for **promql_query**: * `value` - (Optional, String) The query string. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `stacked_group_name` - (Optional, String) Field to stack by. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `show_legend` - (Required, Boolean) Controls whether to show the legend. * `stack_definition` - (Required, List) Stack definition. Nested schema for **stack_definition**: * `max_slices_per_stack` - (Optional, Integer) Maximum number of slices per stack. * `stack_name_template` - (Optional, String) Template for stack labels. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `unit` - (Optional, String) Unit of the data. * Constraints: Allowable values are: `unspecified`, `microseconds`, `milliseconds`, `seconds`, `bytes`, `kbytes`, `mbytes`, `gbytes`, `bytes_iec`, `kibytes`, `mibytes`, `gibytes`, `eur_cents`, `eur`, `usd_cents`, `usd`. * `description` - (Optional, String) Widget description. - * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. 
+ * Constraints: The maximum length is `200` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `href` - (Optional, String) Widget identifier within the dashboard. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `id` - (Required, List) Unique identifier of the folder containing the dashboard. Nested schema for **id**: * `value` - (Required, String) The UUID value. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `title` - (Required, String) Widget title. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `updated_at` - (Optional, String) Last update timestamp. * `name` - (Required, String) Display name of the dashboard. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `relative_time_frame` - (Optional, String) Relative time frame specifying a duration from the current time. 
* Constraints: The maximum length is `10` characters. The minimum length is `2` characters. The value must match regular expression `/^[0-9]+[smhdw]?$/`. * `two_minutes` - (Optional, List) Auto refresh interval is set to two minutes. @@ -1719,33 +1719,33 @@ Nested schema for **variables**: * `list` - (Optional, List) Specific values are selected. Nested schema for **list**: * `values` - (Optional, List) Selected values. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `source` - (Required, List) Variable value source. Nested schema for **source**: * `constant_list` - (Optional, List) List of constant values. Nested schema for **constant_list**: * `values` - (Required, List) List of constant values. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `1` item. * `logs_path` - (Optional, List) Unique values for a given logs path. Nested schema for **logs_path**: * `observation_field` - (Required, List) Field to count distinct values of. Nested schema for **observation_field**: * `keypath` - (Optional, List) Path within the dataset scope. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. 
The minimum length is `1` item. * `scope` - (Optional, String) Scope of the dataset. * Constraints: Allowable values are: `unspecified`, `user_data`, `label`, `metadata`. * `metric_label` - (Optional, List) Unique values for a given metric label. Nested schema for **metric_label**: * `label` - (Required, String) Metric label to source unique values from. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `metric_name` - (Required, String) Metric name to source unique values from. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `values_order_direction` - (Required, String) The direction of the order: ascending or descending. * Constraints: Allowable values are: `unspecified`, `asc`, `desc`. * `display_name` - (Required, String) Name used in variable UI. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `name` - (Required, String) Name of the variable which can be used in templates. - * Constraints: The maximum length is `100` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `100` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference diff --git a/website/docs/r/logs_dashboard_folder.html.markdown b/website/docs/r/logs_dashboard_folder.html.markdown index 2baef5f3ec..59e628b886 100644 --- a/website/docs/r/logs_dashboard_folder.html.markdown +++ b/website/docs/r/logs_dashboard_folder.html.markdown @@ -30,9 +30,9 @@ You can specify the following arguments for this resource. * `region` - (Optional, Forces new resource, String) Cloud Logs Instance Region. * `endpoint_type` - (Optional, String) Cloud Logs Instance Endpoint type. Allowed values `public` and `private`. * `name` - (Required, String) The dashboard folder name, required. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `parent_id` - (Optional, String) The dashboard folder parent ID, optional. If not set, the folder is a root folder, if set, the folder is a subfolder of the parent folder and needs to be a uuid. - * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
## Attribute Reference diff --git a/website/docs/r/logs_data_access_rule.html.markdown b/website/docs/r/logs_data_access_rule.html.markdown index 6bfcad750f..02473beeba 100644 --- a/website/docs/r/logs_data_access_rule.html.markdown +++ b/website/docs/r/logs_data_access_rule.html.markdown @@ -38,7 +38,7 @@ You can specify the following arguments for this resource. * `description` - (Optional, String) Optional Data Access Rule Description. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\-\\s]+$/`. * `display_name` - (Required, String) Data Access Rule Display Name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `filters` - (Required, List) List of filters that the Data Access Rule is composed of. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **filters**: diff --git a/website/docs/r/logs_e2m.html.markdown b/website/docs/r/logs_e2m.html.markdown index 910061ac2e..41da0f769f 100644 --- a/website/docs/r/logs_e2m.html.markdown +++ b/website/docs/r/logs_e2m.html.markdown @@ -42,15 +42,15 @@ You can specify the following arguments for this resource. * `logs_query` - (Optional, List) E2M logs query. Nested schema for **logs_query**: * `alias` - (Optional, String) Alias. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `applicationname_filters` - (Optional, List) Application name filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `lucene` - (Optional, String) Lucene query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity_filters` - (Optional, List) Severity type filters. * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `subsystemname_filters` - (Optional, List) Subsystem names filters. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `0` items. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. The minimum length is `0` items. * `metric_fields` - (Optional, List) E2M metric fields. * Constraints: The maximum length is `10` items. The minimum length is `0` items. Nested schema for **metric_fields**: @@ -69,20 +69,20 @@ Nested schema for **metric_fields**: * `sample_type` - (Optional, String) Sample type min/max. * Constraints: Allowable values are: `unspecified`, `min`, `max`. 
* `target_metric_name` - (Optional, String) Target metric field alias name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source_field` - (Optional, String) Source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_base_metric_name` - (Optional, String) Target metric field alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. * `metric_labels` - (Optional, List) E2M metric labels. * Constraints: The maximum length is `4096` items. The minimum length is `0` items. Nested schema for **metric_labels**: * `source_field` - (Optional, String) Metric label source field. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `target_label` - (Optional, String) Metric label target alias name. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[\\w\/-]+$/`. * `name` - (Required, String) Name of the E2M. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `type` - (Optional, String) E2M type. * Constraints: Allowable values are: `unspecified`, `logs2metrics`. @@ -93,14 +93,14 @@ After your resource is created, you can read values from the listed arguments an * `id` - The unique identifier of the logs_e2m resource. * `e2m_id` - The unique identifier of the logs e2m. * `create_time` - (String) E2M create time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `is_internal` - (Boolean) A flag that represents if the e2m is for internal usage. * `permutations` - (List) Represents the limit of the permutations and if the limit was exceeded. Nested schema for **permutations**: * `has_exceeded_limit` - (Boolean) Flag to indicate if limit was exceeded. * `limit` - (Integer) E2M permutation limit. * `update_time` - (String) E2M update time. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
## Import diff --git a/website/docs/r/logs_enrichment.html.markdown b/website/docs/r/logs_enrichment.html.markdown index 92b467e00b..c038203e89 100644 --- a/website/docs/r/logs_enrichment.html.markdown +++ b/website/docs/r/logs_enrichment.html.markdown @@ -41,7 +41,7 @@ Nested schema for **enrichment_type**: * `suspicious_ip` - (Optional, List) The suspicious ip enrichment. Nested schema for **suspicious_ip**: * `field_name` - (Required, Forces new resource, String) The enrichment field name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference diff --git a/website/docs/r/logs_outgoing_webhook.html.markdown b/website/docs/r/logs_outgoing_webhook.html.markdown index 2239f555d2..d70478e743 100644 --- a/website/docs/r/logs_outgoing_webhook.html.markdown +++ b/website/docs/r/logs_outgoing_webhook.html.markdown @@ -40,15 +40,15 @@ Nested schema for **ibm_event_notifications**: * `region_id` - (Required, String) The region ID of the selected IBM Event Notifications instance. * Constraints: The maximum length is `4096` characters. The minimum length is `4` characters. The value must match regular expression `/^[a-z]{2}-[a-z]+$/`. * `source_id` - (Optional, String) The ID of the created source in the IBM Event Notifications instance. Corresponds to the Cloud Logs instance crn. Not required when creating an Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `source_name` - (Optional, String) The name of the created source in the IBM Event Notifications instance. Not required when creating an Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `name` - (Required, String) The name of the Outbound Integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `type` - (Required, String) The type of the deployed Outbound Integrations to list. * Constraints: Allowable values are: `ibm_event_notifications`. * `url` - (Optional, String) The URL of the Outbound Integration. Null for IBM Event Notifications integration. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference diff --git a/website/docs/r/logs_policy.html.markdown b/website/docs/r/logs_policy.html.markdown index d9f185b306..802fefaa3e 100644 --- a/website/docs/r/logs_policy.html.markdown +++ b/website/docs/r/logs_policy.html.markdown @@ -40,7 +40,7 @@ You can specify the following arguments for this resource. 
* `application_rule` - (Optional, List) Rule for matching with application. Nested schema for **application_rule**: * `name` - (Required, String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (Required, String) Identifier of the rule. * Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. * `archive_retention` - (Optional, List) Archive retention definition. @@ -54,13 +54,13 @@ Nested schema for **log_rules**: * `severities` - (Optional, List) Source severities to match with. * Constraints: Allowable list items are: `unspecified`, `debug`, `verbose`, `info`, `warning`, `error`, `critical`. The maximum length is `4096` items. The minimum length is `0` items. * `name` - (Required, String) Name of policy. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `priority` - (Required, String) The data pipeline sources that match the policy rules will go through. * Constraints: Allowable values are: `type_unspecified`, `type_block`, `type_low`, `type_medium`, `type_high`. * `subsystem_rule` - (Optional, List) Rule for matching with application. Nested schema for **subsystem_rule**: * `name` - (Required, String) Value of the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. 
The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_type_id` - (Required, String) Identifier of the rule. * Constraints: Allowable values are: `unspecified`, `is`, `is_not`, `start_with`, `includes`. diff --git a/website/docs/r/logs_rule_group.html.markdown b/website/docs/r/logs_rule_group.html.markdown index 4e60114923..753aba22d2 100644 --- a/website/docs/r/logs_rule_group.html.markdown +++ b/website/docs/r/logs_rule_group.html.markdown @@ -55,10 +55,10 @@ You can specify the following arguments for this resource. * `endpoint_type` - (Optional, String) Cloud Logs Instance Endpoint type. Allowed values `public` and `private`. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `description` - (Optional, String) A description for the rule group, should express what is the rule group purpose. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `enabled` - (Optional, Boolean) Whether or not the rule is enabled. * `name` - (Required, String) The name of the rule group. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. 
* `order` - (Optional, Integer) // The order in which the rule group will be evaluated. The lower the order, the more priority the group will have. Not providing the order will by default create a group with the last order. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `rule_matchers` - (Optional, List) // Optional rule matchers which if matched will make the rule go through the rule group. @@ -67,7 +67,7 @@ Nested schema for **rule_matchers**: * `application_name` - (Optional, List) ApplicationName constraint. Nested schema for **application_name**: * `value` - (Required, String) Only logs with this ApplicationName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `severity` - (Optional, List) Severity constraint. Nested schema for **severity**: * `value` - (Required, String) Only logs with this severity value will match. @@ -75,7 +75,7 @@ Nested schema for **rule_matchers**: * `subsystem_name` - (Optional, List) SubsystemName constraint. Nested schema for **subsystem_name**: * `value` - (Required, String) Only logs with this SubsystemName value will match. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule_subgroups` - (Required, List) Rule subgroups. Will try to execute the first rule subgroup, and if not matched will try to match the next one in order. 
* Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **rule_subgroups**: @@ -93,7 +93,7 @@ Nested schema for **rule_subgroups**: * `id` - (Required, String) Unique identifier of the rule. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (Required, String) Name of the rule. - * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `255` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `order` - (Required, Integer) The ordering of the rule subgroup. Lower order will run first. 0 is considered as no value. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. * `parameters` - (Required, List) Parameters for a rule which specifies how it should run. @@ -126,33 +126,33 @@ Nested schema for **rule_subgroups**: Nested schema for **json_parse_parameters**: * `delete_source` - (Optional, Boolean) Whether or not to delete the source field after running this rule. * `destination_field` - (Required, String) Destination field under which to put the json object. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `override_dest` - (Required, Boolean) Destination field in which to put the json stringified content. * `json_stringify_parameters` - (Optional, List) Parameters for json stringify rule. 
Nested schema for **json_stringify_parameters**: * `delete_source` - (Optional, Boolean) Whether or not to delete the source field after running this rule. * `destination_field` - (Required, String) Destination field in which to put the json stringified content. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `parse_parameters` - (Optional, List) Parameters for parse rule. Nested schema for **parse_parameters**: * `destination_field` - (Required, String) In which field to put the parsed text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `rule` - (Required, String) Regex which will parse the source field and extract the json keys from it while removing the source field. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `remove_fields_parameters` - (Optional, List) Parameters for remove fields rule. Nested schema for **remove_fields_parameters**: * `fields` - (Required, List) Json field paths to drop from the log. - * Constraints: The list items must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. The maximum length is `4096` items. The minimum length is `1` item. + * Constraints: The list items must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. The maximum length is `4096` items. 
The minimum length is `1` item. * `replace_parameters` - (Optional, List) Parameters for replace rule. Nested schema for **replace_parameters**: * `destination_field` - (Required, String) In which field to put the modified text. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `replace_new_val` - (Required, String) The value to replace the matched text with. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `rule` - (Required, String) Regex which will match parts in the text to replace. * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^.*$/`. * `source_field` - (Required, String) A field on which value to execute the rule. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference diff --git a/website/docs/r/logs_view.html.markdown b/website/docs/r/logs_view.html.markdown index 763de51618..7549ce8f3c 100644 --- a/website/docs/r/logs_view.html.markdown +++ b/website/docs/r/logs_view.html.markdown @@ -75,16 +75,16 @@ Nested schema for **filters**: * Constraints: The maximum length is `4096` items. The minimum length is `1` item. Nested schema for **filters**: * `name` - (Required, String) Filter name. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `selected_values` - (Required, Map) Filter selected values. * `folder_id` - (Optional, String) View folder ID. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/`. * `name` - (Required, String) View name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `search_query` - (Optional, List) View search query. Nested schema for **search_query**: * `query` - (Required, String) View search query. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `time_selection` - (Required, List) View time selection. Nested schema for **time_selection**: * `custom_selection` - (Optional, List) Custom time selection. @@ -94,7 +94,7 @@ Nested schema for **time_selection**: * `quick_selection` - (Optional, List) Quick time selection. Nested schema for **quick_selection**: * `caption` - (Required, String) Quick time selection caption. 
- * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. * `seconds` - (Required, Integer) Quick time selection amount of seconds. * Constraints: The maximum value is `4294967295`. The minimum value is `0`. diff --git a/website/docs/r/logs_view_folder.html.markdown b/website/docs/r/logs_view_folder.html.markdown index 7cf071b3da..eb1053b316 100644 --- a/website/docs/r/logs_view_folder.html.markdown +++ b/website/docs/r/logs_view_folder.html.markdown @@ -30,7 +30,7 @@ You can specify the following arguments for this resource. * `region` - (Optional, Forces new resource, String) Cloud Logs Instance Region. * `endpoint_type` - (Optional, String) Cloud Logs Instance Endpoint type. Allowed values `public` and `private`. * `name` - (Required, String) Folder name. - * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `/^[A-Za-z0-9_\\.,\\-"{}()\\[\\]=!:#\/$|' ]+$/`. + * Constraints: The maximum length is `4096` characters. The minimum length is `1` character. The value must match regular expression `^[\\p{L}\\p{N}\\p{P}\\p{Z}\\p{S}\\p{M}]+$`. ## Attribute Reference diff --git a/website/docs/r/onboarding_catalog_deployment.html.markdown b/website/docs/r/onboarding_catalog_deployment.html.markdown index 753fdbe7cd..25e4d80ad1 100644 --- a/website/docs/r/onboarding_catalog_deployment.html.markdown +++ b/website/docs/r/onboarding_catalog_deployment.html.markdown @@ -17,9 +17,9 @@ Create, update, and delete onboarding_catalog_deployments with this resource. 
```hcl resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_instance" { active = true - catalog_plan_id = "catalog_plan_id" - catalog_product_id = "catalog_product_id" - disabled = true + catalog_plan_id = ibm_onboarding_catalog_plan.onboarding_catalog_plan_instance.onboarding_catalog_plan_id + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id + disabled = false kind = "deployment" metadata { rc_compatible = true @@ -28,22 +28,28 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst en { bullets { description = "description" - description_i18n = { "key" = "inner" } + description_i18n = { "key" = "anything as a string" } title = "title" - title_i18n = { "key" = "inner" } + title_i18n = { "key" = "anything as a string" } } media { caption = "caption" - caption_i18n = { "key" = "inner" } + caption_i18n = { "key" = "anything as a string" } thumbnail = "thumbnail" type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url = "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 @@ -51,9 +57,21 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst service { rc_provisionable = true iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true + } + deployment { + broker { + name = "name" + guid = "guid" + } + location = "location" + location_url = "location_url" + target_crn = "target_crn" } } - name = "name" + name = "deployment-eu-de" object_provider { name = "name" email = "email" @@ -65,7 +83,7 @@ resource "ibm_onboarding_catalog_deployment" "onboarding_catalog_deployment_inst long_description = "long_description" } } - product_id = 
"product_id" + product_id = ibm_onboarding_product.onboarding_product_instance.id } ``` @@ -85,11 +103,28 @@ You can specify the following arguments for this resource. * Constraints: Allowable values are: `deployment`. * `metadata` - (Optional, List) Global catalog deployment metadata. Nested schema for **metadata**: + * `deployment` - (Optional, List) The global catalog metadata of the deployment. + Nested schema for **deployment**: + * `broker` - (Optional, List) The global catalog metadata of the deployment. + Nested schema for **broker**: + * `guid` - (Optional, String) Crn or guid of the resource broker. + * Constraints: The maximum length is `2000` characters. The minimum length is `2` characters. The value must match regular expression `/^[ -~\\s]*$/`. + * `name` - (Optional, String) The name of the resource broker. + * Constraints: The maximum length is `2000` characters. The minimum length is `2` characters. The value must match regular expression `/^[ -~\\s]*$/`. + * `location` - (Optional, String) The global catalog deployment location. + * Constraints: The maximum length is `2000` characters. The minimum length is `1` character. The value must match regular expression `/^[ -~\\s]*$/`. + * `location_url` - (Optional, String) The global catalog deployment URL of location. + * Constraints: The maximum length is `2083` characters. The minimum length is `1` character. The value must match regular expression `/^(?!mailto:)(?:(?:http|https|ftp):\/\/)(?:\\S+(?::\\S*)?@)?(?:(?:(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}(?:\\.(?:[0-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))|(?:(?:[a-z\\u00a1-\\uffff0-9]+-?)*[a-z\\u00a1-\\uffff0-9]+)(?:\\.(?:[a-z\\u00a1-\\uffff0-9]+-?)*[a-z\\u00a1-\\uffff0-9]+)*(?:\\.(?:[a-z\\u00a1-\\uffff]{2,})))|localhost)(?::\\d{2,5})?(?:(\/|\\?|#)[^\\s]*)?$/`. + * `target_crn` - (Optional, String) Region crn. + * Constraints: The maximum length is `2000` characters. The minimum length is `1` character. 
The value must match regular expression `/^[ -~\\s]*$/`. * `rc_compatible` - (Optional, Boolean) Whether the object is compatible with the resource controller service. * `service` - (Optional, List) The global catalog metadata of the service. Nested schema for **service**: + * `bindable` - (Optional, Boolean) Deprecated. Controls the Connections tab on the Resource Details page. * `iam_compatible` - (Optional, Boolean) Whether the service is compatible with the IAM service. + * `plan_updateable` - (Optional, Boolean) Indicates plan update support and controls the Plan tab on the Resource Details page. * `rc_provisionable` - (Optional, Boolean) Whether the service is provisionable by the resource controller service. + * `service_key_supported` - (Optional, Boolean) Indicates service credentials support and controls the Service Credential tab on Resource Details page. * `ui` - (Optional, List) The UI metadata of this service. Nested schema for **ui**: * `hidden` - (Optional, Boolean) Whether the object is hidden from the consumption catalog. @@ -107,6 +142,8 @@ Nested schema for **metadata**: * `title` - (Optional, String) The descriptive title for the feature. * Constraints: The maximum length is `256` characters. The minimum length is `0` characters. The value must match regular expression `/^[ -~\\s]*$/`. * `title_i18n` - (Optional, Map) The descriptive title for the feature in translation. + * `embeddable_dashboard` - (Optional, String) On a service kind record this controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `media` - (Optional, List) The list of supporting media for this product. * Constraints: The maximum length is `100` items. The minimum length is `0` items. Nested schema for **media**: @@ -119,10 +156,22 @@ Nested schema for **metadata**: * Constraints: Allowable values are: `image`, `youtube`, `video_mp_4`, `video_webm`. 
* `url` - (Required, String) The URL that links to the media that shows off the product. * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. - * `urls` - (Optional, List) The UI based URLs. + * `urls` - (Optional, List) Metadata with URLs related to a service. Nested schema for **urls**: - * `doc_url` - (Optional, String) The URL for your product documentation. + * `apidocs_url` - (Optional, String) The URL for your product's API documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `catalog_details_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `custom_create_page_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `dashboard` - (Optional, String) Controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `doc_url` - (Optional, String) The URL for your product's documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `instructions_url` - (Optional, String) Controls the Getting Started tab on the Resource Details page. Setting it the content is loaded from the specified URL. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `terms_url` - (Optional, String) The URL for your product's end user license agreement. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. 
* `name` - (Required, String) The programmatic name of this deployment. * Constraints: The value must match regular expression `/^[a-z0-9\\-.]+$/`. * `object_provider` - (Required, List) The provider or owner of the product. @@ -165,5 +214,5 @@ The `id` property can be formed from `product_id`, `catalog_product_id`, `catalo # Syntax
-$ terraform import ibm_onboarding_catalog_deployment.onboarding_catalog_deployment //
+$ terraform import ibm_onboarding_catalog_deployment.onboarding_catalog_deployment product_id/catalog_product_id/catalog_plan_id/catalog_deployment_id
 
diff --git a/website/docs/r/onboarding_catalog_plan.html.markdown b/website/docs/r/onboarding_catalog_plan.html.markdown index a436683e59..0e59638eb1 100644 --- a/website/docs/r/onboarding_catalog_plan.html.markdown +++ b/website/docs/r/onboarding_catalog_plan.html.markdown @@ -17,8 +17,8 @@ Create, update, and delete onboarding_catalog_plans with this resource. ```hcl resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { active = true - catalog_product_id = "catalog_product_id" - disabled = true + catalog_product_id = ibm_onboarding_catalog_product.onboarding_catalog_product_instance.onboarding_catalog_product_id + disabled = false kind = "plan" metadata { rc_compatible = true @@ -27,32 +27,49 @@ resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { en { bullets { description = "description" - description_i18n = { "key" = "inner" } + description_i18n = { "key" = "anything as a string" } title = "title" - title_i18n = { "key" = "inner" } + title_i18n = { "key" = "anything as a string" } } media { caption = "caption" - caption_i18n = { "key" = "inner" } + caption_i18n = { "key" = "anything as a string" } thumbnail = "thumbnail" type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url = "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 } + service { + rc_provisionable = true + iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true + } pricing { type = "free" origin = "global_catalog" } + plan { + allow_internal_users = true + bindable = true + } } - name = "name" + name = "free-plan2" object_provider { name = "name" email = "email" @@ -64,7 +81,7 @@ resource "ibm_onboarding_catalog_plan" "onboarding_catalog_plan_instance" { 
long_description = "long_description" } } - product_id = "product_id" + product_id = ibm_onboarding_product.onboarding_product_instance.id } ``` @@ -82,13 +99,24 @@ You can specify the following arguments for this resource. * Constraints: Allowable values are: `plan`. * `metadata` - (Optional, List) Global catalog plan metadata. Nested schema for **metadata**: + * `plan` - (Optional, List) Metadata controlling Plan related settings. + Nested schema for **plan**: + * `allow_internal_users` - (Optional, Boolean) Controls if IBMers are allowed to provision this plan. + * `bindable` - (Optional, Boolean) Deprecated. Controls the Connections tab on the Resource Details page. * `pricing` - (Optional, List) The pricing metadata of this object. Nested schema for **pricing**: * `origin` - (Optional, String) The source of the pricing information: global_catalog or pricing_catalog. * Constraints: Allowable values are: `global_catalog`, `pricing_catalog`. * `type` - (Optional, String) The type of the pricing plan. - * Constraints: Allowable values are: `free`, `paid`, `Free`, `Paid`. + * Constraints: Allowable values are: `free`, `paid`, `Free`, `Paid`, `subscription`, `Subscription`. * `rc_compatible` - (Optional, Boolean) Whether the object is compatible with the resource controller service. + * `service` - (Optional, List) The global catalog metadata of the service. + Nested schema for **service**: + * `bindable` - (Optional, Boolean) Deprecated. Controls the Connections tab on the Resource Details page. + * `iam_compatible` - (Optional, Boolean) Whether the service is compatible with the IAM service. + * `plan_updateable` - (Optional, Boolean) Indicates plan update support and controls the Plan tab on the Resource Details page. + * `rc_provisionable` - (Optional, Boolean) Whether the service is provisionable by the resource controller service. 
+ * `service_key_supported` - (Optional, Boolean) Indicates service credentials support and controls the Service Credential tab on Resource Details page. * `ui` - (Optional, List) The UI metadata of this service. Nested schema for **ui**: * `hidden` - (Optional, Boolean) Whether the object is hidden from the consumption catalog. @@ -106,6 +134,8 @@ Nested schema for **metadata**: * `title` - (Optional, String) The descriptive title for the feature. * Constraints: The maximum length is `256` characters. The minimum length is `0` characters. The value must match regular expression `/^[ -~\\s]*$/`. * `title_i18n` - (Optional, Map) The descriptive title for the feature in translation. + * `embeddable_dashboard` - (Optional, String) On a service kind record this controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `media` - (Optional, List) The list of supporting media for this product. * Constraints: The maximum length is `100` items. The minimum length is `0` items. Nested schema for **media**: @@ -118,10 +148,22 @@ Nested schema for **metadata**: * Constraints: Allowable values are: `image`, `youtube`, `video_mp_4`, `video_webm`. * `url` - (Required, String) The URL that links to the media that shows off the product. * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. - * `urls` - (Optional, List) The UI based URLs. + * `urls` - (Optional, List) Metadata with URLs related to a service. Nested schema for **urls**: - * `doc_url` - (Optional, String) The URL for your product documentation. + * `apidocs_url` - (Optional, String) The URL for your product's API documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. 
+ * `catalog_details_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `custom_create_page_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `dashboard` - (Optional, String) Controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `doc_url` - (Optional, String) The URL for your product's documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `instructions_url` - (Optional, String) Controls the Getting Started tab on the Resource Details page. Setting it the content is loaded from the specified URL. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `terms_url` - (Optional, String) The URL for your product's end user license agreement. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `name` - (Required, String) The programmatic name of this plan. * Constraints: The value must match regular expression `/^[a-z0-9\\-.]+$/`. * `object_provider` - (Required, List) The provider or owner of the product. @@ -163,5 +205,5 @@ The `id` property can be formed from `product_id`, `catalog_product_id`, and `ca # Syntax
-$ terraform import ibm_onboarding_catalog_plan.onboarding_catalog_plan //
+$ terraform import ibm_onboarding_catalog_plan.onboarding_catalog_plan product_id/catalog_product_id/catalog_plan_id
 
diff --git a/website/docs/r/onboarding_catalog_product.html.markdown b/website/docs/r/onboarding_catalog_product.html.markdown index 7275371ab7..ca3867ea27 100644 --- a/website/docs/r/onboarding_catalog_product.html.markdown +++ b/website/docs/r/onboarding_catalog_product.html.markdown @@ -17,7 +17,7 @@ Create, update, and delete onboarding_catalog_products with this resource. ```hcl resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" { active = true - disabled = true + disabled = false images { image = "image" } @@ -40,11 +40,17 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" type = "image" url = "url" } + embeddable_dashboard = "embeddable_dashboard" } } urls { doc_url = "doc_url" + apidocs_url = "apidocs_url" terms_url = "terms_url" + instructions_url = "instructions_url" + catalog_details_url = "catalog_details_url" + custom_create_page_url = "custom_create_page_url" + dashboard = "dashboard" } hidden = true side_by_side_index = 1.0 @@ -52,6 +58,9 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" service { rc_provisionable = true iam_compatible = true + bindable = true + plan_updateable = true + service_key_supported = true } other { pc { @@ -93,9 +102,17 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" } } } + composite { + composite_kind = "service" + composite_tag = "composite_tag" + children { + kind = "service" + name = "name" + } + } } } - name = "name" + name = "1p-service-08-06" object_provider { name = "name" email = "email" @@ -107,7 +124,7 @@ resource "ibm_onboarding_catalog_product" "onboarding_catalog_product_instance" long_description = "long_description" } } - product_id = "product_id" + product_id = ibm_onboarding_product.onboarding_product_instance.id } ``` @@ -122,12 +139,26 @@ You can specify the following arguments for this resource. 
* `images` - (Optional, List) Images from the global catalog entry that help illustrate the service. Nested schema for **images**: * `image` - (Optional, String) The URL for your product logo. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `kind` - (Required, String) The kind of the global catalog object. - * Constraints: Allowable values are: `service`, `platform_service`. + * Constraints: Allowable values are: `service`, `platform_service`, `composite`. * `metadata` - (Optional, List) The global catalog service metadata object. Nested schema for **metadata**: * `other` - (Optional, List) The additional metadata of the service in global catalog. Nested schema for **other**: + * `composite` - (Optional, List) Optional metadata of the service defining it as a composite. + Nested schema for **composite**: + * `children` - (Optional, List) + * Constraints: The maximum length is `1000` items. The minimum length is `0` items. + Nested schema for **children**: + * `kind` - (Optional, String) The type of the composite child. + * Constraints: Allowable values are: `service`, `platform_service`. + * `name` - (Optional, String) The name of the composite child. + * Constraints: The maximum length is `100` characters. The minimum length is `2` characters. The value must match regular expression `/^[a-z0-9\\-.]+$/`. + * `composite_kind` - (Optional, String) The type of the composite service. + * Constraints: Allowable values are: `service`, `platform_service`. + * `composite_tag` - (Optional, String) The tag used for the composite parent and its children. + * Constraints: The maximum length is `100` characters. The minimum length is `2` characters. The value must match regular expression `/^[ -~\\s]*$/`. * `pc` - (Optional, List) The metadata of the service owned and managed by Partner Center - Sell. Nested schema for **pc**: * `support` - (Optional, List) The support metadata of the service. 
@@ -177,8 +208,11 @@ Nested schema for **metadata**: * `rc_compatible` - (Optional, Boolean) Whether the object is compatible with the resource controller service. * `service` - (Optional, List) The global catalog metadata of the service. Nested schema for **service**: + * `bindable` - (Optional, Boolean) Deprecated. Controls the Connections tab on the Resource Details page. * `iam_compatible` - (Optional, Boolean) Whether the service is compatible with the IAM service. + * `plan_updateable` - (Optional, Boolean) Indicates plan update support and controls the Plan tab on the Resource Details page. * `rc_provisionable` - (Optional, Boolean) Whether the service is provisionable by the resource controller service. + * `service_key_supported` - (Optional, Boolean) Indicates service credentials support and controls the Service Credential tab on Resource Details page. * `ui` - (Optional, List) The UI metadata of this service. Nested schema for **ui**: * `hidden` - (Optional, Boolean) Whether the object is hidden from the consumption catalog. @@ -196,6 +230,8 @@ Nested schema for **metadata**: * `title` - (Optional, String) The descriptive title for the feature. * Constraints: The maximum length is `256` characters. The minimum length is `0` characters. The value must match regular expression `/^[ -~\\s]*$/`. * `title_i18n` - (Optional, Map) The descriptive title for the feature in translation. + * `embeddable_dashboard` - (Optional, String) On a service kind record this controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `media` - (Optional, List) The list of supporting media for this product. * Constraints: The maximum length is `100` items. The minimum length is `0` items. Nested schema for **media**: @@ -208,10 +244,22 @@ Nested schema for **metadata**: * Constraints: Allowable values are: `image`, `youtube`, `video_mp_4`, `video_webm`. 
* `url` - (Required, String) The URL that links to the media that shows off the product. * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. - * `urls` - (Optional, List) The UI based URLs. + * `urls` - (Optional, List) Metadata with URLs related to a service. Nested schema for **urls**: - * `doc_url` - (Optional, String) The URL for your product documentation. + * `apidocs_url` - (Optional, String) The URL for your product's API documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `catalog_details_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `custom_create_page_url` - (Optional, String) Controls the Provisioning page URL, if set the assumption is that this URL is the provisioning URL for your service. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `dashboard` - (Optional, String) Controls if your service has a custom dashboard or Resource Detail page. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `doc_url` - (Optional, String) The URL for your product's documentation. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. + * `instructions_url` - (Optional, String) Controls the Getting Started tab on the Resource Details page. Setting it the content is loaded from the specified URL. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. * `terms_url` - (Optional, String) The URL for your product's end user license agreement. + * Constraints: The maximum length is `2083` characters. The minimum length is `0` characters. 
* `name` - (Required, String) The programmatic name of this product. * Constraints: The value must match regular expression `/^[a-z0-9\\-.]+$/`. * `object_provider` - (Required, List) The provider or owner of the product. @@ -252,5 +300,5 @@ The `id` property can be formed from `product_id`, and `catalog_product_id` in t # Syntax
-$ terraform import ibm_onboarding_catalog_product.onboarding_catalog_product /;
+$ terraform import ibm_onboarding_catalog_product.onboarding_catalog_product <product_id>/<catalog_product_id>
 
diff --git a/website/docs/r/onboarding_iam_registration.html.markdown b/website/docs/r/onboarding_iam_registration.html.markdown index 5db8927f8e..c03add751b 100644 --- a/website/docs/r/onboarding_iam_registration.html.markdown +++ b/website/docs/r/onboarding_iam_registration.html.markdown @@ -62,7 +62,8 @@ resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance zh_tw = "zh_tw" zh_cn = "zh_cn" } - product_id = "product_id" + name = "name" + product_id = ibm_onboarding_product.onboarding_product_instance.id resource_hierarchy_attribute { key = "key" value = "value" @@ -198,7 +199,7 @@ resource "ibm_onboarding_iam_registration" "onboarding_iam_registration_instance zh_cn = "zh_cn" } options { - access_policy = { "key" = "inner" } + access_policy = true policy_type = [ "access" ] account_type = "enterprise" } @@ -297,7 +298,7 @@ Nested schema for **display_name**: * `enabled` - (Optional, Boolean) Whether the service is enabled or disabled for IAM. * `env` - (Optional, String) The environment to fetch this object from. * Constraints: The maximum length is `64` characters. The minimum length is `1` character. The value must match regular expression `/^[a-z]+$/`. -* `name` - (Optional, String) The IAM registration name, which must be the programmatic name of the product. +* `name` - (Required, String) The IAM registration name, which must be the programmatic name of the product. * Constraints: The value must match regular expression `/^[a-z0-9\\-.]+$/`. * `parent_ids` - (Optional, List) The list of parent IDs for product access management. * Constraints: The list items must match regular expression `/^[a-z0-9\\-.]+$/`. The maximum length is `100` items. The minimum length is `0` items. @@ -522,7 +523,7 @@ Nested schema for **supported_roles**: * Constraints: The maximum length is `256` characters. The minimum length is `0` characters. The value must match regular expression `/^[ -~\\s]*$/`. 
* `options` - (Optional, List) The supported role options. Nested schema for **options**: - * `access_policy` - (Optional, Map) Optional opt-in to require access control on the role. + * `access_policy` - (Required, Boolean) Optional opt-in to require access control on the role. * `account_type` - (Optional, String) Optional opt-in to require checking account type when applying the role. * Constraints: Allowable values are: `enterprise`. * `policy_type` - (Optional, List) Optional opt-in to require checking policy type when applying the role. @@ -548,5 +549,5 @@ The `name` property can be formed from `product_id`, and `name` in the following # Syntax
-$ terraform import ibm_onboarding_iam_registration.onboarding_iam_registration /;
+$ terraform import ibm_onboarding_iam_registration.onboarding_iam_registration product_id/name;
 
diff --git a/website/docs/r/onboarding_product.html.markdown b/website/docs/r/onboarding_product.html.markdown index 2712779297..59edcaacca 100644 --- a/website/docs/r/onboarding_product.html.markdown +++ b/website/docs/r/onboarding_product.html.markdown @@ -27,7 +27,7 @@ resource "ibm_onboarding_product" "onboarding_product_instance" { role = "role" } } - type = "software" + type = "service" } ``` @@ -64,6 +64,8 @@ After your resource is created, you can read values from the listed arguments an * `account_id` - (String) The IBM Cloud account ID of the provider. * `approver_resource_id` - (String) The ID of the approval workflow of your product. * `global_catalog_offering_id` - (String) The ID of a global catalog object. +* `iam_registration_id` - (String) IAM registration identifier. + * Constraints: The maximum length is `512` characters. The minimum length is `2` characters. The value must match regular expression `/^[a-z0-9\\-.]+$/`. * `private_catalog_id` - (String) The ID of the private catalog that contains the product. Only applicable for software type products. * Constraints: The value must match regular expression `/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/`. * `private_catalog_offering_id` - (String) The ID of the linked private catalog product. Only applicable for software type products. @@ -77,5 +79,5 @@ You can import the `ibm_onboarding_product` resource by using `id`. The ID of a # Syntax
-$ terraform import ibm_onboarding_product.onboarding_product ;
+$ terraform import ibm_onboarding_product.onboarding_product id;
 
diff --git a/website/docs/r/onboarding_registration.html.markdown b/website/docs/r/onboarding_registration.html.markdown index 100a78f658..bfb633cb4d 100644 --- a/website/docs/r/onboarding_registration.html.markdown +++ b/website/docs/r/onboarding_registration.html.markdown @@ -16,8 +16,8 @@ Create, update, and delete onboarding_registrations with this resource. ```hcl resource "ibm_onboarding_registration" "onboarding_registration_instance" { - account_id = "account_id" - company_name = "company_name" + account_id = "4a5c3c51b97a446fbb1d0e1ef089823b" + company_name = "Beautiful Company" primary_contact { name = "name" email = "email" @@ -49,8 +49,6 @@ Nested schema for **primary_contact**: After your resource is created, you can read values from the listed arguments and the following attributes. * `id` - The unique identifier of the onboarding_registration. -* `account_dpa_id` - (String) The ID of the IBM Digital Provider Agreement. -* `account_dra_id` - (String) The ID of the IBM Digital Platform Reseller Agreement. * `created_at` - (String) The time when the registration was created. * `updated_at` - (String) The time when the registration was updated. @@ -61,5 +59,5 @@ You can import the `ibm_onboarding_registration` resource by using `id`. The ID # Syntax
-$ terraform import ibm_onboarding_registration.onboarding_registration ;
+$ terraform import ibm_onboarding_registration.onboarding_registration id;
 
diff --git a/website/docs/r/onboarding_resource_broker.html.markdown b/website/docs/r/onboarding_resource_broker.html.markdown index a13d8e24cb..dddfd283b6 100644 --- a/website/docs/r/onboarding_resource_broker.html.markdown +++ b/website/docs/r/onboarding_resource_broker.html.markdown @@ -16,11 +16,15 @@ Create, update, and delete onboarding_resource_brokers with this resource. ```hcl resource "ibm_onboarding_resource_broker" "onboarding_resource_broker_instance" { - auth_password = "auth_password" - auth_scheme = "auth_scheme" - auth_username = "auth_username" - broker_url = "broker_url" - name = "name" + allow_context_updates = false + auth_scheme = "bearer" + auth_username = "apikey" + broker_url = "https://broker-url-for-my-service.com" + catalog_type = "service" + name = "brokername" + region = "global" + resource_group_crn = "crn:v1:bluemix:public:resource-controller::a/4a5c3c51b97a446fbb1d0e1ef089823b::resource-group:4fae20bd538a4a738475350dfdc1596f" + state = "active" type = "provision_through" } ``` @@ -30,9 +34,11 @@ resource "ibm_onboarding_resource_broker" "onboarding_resource_broker_instance" You can specify the following arguments for this resource. * `allow_context_updates` - (Optional, Boolean) Whether the resource controller will call the broker for any context changes to the instance. Currently, the only context related change is an instance name update. -* `auth_password` - (Required, String) The authentication password to reach the broker. +* `auth_password` - (Optional, String) The authentication password to reach the broker. * `auth_scheme` - (Required, String) The supported authentication scheme for the broker. -* `auth_username` - (Required, String) The authentication username to reach the broker. + * Constraints: Allowable values are: `bearer`, `bearer-crn`. +* `auth_username` - (Optional, String) The authentication username to reach the broker. + * Constraints: Allowable values are: `apikey`. 
* `broker_url` - (Required, String) The URL associated with the broker application. * `catalog_type` - (Optional, String) To enable the provisioning of your broker, set this parameter value to `service`. * `env` - (Optional, String) The environment to fetch this object from. @@ -78,5 +84,5 @@ You can import the `ibm_onboarding_resource_broker` resource by using `id`. The # Syntax
-$ terraform import ibm_onboarding_resource_broker.onboarding_resource_broker ;
+$ terraform import ibm_onboarding_resource_broker.onboarding_resource_broker id;
 
diff --git a/website/docs/r/pi_capture.html.markdown b/website/docs/r/pi_capture.html.markdown index 6bc24cc06d..1ed656619f 100644 --- a/website/docs/r/pi_capture.html.markdown +++ b/website/docs/r/pi_capture.html.markdown @@ -71,11 +71,12 @@ Review the argument references that you can specify for your resource. - `pi_capture_cloud_storage_access_key`- (Optional,String) Cloud Storage Access key - `pi_capture_cloud_storage_secret_key`- (Optional,String) Cloud Storage Secret key - `pi_capture_storage_image_path` - (Optional,String) Cloud Storage Image Path (bucket-name [/folder/../..]) - +- `pi_user_tags` - (Optional, List) List of user tags attached to the resource. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `crn` - (String) The CRN of the resource. - `id` - (String) The image id of the capture instance. The ID is composed of `//`. - `image_id` - (String) The image id of the capture instance. diff --git a/website/docs/r/pi_image.html.markdown b/website/docs/r/pi_image.html.markdown index 77ea5a66b8..c65d404edd 100644 --- a/website/docs/r/pi_image.html.markdown +++ b/website/docs/r/pi_image.html.markdown @@ -95,11 +95,13 @@ Review the argument references that you can specify for your resource. - `license_type` - (Required, String) Origin of the license of the product. Allowable value is: `byol`. - `product` - (Required, String) Product within the image.Allowable values are: `Hana`, `Netweaver`. - `vendor` - (Required, String) Vendor supporting the product. Allowable value is: `SAP`. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `crn` - (String) The CRN of this resource. - `id` - (String) The unique identifier of an image. The ID is composed of `/`. 
- `image_id` - (String) The unique identifier of an image. diff --git a/website/docs/r/pi_instance.html.markdown b/website/docs/r/pi_instance.html.markdown index efc06f6aab..1761b45f43 100644 --- a/website/docs/r/pi_instance.html.markdown +++ b/website/docs/r/pi_instance.html.markdown @@ -67,6 +67,7 @@ Review the argument references that you can specify for your resource. - `pi_affinity_volume`- (Optional, String) Volume (ID or Name) to base storage affinity policy against; required if requesting `affinity` and `pi_affinity_instance` is not provided. - `pi_anti_affinity_instances` - (Optional, String) List of pvmInstances to base storage anti-affinity policy against; required if requesting `anti-affinity` and `pi_anti_affinity_volumes` is not provided. - `pi_anti_affinity_volumes`- (Optional, String) List of volumes to base storage anti-affinity policy against; required if requesting `anti-affinity` and `pi_anti_affinity_instances` is not provided. +- `pi_boot_volume_replication_enabled` - (Optional, Boolean) Indicates if the boot volume should be replication enabled or not. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_deployment_target` - (Optional, List) The deployment of a dedicated host. Max items: 1. @@ -106,6 +107,7 @@ Review the argument references that you can specify for your resource. - `pi_replicants` - (Optional, Integer) The number of instances that you want to provision with the same configuration. If this parameter is not set, `1` is used by default. - `pi_replication_policy` - (Optional, String) The replication policy that you want to use, either `affinity`, `anti-affinity` or `none`. If this parameter is not set, `none` is used by default. - `pi_replication_scheme` - (Optional, String) The replication scheme that you want to set, either `prefix` or `suffix`. +- `pi_replication_sites` - (Optional, List) Indicates the replication sites of the boot volume. 
- `pi_sap_profile_id` - (Optional, String) SAP Profile ID for the amount of cores and memory. - Required only when creating SAP instances. - `pi_sap_deployment_type` - (Optional, String) Custom SAP deployment type information (For Internal Use Only). @@ -113,10 +115,11 @@ Review the argument references that you can specify for your resource. - `pi_storage_pool` - (Optional, String) Storage Pool for server deployment; if provided then `pi_affinity_policy` will be ignored; Only valid when you deploy one of the IBM supplied stock images. Storage pool for a custom image (an imported image or an image that is created from a VM capture) defaults to the storage pool the image was created in. - `pi_storage_pool_affinity` - (Optional, Boolean) Indicates if all volumes attached to the server must reside in the same storage pool. The default value is `true`. To attach data volumes from a different storage pool (mixed storage) set to `false` and use `pi_volume_attach` resource. Once set to `false`, cannot be set back to `true` unless all volumes attached reside in the same storage type and pool. - `pi_storage_type` - (Optional, String) - Storage type for server deployment; If storage type is not provided the storage type will default to `tier3`. To get a list of available storage types, please use the [ibm_pi_storage_types_capacity](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/pi_storage_types_capacity) data source. -- `pi_storage_connection` - (Optional, String) - Storage Connectivity Group (SCG) for server deployment. Only supported value is `vSCSI`. +- `pi_storage_connection` - (Optional, String) - Storage Connectivity Group (SCG) for server deployment. Supported values are `vSCSI`, `maxVolumeSupport`. - `pi_sys_type` - (Optional, String) The type of system on which to create the VM (e880/e980/e1080/s922/s1022). - Supported SAP system types are (e880/e980/e1080). 
- `pi_user_data` - (Optional, String) The user data `cloud-init` to pass to the instance during creation. It can be a base64 encoded or an unencoded string. If it is an unencoded string, the provider will encode it before it passing it down. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. - `pi_virtual_cores_assigned` - (Optional, Integer) Specify the number of virtual cores to be assigned. - `pi_virtual_optical_device` - (Optional, String) Virtual Machine's Cloud Initialization Virtual Optical Device. - `pi_volume_ids` - (Optional, List of String) The list of volume IDs that you want to attach to the instance during creation. @@ -125,6 +128,7 @@ Review the argument references that you can specify for your resource. In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `crn` - (String) The CRN of this resource. - `fault` - (Map) Fault information, if any. Nested scheme for `fault`: diff --git a/website/docs/r/pi_network.html.markdown b/website/docs/r/pi_network.html.markdown index b74018f7c4..e8ece20e93 100644 --- a/website/docs/r/pi_network.html.markdown +++ b/website/docs/r/pi_network.html.markdown @@ -72,11 +72,13 @@ Review the argument references that you can specify for your resource. - `pi_network_jumbo` - (Deprecated, Optional, Bool) MTU Jumbo option of the network (for multi-zone locations only). - `pi_network_mtu` - (Optional, Integer) Maximum Transmission Unit option of the network, min size = 1450 & max size = 9000. - `pi_network_access_config` - (Optional, String) The network communication configuration option of the network (for satellite locations only). +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. +- `crn` - (String) The CRN of this resource. 
- `id` - (String) The unique identifier of the network. The ID is composed of `/`. - `network_id` - (String) The unique identifier of the network. - `vlan_id` - (Integer) The ID of the VLAN that your network is attached to. diff --git a/website/docs/r/pi_network_address_group.html.markdown b/website/docs/r/pi_network_address_group.html.markdown new file mode 100644 index 0000000000..e2859aa1b5 --- /dev/null +++ b/website/docs/r/pi_network_address_group.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_address_group" +description: |- + Manages network address group. +subcategory: "Power Systems" +--- + +# ibm_pi_network_address_group + +Create, update, and delete a network address group. + +## Example Usage + +The following example creates a network address group. + +```terraform + resource "ibm_pi_network_address_group" "network_address_group_instance" { + pi_cloud_instance_id = "" + pi_name = "name" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +ibm_pi_network_address_group provides the following [timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **delete** - (Default 5 minutes) Used for deleting network address group. + +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_name` - (Required, String) The name of the Network Address Group. +- `pi_user_tags` - (Optional, List) List of user tags attached to the resource. 
+ +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network address group's crn. +- `id` - (String) The unique identifier of this resource. The ID is composed of `/`. +- `network_address_group_id` - (String) The unique identifier of the network address group. +- `members` - (List) The list of IP addresses in CIDR notation in the network address group. + + Nested schema for `members`: + - `cidr` - (String) The IP addresses in CIDR notation. + - `id` - (String) The id of the network address group member IP addresses. + +## Import + +The `ibm_pi_network_address_group` resource can be imported by using `cloud_instance_id` and `network_address_group_id`. + +## Example + +```bash +terraform import ibm_pi_network_address_group.example d7bec597-4726-451f-8a63-e62e6f19c32c/041b186b-9598-4cb9-bf70-966d7b9d1dc8 +``` diff --git a/website/docs/r/pi_network_address_group_member.html.markdown b/website/docs/r/pi_network_address_group_member.html.markdown new file mode 100644 index 0000000000..15204bf033 --- /dev/null +++ b/website/docs/r/pi_network_address_group_member.html.markdown @@ -0,0 +1,78 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_address_group_member" +description: |- + Manages pi_network_address_group_member. +subcategory: "Power Systems" +--- + +# ibm_pi_network_address_group_member + +Add or remove a network address group member. + +## Example Usage + +```terraform + resource "ibm_pi_network_address_group_member" "network_address_group_member" { + pi_cloud_instance_id = "" + pi_cidr = "cidr" + pi_network_address_group_id = "network_address_group_id" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
+- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +ibm_pi_network_address_group_member provides the following [timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **create** - (Default 5 minutes) Used for creating network address group member. +- **delete** - (Default 5 minutes) Used for deleting network address group member. + +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_cidr` - (Optional, String) The member to add in CIDR format, for example 192.168.1.5/32. Required if `pi_network_address_group_member_id` not provided. +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_network_address_group_id` - (Required, String) network address group id. +- `pi_network_address_group_member_id` - (Optional, String) The network address group member id to remove. Required if `pi_cidr` not provided. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network address group's crn. +- `id` -(String) The unique identifier of the network address group resource. Composed of `//network_address_group_member_id`. +- `network_address_group_id` - (String) The unique identifier of the network address group. +- `members` - (List) The list of IP addresses in CIDR notation in the network address group. + + Nested schema for `members`: + - `cidr` - (String) The IP addresses in CIDR notation + - `id` - (String) The id of the network address group member IP addresses. +- `name` - (String) The name of the network address group. 
+- `user_tags` - (List) List of user tags attached to the resource. + +## Import + +The `ibm_pi_network_address_group_member` resource can be imported by using `cloud_instance_id`, `network_address_group_id`, and `network_address_group_member_id`. + +## Example + +```bash +terraform import ibm_pi_network_address_group_member.example d7bec597-4726-451f-8a63-e62e6f19c32c/041b186b-9598-4cb9-bf70-966d7b9d1dc8/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb +``` diff --git a/website/docs/r/pi_network_interface.html.markdown b/website/docs/r/pi_network_interface.html.markdown new file mode 100644 index 0000000000..39a0921d14 --- /dev/null +++ b/website/docs/r/pi_network_interface.html.markdown @@ -0,0 +1,84 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_interface" +description: |- + Manages pi_network_interface. +subcategory: "Power Systems" +--- + +# ibm_pi_network_interface + +Create, update, and delete a network interface. + +## Example Usage + +```terraform + resource "ibm_pi_network_interface" "network_interface" { + pi_cloud_instance_id = "" + pi_network_id = "network_id" + pi_name = "network-interface-name" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +The `ibm_pi_network_interface` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **create** - (Default 60 minutes) Used for creating a network interface. +- **update** - (Default 60 minutes) Used for updating a network interface. +- **delete** - (Default 60 minutes) Used for deleting a network interface. + +## Argument Reference + +Review the argument references that you can specify for your resource. 
+ +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_instance_id` - (Optional, String) If provided, the network interface is attached to the specified instance; if empty, the network interface is detached from the instance. +- `pi_ip_address` - (Optional, String) The requested IP address of this network interface. +- `pi_name` - (Optional, String) Name of the network interface. +- `pi_network_id` - (Required, String) The network ID. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network interface's crn. +- `id` - (String) The unique identifier of the network interface resource. The ID is composed of `//`. +- `instance` - (List) The attached instance to this Network Interface. + + Nested scheme for `instance`: + - `href` - (String) Link to instance resource. + - `instance_id` - (String) The attached instance id. +- `ip_address` - (String) The ip address of this network interface. +- `mac_address` - (String) The mac address of the network interface. +- `name` - (String) Name of the network interface (not unique or indexable). +- `network_interface_id` - (String) The unique identifier of the network interface. +- `network_security_group_id` - (String) ID of the Network Security Group the network interface will be added to. +- `status` - (String) The status of the network interface. + +## Import + +The `ibm_pi_network_interface` resource can be imported by using `cloud_instance_id`, `network_id` and `network_interface_id`. 
+ +## Example + +```bash +terraform import ibm_pi_network_interface.example d7bec597-4726-451f-8a63-e62e6f19c32c/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb/041b186b-9598-4cb9-bf70-966d7b9d1dc8 +``` diff --git a/website/docs/r/pi_network_port_attach.html.markdown b/website/docs/r/pi_network_port_attach.html.markdown index b12d5b2a7f..e55c6badc4 100644 --- a/website/docs/r/pi_network_port_attach.html.markdown +++ b/website/docs/r/pi_network_port_attach.html.markdown @@ -54,6 +54,7 @@ Review the argument references that you can specify for your resource. - `pi_network_name` - (Required, String) The network ID or name. - `pi_network_port_description` - (Optional, String) The description for the Network Port. - `pi_network_port_ipaddress` - (Optional, String) The requested ip address of this port. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. diff --git a/website/docs/r/pi_network_security_group.html.markdown b/website/docs/r/pi_network_security_group.html.markdown new file mode 100644 index 0000000000..c36afbec78 --- /dev/null +++ b/website/docs/r/pi_network_security_group.html.markdown @@ -0,0 +1,103 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_group" +description: |- + Manages pi_network_security_group. +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_group + +Create, update, and delete a network security group. + +## Example Usage + +```terraform + resource "ibm_pi_network_security_group" "network_security_group" { + pi_cloud_instance_id = "" + pi_name = "name" + pi_user_tags = ["tag1", "tag2"] + } +``` + +## Timeouts + +The `ibm_pi_network_security_group` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **delete** - (Default 10 minutes) Used for deleting a network security group. 
+ +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_name` - (Required, String) The name of the Network Security Group. +- `pi_user_tags` - (Optional, List) A list of tags. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network security group's crn. +- `id` - (String) The unique identifier of the network security group resource. Composed of `/`. +- `members` - (List) The list of IPv4 addresses and/or network interfaces in the network security group. + + Nested schema for `members`: + - `id` - (String) The id of the member in a network security group. + - `mac_address` - (String) The mac address of a network interface included if the type is `network-interface`. + - `target` - (String) If `ipv4-address` type, then IPv4 address or if `network-interface` type, then network interface id. + - `type` - (String) The type of member. Supported values are: `ipv4-address`, `network-interface`. + +- `network_security_group_id` - (String) The unique identifier of the network security group. +- `rules` - (List) The list of rules in the network security group. + + Nested schema for `rules`: + - `action` - (String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`. + - `destination_port` - (List) The list of destination ports. 
+ + Nested schema for `destination_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + - `id` - (String) The id of the rule in a network security group. + - `protocol` - (List) The list of protocol. + + Nested schema for `protocol`: + - `icmp_type` - (String) If icmp type, an ICMP packet type affected by ICMP rules and if not present then all types are matched. + - `tcp_flags` - (String) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`. + - `type` - (String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`. + - `remote` - (List) List of remote. + + Nested schema for `remote`: + - `id` - (String) The id of the remote network address group or network security group the rules apply to. Not required for default-network-address-group. + - `type` - (String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`. + - `source_port` - (List) List of source port. + + Nested schema for `source_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + +## Import + +The `ibm_pi_network_security_group` resource can be imported by using `cloud_instance_id` and `network_security_group_id`. 
+ +## Example + +```bash +terraform import ibm_pi_network_security_group.example d7bec597-4726-451f-8a63-e62e6f19c32c/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb +``` diff --git a/website/docs/r/pi_network_security_group_action.html.markdown b/website/docs/r/pi_network_security_group_action.html.markdown new file mode 100644 index 0000000000..39f2a36dc3 --- /dev/null +++ b/website/docs/r/pi_network_security_group_action.html.markdown @@ -0,0 +1,56 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_group_action" +description: |- + Manages pi_network_security_group_action. +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_group_action + +Enable or disable a network security group in your workspace. + +## Example Usage + +```terraform + resource "ibm_pi_network_security_group_action" "network_security_group_action" { + pi_cloud_instance_id = "" + pi_action = "enable" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +The `ibm_pi_network_security_group_action` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **create** - (Default 5 minutes) Used for enabling a network security group. +- **update** - (Default 5 minutes) Used for disabling a network security group. + +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_action` - (Required, String) Name of the action to take; can be enable to enable NSGs in a workspace or disable to disable NSGs in a workspace. Supported values are: `enable`, `disable`. 
+- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `state` - (String) The workspace network security group's state. diff --git a/website/docs/r/pi_network_security_group_member.html.markdown b/website/docs/r/pi_network_security_group_member.html.markdown new file mode 100644 index 0000000000..f0d4d6e540 --- /dev/null +++ b/website/docs/r/pi_network_security_group_member.html.markdown @@ -0,0 +1,108 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_group_member" +description: |- + Manages pi_network_security_group_member. +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_group_member + +Add or remove a network security group member. + +## Example Usage + +```terraform + resource "ibm_pi_network_security_group_member" "network_security_group_member" { + pi_cloud_instance_id = "" + pi_network_security_group_id = "network_security_group_id" + pi_target = "target" + pi_type = "ipv4-address" + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +The `ibm_pi_network_security_group_member` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **delete** - (Default 5 minutes) Used for deleting a network security group member. + +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. 
+- `pi_network_security_group_id` - (Required, String) Network security group ID. +- `pi_network_security_group_member_id` - (Optional, String) Network security group member ID. This conflicts with `pi_target` and `pi_type`. +- `pi_target` - (Optional, String) The target member to add. Required with `pi_type`. This conflicts with `pi_network_security_group_member_id`. +- `pi_type` - (Optional, String) The type of member. Supported values are: `ipv4-address`, `network-interface`. Required with `pi_target`. This conflicts with `pi_network_security_group_member_id`. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network security group's crn. +- `id` - (String) The unique identifier of the network security group resource. Composed of `//` +- `members` - (List) The list of IPv4 addresses and\or network interfaces in the network security group. + + Nested schema for `members`: + - `id` - (String) The id of the member in a network security group. + - `mac_address` - (String) The mac address of a network interface included if the type is `network-interface`. + - `target` - (String) If `ipv4-address` type, then IPv4 address or if `network-interface` type, then network interface id. + - `type` - (String) The type of member. Supported values are: `ipv4-address`, `network-interface`. + +- `name` - (String) The name of the network security group. +- `network_security_group_member_id` - (String) The unique identifier of the network security group resource. +- `rules` - (List) The list of rules in the network security group. + + Nested schema for `rules`: + - `action` - (String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`. + - `destination_port` - (List) The list of destination port. + + Nested schema for `destination_port`: + - `maximum` - (Integer) The end of the port range, if applicable. 
If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + - `id` - (String) The id of the rule in a network security group. + - `protocol` - (List) The list of protocol. + + Nested schema for `protocol`: + - `icmp_type` - (String) If icmp type, an ICMP packet type affected by ICMP rules and if not present then all types are matched. + - `tcp_flags` - (String) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`. + - `type` - (String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`. + - `remote` - (List) List of remote. + + Nested schema for `remote`: + - `id` - (String) The id of the remote network address group or network security group the rules apply to. Not required for default-network-address-group. + - `type` - (String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`. + - `source_port` - (List) List of source port. + + Nested schema for `source_port`: + - `maximum` - (Integer) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Integer) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. +- `user_tags` - (List) List of user tags attached to the resource. + +## Import + +The `ibm_pi_network_security_group_member` resource can be imported by using `cloud_instance_id`, `network_security_group_id` and `network_security_group_member_id`. 
+ +## Example + +```bash +terraform import ibm_pi_network_security_group_member.example d7bec597-4726-451f-8a63-e62e6f19c32c/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb/041b186b-9598-4cb9-bf70-966d7b9d1dc8 +``` diff --git a/website/docs/r/pi_network_security_group_rule.html.markdown b/website/docs/r/pi_network_security_group_rule.html.markdown new file mode 100644 index 0000000000..4dc7aba8ed --- /dev/null +++ b/website/docs/r/pi_network_security_group_rule.html.markdown @@ -0,0 +1,154 @@ +--- +layout: "ibm" +page_title: "IBM : ibm_pi_network_security_group_rule" +description: |- + Manages pi_network_security_group_rule. +subcategory: "Power Systems" +--- + +# ibm_pi_network_security_group_rule + +Add or remove a network security group rule. + +## Example Usage + +```terraform + resource "ibm_pi_network_security_group_rule" "network_security_group_rule" { + pi_cloud_instance_id = "" + pi_network_security_group_id = "" + pi_action = "allow" + pi_destination_ports { + minimum = 1200 + maximum = 37466 + } + pi_source_ports { + minimum = 1000 + maximum = 19500 + } + pi_protocol { + tcp_flags { + flag = "ack" + } + tcp_flags { + flag = "syn" + } + tcp_flags { + flag = "psh" + } + type = "tcp" + } + pi_remote { + id = "" + type = "network-security-group" + } + } +``` + +### Notes + +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Timeouts + +The `ibm_pi_network_security_group_rule` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options: + +- **create** - (Default 10 minutes) Used for creating a network security group rule. +- **delete** - (Default 10 minutes) Used for deleting a network security group rule. 
+ +## Argument Reference + +Review the argument references that you can specify for your resource. + +- `pi_action` - (Optional, String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`. Required if `pi_network_security_group_rule_id` is not provided. +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_destination_port` - (Optional, List) The list of destination port. + + Nested schema for `pi_destination_port`: + - `maximum` - (Optional, Int) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Optional, Int) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. +- `pi_network_security_group_id` - (Required, String) The unique identifier of the network security group. +- `pi_network_security_group_rule_id` - (Optional, String) The network security group rule id to remove. Required if none of the other optional fields are provided. +- `pi_protocol` - (Optional, List) The list of protocol. Required if `pi_network_security_group_rule_id` is not provided. + + Nested schema for `pi_protocol`: + - `icmp_type` - (Optional, String) If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched. Supported values are: `all`, `destination-unreach`, `echo`, `echo-reply`, `source-quench`, `time-exceeded`. + - `tcp_flags` - (Optional, String) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`. + - `type` - (Required, String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`. +- `pi_remote` - (Optional, List) List of remote. Required if `pi_network_security_group_rule_id` is not provided. 
+ + Nested schema for `pi_remote`: + - `id` - (Optional, String) The id of the remote network address group or network security group the rules apply to. Not required for default-network-address-group. + - `type` - (Optional, String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`. +- `pi_source_port` - (Optional, List) List of source port + + Nested schema for `pi_source_port`: + - `maximum` - (Optional, Int) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Optional, Int) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + +- `pi_name` - (Optional, String) The name of the network security group rule. Required if `pi_network_security_group_rule_id` is not provided. + +## Attribute Reference + +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `crn` - (String) The network security group's crn. +- `id` - (String) The unique identifier of the network security group resource. Composed of `/` +- `members` - (List) The list of IPv4 addresses and\or network interfaces in the network security group. + + Nested schema for `members`: + - `id` - (String) The id of the member in a network security group. + - `mac_address` - (String) The mac address of a network interface included if the type is `network-interface`. + - `target` - (String) If `ipv4-address` type, then IPv4 address or if `network-interface` type, then network interface id. + - `type` - (String) The type of member. Supported values are: `ipv4-address`, `network-interface`. + +- `network_security_group_id` -(String) The unique identifier of the network security group. +- `rules` - (List) The list of rules in the network security group. 
+ + Nested schema for `rules`: + - `action` - (String) The action to take if the rule matches network traffic. Supported values are: `allow`, `deny`. + - `destination_port` - (List) The list of destination port. + + Nested schema for `destination_port`: + - `maximum` - (Int) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. + - `minimum` - (Int) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. + - `id` - (String) The id of the rule in a network security group. + - `name` - (String) The unique name of the network security group rule. + - `protocol` - (List) The list of protocol. + + Nested schema for `protocol`: + - `icmp_type` - (String) If icmp type, a ICMP packet type affected by ICMP rules and if not present then all types are matched. Supported values are: `all`, `destination-unreach`, `echo`, `echo-reply`, `source-quench`, `time-exceeded`. + - `tcp_flags` - (String) If tcp type, the list of TCP flags and if not present then all flags are matched. Supported values are: `syn`, `ack`, `fin`, `rst`, `urg`, `psh`, `wnd`, `chk`, `seq`. + - `type` - (String) The protocol of the network traffic. Supported values are: `icmp`, `tcp`, `udp`, `all`. + - `remote` - (List) List of remote. + + Nested schema for `remote`: + - `id` - (String) The id of the remote network address group or network security group the rules apply to. Not required for default-network-address-group. + - `type` - (String) The type of remote group the rules apply to. Supported values are: `network-security-group`, `network-address-group`, `default-network-address-group`. + - `source_port` - (List) List of source port + + Nested schema for `source_port`: + - `maximum` - (Int) The end of the port range, if applicable. If the value is not present then the default value of 65535 will be the maximum port number. 
+ - `minimum` - (Int) The start of the port range, if applicable. If the value is not present then the default value of 1 will be the minimum port number. +- `user_tags` - (List) List of user tags attached to the resource. + +## Import + +The `ibm_pi_network_security_group_rule` resource can be imported by using `cloud_instance_id`, `network_security_group_id` and `network_security_group_rule_id`. + +## Example + +```bash +terraform import ibm_pi_network_security_group_rule.example d7bec597-4726-451f-8a63-e62e6f19c32c/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb/041b186b-9598-4cb9-bf70-966d7b9d1dc8 +``` diff --git a/website/docs/r/pi_shared_processor_pool.html.markdown b/website/docs/r/pi_shared_processor_pool.html.markdown index e051594767..be21015050 100644 --- a/website/docs/r/pi_shared_processor_pool.html.markdown +++ b/website/docs/r/pi_shared_processor_pool.html.markdown @@ -58,6 +58,7 @@ Review the argument references that you can specify for your resource. * `pi_shared_processor_pool_name` - (Required, String) The name of the shared processor pool. * `pi_shared_processor_pool_reserved_cores` - (Required, Integer) The amount of reserved cores for the shared processor pool. * `pi_shared_processor_pool_placement_group_id` - (Optional, String) The ID of the placement group the shared processor pool is created in. +* `pi_user_tags` - (Optional, List) The user tags attached to this resource. ## Attribute reference @@ -65,6 +66,7 @@ Review the argument references that you can specify for your resource. * `allocated_cores` - (Float) The allocated cores in the shared processor pool. * `available_cores` - (Integer) The available cores in the shared processor pool. +* `crn` - (String) The CRN of this resource. * `host_id` - (Integer) The host ID where the shared processor pool resides. * `instances` - (List of Map) The list of server instances that are deployed in the shared processor pool. 
diff --git a/website/docs/r/pi_snapshot.html.markdown b/website/docs/r/pi_snapshot.html.markdown index e50423df31..db7df2a599 100644 --- a/website/docs/r/pi_snapshot.html.markdown +++ b/website/docs/r/pi_snapshot.html.markdown @@ -56,6 +56,7 @@ Review the argument references that you can specify for your resource. - `pi_description` - (Optional, String) Description of the PVM instance snapshot. - `pi_instance_name` - (Required, String) The name of the instance you want to take a snapshot of. - `pi_snap_shot_name` - (Required, String) The unique name of the snapshot. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. - `pi_volume_ids` - (Optional, String) A list of volume IDs of the instance that will be part of the snapshot. If none are provided, then all the volumes of the instance will be part of the snapshot. ## Attribute reference @@ -63,6 +64,7 @@ Review the argument references that you can specify for your resource. In addition to all argument reference list, you can access the following attribute reference after your resource is created. - `creation_date` - (String) Creation date of the snapshot. +- `crn` - (String) The CRN of this resource. - `id` - (String) The unique identifier of the snapshot. The ID is composed of /. - `last_update_date` - (String) The last updated date of the snapshot. - `snapshot_id` - (String) ID of the PVM instance snapshot. diff --git a/website/docs/r/pi_volume.html.markdown b/website/docs/r/pi_volume.html.markdown index c75ab9b075..6737608882 100644 --- a/website/docs/r/pi_volume.html.markdown +++ b/website/docs/r/pi_volume.html.markdown @@ -59,6 +59,11 @@ Review the argument references that you can specify for your resource. - `pi_anti_affinity_volumes`- (Optional, String) List of volumes to base volume anti-affinity policy against; required if requesting `anti-affinity` and `pi_anti_affinity_instances` is not provided. 
- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_replication_enabled` - (Optional, Boolean) Indicates if the volume should be replication enabled or not. + + **Note:** `replication_sites` will be populated automatically with default sites if set to true and sites are not specified. + +- `pi_replication_sites` - (Optional, List) List of replication sites for volume replication. Must set `pi_replication_enabled` to true to use. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. - `pi_volume_name` - (Required, String) The name of the volume. - `pi_volume_pool` - (Optional, String) Volume pool where the volume will be created; if provided then `pi_affinity_policy` values will be ignored. - `pi_volume_shareable` - (Required, Boolean) If set to **true**, the volume can be shared across Power Systems Virtual Server instances. If set to **false**, you can attach it only to one instance. @@ -72,6 +77,7 @@ In addition to all argument reference list, you can access the following attribu - `auxiliary` - (Boolean) Indicates if the volume is auxiliary or not. - `auxiliary_volume_name` - (String) The auxiliary volume name. - `consistency_group_name` - (String) The consistency group name if volume is a part of volume group. +- `crn` - (String) The CRN of this resource. - `delete_on_termination` - (Boolean) Indicates if the volume should be deleted when the server terminates. - `group_id` - (String) The volume group id to which volume belongs. - `id` - (String) The unique identifier of the volume. The ID is composed of `/`. @@ -80,6 +86,7 @@ In addition to all argument reference list, you can access the following attribu - `mirroring_state` - (String) Mirroring state for replication enabled volume. - `primary_role` - (String) Indicates whether `master`/`auxiliary` volume is playing the primary role. - `replication_status` - (String) The replication status of the volume. 
+- `replication_sites` - (List) List of replication sites for volume replication. - `replication_type` - (String) The replication type of the volume `metro` or `global`. - `volume_id` - (String) The unique identifier of the volume. - `volume_status` - (String) The status of the volume. diff --git a/website/docs/r/pi_volume_clone.html.markdown b/website/docs/r/pi_volume_clone.html.markdown index 6eb0ed1ee1..aa0637742f 100644 --- a/website/docs/r/pi_volume_clone.html.markdown +++ b/website/docs/r/pi_volume_clone.html.markdown @@ -54,6 +54,7 @@ Review the argument references that you can specify for your resource. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_replication_enabled` - (Optional, Boolean) Indicates whether the cloned volume should have replication enabled. If no value is provided, it will default to the replication status of the source volume(s). - `pi_target_storage_tier` - (Optional, String) The storage tier for the cloned volume(s). To get a list of available storage tiers, please use the [ibm_pi_storage_types_capacity](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/pi_storage_types_capacity) data source. +- `pi_user_tags` - (Optional, List) The user tags attached to this resource. - `pi_volume_clone_name` - (Required, String) The base name of the newly cloned volume(s). - `pi_volume_ids` - (Required, Set of String) List of volumes to be cloned. diff --git a/website/docs/r/pi_volume_group.html.markdown b/website/docs/r/pi_volume_group.html.markdown index 27c927952f..40b2b5b910 100644 --- a/website/docs/r/pi_volume_group.html.markdown +++ b/website/docs/r/pi_volume_group.html.markdown @@ -59,8 +59,10 @@ Review the argument references that you can specify for your resource. In addition to all argument reference list, you can access the following attribute reference after your resource is created. 
+- `auxiliary` - (Boolean) Indicates if the volume group is auxiliary. - `consistency_group_name` - (String) The consistency Group Name if volume is a part of volume group. - `id` - (String) The unique identifier of the volume group. The ID is composed of `/`. +- `replication_sites` - (List) Indicates the replication sites of the volume group. - `replication_status` - (String) The replication status of volume group. - `status_description_errors` - (Set) The status details of the volume group. @@ -68,6 +70,7 @@ In addition to all argument reference list, you can access the following attribu - `key` - (String) The volume group error key. - `message` - (String) The failure message providing more details about the error key. - `volume_ids` - (List of String) List of volume IDs, which failed to be added to or removed from the volume group, with the given error. +- `storage_pool` - (String) Storage pool of the volume group. - `volume_group_id` - (String) The unique identifier of the volume group. - `volume_group_status` - (String) The status of the volume group. diff --git a/website/docs/r/pi_workspace.html.markdown b/website/docs/r/pi_workspace.html.markdown index 04a92a2c57..192785d58f 100644 --- a/website/docs/r/pi_workspace.html.markdown +++ b/website/docs/r/pi_workspace.html.markdown @@ -47,13 +47,15 @@ Review the argument references that you can specify for your resource. - `pi_name` - (Required, String) A descriptive name used to identify the workspace. - `pi_plan` - (Optional, String) Plan associated with the offering; Valid values are `public` or `private`. The default value is `public`. - `pi_resource_group_id` - (Required, String) The ID of the resource group where you want to create the workspace. You can retrieve the value from data source `ibm_resource_group`. +- `pi_user_tags` - (Optional, List) List of user tags attached to the resource. 
## Attribute reference In addition to all argument reference listed, you can access the following attribute references after your resource source is created. - `id` - (String) Workspace ID. -- `workspace_details` - (Map) Workspace information. +- `crn` - (String) Workspace crn. +- `workspace_details` - (Deprecated, Map) Workspace information. Nested schema for `workspace_details`: - `creation_date` - (String) Date of workspace creation. diff --git a/website/docs/r/resource_tag.html.markdown b/website/docs/r/resource_tag.html.markdown index 84cc6f229a..d2ed232716 100644 --- a/website/docs/r/resource_tag.html.markdown +++ b/website/docs/r/resource_tag.html.markdown @@ -44,7 +44,7 @@ The `ibm_resource_tag` resource provides the following [Timeouts](https://www.te Review the argument references that you can specify for your resource. - `resource_id` - (Required, String) The CRN of the resource on which the tags is be attached. -- `resource_type` - (Optional, String) The resource type on which the tags should be attached. +- `resource_type` - (Optional, String) The resource type on which the tags should be attached. This is valid for Classic Infrastructure resources only. The `resource_type` allowed values are: `SoftLayer_Virtual_DedicatedHost`, `SoftLayer_Hardware`, `SoftLayer_Hardware_Server`, `SoftLayer_Network_Application_Delivery_Controller`, `SoftLayer_Network_Vlan`, `SoftLayer_Network_Vlan_Firewall`, `SoftLayer_Network_Component_Firewall`, `SoftLayer_Network_Firewall_Module_Context`, `SoftLayer_Virtual_Guest`. A wrong value would result in an error in the `terraform apply` command. - `tag_type` - (Optional, String) Type of the tag. Supported values are: `user` or `access`. The default value is user. - `tags` - (Required, Array of strings) List of tags associated with resource instance. 
- `replace` - (Optional, Bool) If true, it indicates that the attaching operation is a replacement operation diff --git a/website/docs/r/schematics_agent.html.markdown b/website/docs/r/schematics_agent.html.markdown index 802bc9aa2d..c720f10b90 100644 --- a/website/docs/r/schematics_agent.html.markdown +++ b/website/docs/r/schematics_agent.html.markdown @@ -35,6 +35,7 @@ resource "ibm_schematics_agent" "schematics_agent_instance" { schematics_location = "us-south" tags = ["agent-MyDevAgent"] version = "1.0.0" + run_destroy_resources = 1 } ``` @@ -69,6 +70,7 @@ Nested scheme for **user_state**: * `state` - (Optional, String) User-defined states * `enable` Agent is enabled by the user. * `disable` Agent is disbaled by the user. * Constraints: Allowable values are: `enable`, `disable`. * `version` - (Required, String) Agent version. +* `run_destroy_resources` - (Optional, Int) Argument which helps to run destroy resources job. Increment the value to destroy resources associated with agent deployment. ## Attribute Reference