diff --git a/go.mod b/go.mod index ee677740c..b791976a8 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4 github.com/mitchellh/go-homedir v1.1.0 - github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241202142411-fd6fb8c8c54d + github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241205120206-477490ae82ff github.com/unknwon/com v1.0.1 golang.org/x/crypto v0.23.0 golang.org/x/sync v0.1.0 diff --git a/go.sum b/go.sum index b08cbf1c5..14b4aadb1 100644 --- a/go.sum +++ b/go.sum @@ -156,8 +156,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241202142411-fd6fb8c8c54d h1:7bxIT3di/P4VxlwCoc8jkaeQzTh896lbWiHq3UbcA8E= -github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241202142411-fd6fb8c8c54d/go.mod h1:M1F6OfSRZRzAmAFKQqSLClX952at5hx5rHe4UTEykgg= +github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241205120206-477490ae82ff h1:VFHE+geNpvJV8NpJ2cRzqjpwevph9DL8ExWmubteCjc= +github.com/opentelekomcloud/gophertelekomcloud v0.9.4-0.20241205120206-477490ae82ff/go.mod h1:M1F6OfSRZRzAmAFKQqSLClX952at5hx5rHe4UTEykgg= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= diff --git a/opentelekomcloud/acceptance/cce/resource_opentelekomcloud_cce_node_v3_test.go b/opentelekomcloud/acceptance/cce/resource_opentelekomcloud_cce_node_v3_test.go index c23824520..d36fcd435 100644 --- 
a/opentelekomcloud/acceptance/cce/resource_opentelekomcloud_cce_node_v3_test.go +++ b/opentelekomcloud/acceptance/cce/resource_opentelekomcloud_cce_node_v3_test.go @@ -20,28 +20,40 @@ const ( resourceNameNode = "opentelekomcloud_cce_node_v3.node_1" resourceNameNode2 = "opentelekomcloud_cce_node_v3.node_2" resourceNameNode3 = "opentelekomcloud_cce_node_v3.node_3" - resourceNameNode4 = "opentelekomcloud_cce_node_v3.node_4" ) -func TestAccCCENodesV3Basic(t *testing.T) { +func getCceNodeResourceFunc(cfg *cfg.Config, state *terraform.ResourceState) (interface{}, error) { + client, err := cfg.CceV3Client(env.OS_REGION_NAME) + if err != nil { + return nil, fmt.Errorf("error creating CCE v3 Client: %s", err) + } + return nodes.Get(client, state.Primary.Attributes["cluster_id"], state.Primary.ID) +} + +func TestAccResourceCCENodesV3Basic(t *testing.T) { var node nodes.Nodes - t.Parallel() + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) + shared.BookCluster(t) - quotas.BookMany(t, singleNodeQuotas.X(2)) + t.Parallel() - ip, _ := cidr.Host(shared.SubnetNet, 14) + ip, _ := cidr.Host(shared.SubnetNet, 200) privateIP := ip.String() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3Basic(privateIP), Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "name", "test-node"), resource.TestCheckResourceAttr(resourceNameNode, "flavor_id", "s2.large.2"), resource.TestCheckResourceAttr(resourceNameNode, "os", "EulerOS 2.9"), @@ -60,25 +72,30 @@ func TestAccCCENodesV3Basic(t *testing.T) { }) } -func TestAccCCENodesV3Agency(t *testing.T) { +func 
TestAccResourceCCENodesV3Agency(t *testing.T) { var node nodes.Nodes - t.Parallel() + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) + shared.BookCluster(t) - quotas.BookMany(t, singleNodeQuotas.X(2)) + t.Parallel() - ip, _ := cidr.Host(shared.SubnetNet, 14) + ip, _ := cidr.Host(shared.SubnetNet, 200) privateIP := ip.String() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3Agency(privateIP), Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "name", "test-node"), resource.TestCheckResourceAttr(resourceNameNode, "flavor_id", "s2.large.2"), resource.TestCheckResourceAttr(resourceNameNode, "os", "EulerOS 2.9"), @@ -90,7 +107,7 @@ func TestAccCCENodesV3Agency(t *testing.T) { }) } -func TestAccCCENodesV3Multiple(t *testing.T) { +func TestAccResourceCCENodesV3Multiple(t *testing.T) { t.Parallel() shared.BookCluster(t) quotas.BookMany(t, singleNodeQuotas.X(2)) @@ -98,7 +115,6 @@ func TestAccCCENodesV3Multiple(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, Steps: []resource.TestStep{ { Config: testAccCCENodeV3Multiple, @@ -107,32 +123,47 @@ func TestAccCCENodesV3Multiple(t *testing.T) { }) } -func TestAccCCENodesV3Timeout(t *testing.T) { +func TestAccResourceCCENodesV3Timeout(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() quotas.BookMany(t, singleNodeQuotas) 
resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3Timeout, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, }) } -func TestAccCCENodesV3OS(t *testing.T) { - var node nodes.Nodes +func TestAccResourceCCENodesV3OS(t *testing.T) { var node2 nodes.Nodes var node3 nodes.Nodes - var node4 nodes.Nodes + + rc2 := common.InitResourceCheck( + resourceNameNode2, + &node2, + getCceNodeResourceFunc, + ) + + rc3 := common.InitResourceCheck( + resourceNameNode3, + &node3, + getCceNodeResourceFunc, + ) t.Parallel() shared.BookCluster(t) @@ -146,25 +177,27 @@ func TestAccCCENodesV3OS(t *testing.T) { { Config: testAccCCENodeV3OS, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), - resource.TestCheckResourceAttr(resourceNameNode, "os", "EulerOS 2.5"), - testAccCheckCCENodeV3Exists(resourceNameNode2, shared.DataSourceClusterName, &node2), - resource.TestCheckResourceAttr(resourceNameNode2, "os", "CentOS 7.7"), - testAccCheckCCENodeV3Exists(resourceNameNode3, shared.DataSourceClusterName, &node3), - resource.TestCheckResourceAttr(resourceNameNode3, "os", "EulerOS 2.9"), - testAccCheckCCENodeV3Exists(resourceNameNode4, shared.DataSourceClusterName, &node4), - resource.TestCheckResourceAttr(resourceNameNode4, "os", "Ubuntu 22.04"), + rc2.CheckResourceExists(), + resource.TestCheckResourceAttr(resourceNameNode2, "os", "EulerOS 2.9"), + rc3.CheckResourceExists(), + resource.TestCheckResourceAttr(resourceNameNode3, "os", "Ubuntu 22.04"), ), }, }, }) } -func TestAccCCENodesV3BandWidthResize(t *testing.T) { +func TestAccResourceCCENodesV3BandWidthResize(t *testing.T) { var node 
nodes.Nodes - t.Parallel() + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) + shared.BookCluster(t) + t.Parallel() qts := quotas.MultipleQuotas{{Q: quotas.FloatingIP, Count: 1}} qts = append(qts, singleNodeQuotas...) quotas.BookMany(t, qts) @@ -172,12 +205,12 @@ func TestAccCCENodesV3BandWidthResize(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3Ip, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "iptype", "5_bgp"), resource.TestCheckResourceAttr(resourceNameNode, "sharetype", "PER"), resource.TestCheckResourceAttr(resourceNameNode, "bandwidth_charge_mode", "traffic"), @@ -187,7 +220,7 @@ func TestAccCCENodesV3BandWidthResize(t *testing.T) { { Config: testAccCCENodeV3BandWidthResize, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "bandwidth_size", "10"), ), }, @@ -195,11 +228,17 @@ func TestAccCCENodesV3BandWidthResize(t *testing.T) { }) } -func TestAccCCENodesV3_eipIds(t *testing.T) { +func TestAccResourceCCENodesV3_eipIds(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() + qts := []*quotas.ExpectedQuota{{Q: quotas.FloatingIP, Count: 2}} qts = append(qts, singleNodeQuotas...) 
quotas.BookMany(t, qts) @@ -207,29 +246,35 @@ func TestAccCCENodesV3_eipIds(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3IpIDs, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, { Config: testAccCCENodeV3IpIDsUnset, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, }) } -func TestAccCCENodesV3IpSetNull(t *testing.T) { +func TestAccResourceCCENodesV3IpSetNull(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() + qts := []*quotas.ExpectedQuota{{Q: quotas.FloatingIP, Count: 2}} qts = append(qts, singleNodeQuotas...) 
quotas.BookMany(t, qts) @@ -237,12 +282,12 @@ func TestAccCCENodesV3IpSetNull(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3Ip, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "iptype", "5_bgp"), resource.TestCheckResourceAttr(resourceNameNode, "sharetype", "PER"), resource.TestCheckResourceAttr(resourceNameNode, "bandwidth_charge_mode", "traffic"), @@ -251,18 +296,23 @@ func TestAccCCENodesV3IpSetNull(t *testing.T) { { Config: testAccCCENodeV3IpUnset, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, }) } -func TestAccCCENodesV3IpCreate(t *testing.T) { +func TestAccResourceCCENodesV3IpCreate(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() qts := []*quotas.ExpectedQuota{{Q: quotas.FloatingIP, Count: 1}} qts = append(qts, singleNodeQuotas...) 
quotas.BookMany(t, qts) @@ -270,29 +320,34 @@ func TestAccCCENodesV3IpCreate(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3IpUnset, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, { Config: testAccCCENodeV3Ip, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, }) } -func TestAccCCENodesV3IpWithExtendedParameters(t *testing.T) { +func TestAccResourceCCENodesV3IpWithExtendedParameters(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() qts := []*quotas.ExpectedQuota{{Q: quotas.FloatingIP, Count: 2}} qts = append(qts, singleNodeQuotas...) 
quotas.BookMany(t, qts) @@ -300,12 +355,12 @@ func TestAccCCENodesV3IpWithExtendedParameters(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3IpParams, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "iptype", "5_bgp"), resource.TestCheckResourceAttr(resourceNameNode, "sharetype", "PER"), resource.TestCheckResourceAttr(resourceNameNode, "bandwidth_charge_mode", "traffic"), @@ -315,44 +370,57 @@ func TestAccCCENodesV3IpWithExtendedParameters(t *testing.T) { }) } -func TestAccCCENodesV3IpNulls(t *testing.T) { +func TestAccResourceCCENodesV3IpNulls(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() + quotas.BookMany(t, singleNodeQuotas) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3IpNull, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, }) } -func TestAccCCENodesV3EncryptedVolume(t *testing.T) { +func TestAccResourceCCENodesV3EncryptedVolume(t *testing.T) { var node nodes.Nodes - t.Parallel() + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) + shared.BookCluster(t) + t.Parallel() + quotas.BookMany(t, singleNodeQuotas) resource.Test(t, resource.TestCase{ 
PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3EncryptedVolume, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "data_volumes.0.kms_id", env.OS_KMS_ID), resource.TestCheckResourceAttr(resourceNameNode, "root_volume.0.kms_id", env.OS_KMS_ID), ), @@ -361,11 +429,17 @@ func TestAccCCENodesV3EncryptedVolume(t *testing.T) { }) } -func TestAccCCENodesV3TaintsK8sTags(t *testing.T) { +func TestAccResourceCCENodesV3TaintsK8sTags(t *testing.T) { var node nodes.Nodes + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) - t.Parallel() shared.BookCluster(t) + t.Parallel() + quotas.BookMany(t, singleNodeQuotas) ip, _ := cidr.Host(shared.SubnetNet, 15) @@ -374,12 +448,12 @@ func TestAccCCENodesV3TaintsK8sTags(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3TaintsK8sTags(privateIP), Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), resource.TestCheckResourceAttr(resourceNameNode, "taints.0.key", "dedicated"), resource.TestCheckResourceAttr(resourceNameNode, "taints.0.value", "database"), resource.TestCheckResourceAttr(resourceNameNode, "taints.0.effect", "NoSchedule"), @@ -390,21 +464,28 @@ func TestAccCCENodesV3TaintsK8sTags(t *testing.T) { }) } -func TestAccCCENodesV3_extendParams(t *testing.T) { +func TestAccResourceCCENodesV3_extendParams(t 
*testing.T) { var node nodes.Nodes - t.Parallel() + rc := common.InitResourceCheck( + resourceNameNode, + &node, + getCceNodeResourceFunc, + ) + shared.BookCluster(t) + t.Parallel() + quotas.BookMany(t, singleNodeQuotas) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccCCEKeyPairPreCheck(t) }, ProviderFactories: common.TestAccProviderFactories, - CheckDestroy: testAccCheckCCENodeV3Destroy, + CheckDestroy: rc.CheckResourceDestroy(), Steps: []resource.TestStep{ { Config: testAccCCENodeV3ExtendParams, Check: resource.ComposeTestCheckFunc( - testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node), + rc.CheckResourceExists(), ), }, }, @@ -428,7 +509,7 @@ func testAccCheckCCENodeV3Destroy(s *terraform.State) error { continue } - _, err := nodes.Get(client, clusterID, rs.Primary.ID).Extract() + _, err := nodes.Get(client, clusterID, rs.Primary.ID) if err == nil { return fmt.Errorf("node still exists") } @@ -437,91 +518,12 @@ func testAccCheckCCENodeV3Destroy(s *terraform.State) error { return nil } -func testAccCheckCCENodeV3Exists(n string, cluster string, node *nodes.Nodes) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("not found: %s", n) - } - c, ok := s.RootModule().Resources[cluster] - if !ok { - return fmt.Errorf("cluster not found: %s", c) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("no ID is set") - } - if c.Primary.ID == "" { - return fmt.Errorf("cluster id is not set") - } - - config := common.TestAccProvider.Meta().(*cfg.Config) - client, err := config.CceV3Client(env.OS_REGION_NAME) - if err != nil { - return fmt.Errorf("error creating OpenTelekomCloud CCE client: %s", err) - } - - found, err := nodes.Get(client, c.Primary.ID, rs.Primary.ID).Extract() - if err != nil { - return err - } - - if found.Metadata.Id != rs.Primary.ID { - return fmt.Errorf("node not found") - } - - *node = *found - - return nil - } -} - var 
testAccCCENodeV3OS = fmt.Sprintf(` %s -resource "opentelekomcloud_cce_node_v3" "node_1" { - cluster_id = data.opentelekomcloud_cce_cluster_v3.cluster.id - name = "test-node" - flavor_id = "s2.large.2" - os = "EulerOS 2.5" - - availability_zone = "%[2]s" - key_pair = "%[3]s" - - root_volume { - size = 40 - volumetype = "SATA" - } - - data_volumes { - size = 100 - volumetype = "SATA" - } -} - resource "opentelekomcloud_cce_node_v3" "node_2" { cluster_id = data.opentelekomcloud_cce_cluster_v3.cluster.id - name = "test-node" - flavor_id = "s2.large.2" - os = "CentOS 7.7" - - availability_zone = "%[2]s" - key_pair = "%[3]s" - - root_volume { - size = 40 - volumetype = "SATA" - } - - data_volumes { - size = 100 - volumetype = "SATA" - } -} - -resource "opentelekomcloud_cce_node_v3" "node_3" { - cluster_id = data.opentelekomcloud_cce_cluster_v3.cluster.id - name = "test-node" + name = "test-node-euler" flavor_id = "s2.large.2" os = "EulerOS 2.9" @@ -539,9 +541,9 @@ resource "opentelekomcloud_cce_node_v3" "node_3" { } } -resource "opentelekomcloud_cce_node_v3" "node_4" { +resource "opentelekomcloud_cce_node_v3" "node_3" { cluster_id = data.opentelekomcloud_cce_cluster_v3.cluster.id - name = "test-node" + name = "test-node-ubuntu" flavor_id = "s2.large.2" os = "Ubuntu 22.04" diff --git a/opentelekomcloud/acceptance/cce/shared/cluster.go b/opentelekomcloud/acceptance/cce/shared/cluster.go index bc05de426..6c52336b8 100644 --- a/opentelekomcloud/acceptance/cce/shared/cluster.go +++ b/opentelekomcloud/acceptance/cce/shared/cluster.go @@ -119,27 +119,39 @@ func BookCluster(t *testing.T) { func CreateSharedCluster(t *testing.T, client *golangsdk.ServiceClient, subnet *subnets.Subnet) string { t.Log("starting creating shared cluster") - job, err := clusters.Create(client, clusters.CreateOpts{ + + cluster, err := clusters.Create(client, clusters.CreateOpts{ Kind: "Cluster", ApiVersion: "v3", Metadata: clusters.CreateMetaData{ Name: sharedClusterName, }, Spec: clusters.Spec{ + 
Category: "Turbo", Type: "VirtualMachine", Flavor: "cce.s2.small", Description: "Shared cluster for CCE acceptance tests", - ContainerNetwork: clusters.ContainerNetworkSpec{ - Mode: "vpc-router", - }, HostNetwork: clusters.HostNetworkSpec{ VpcId: subnet.VpcID, SubnetId: subnet.ID, }, + ContainerNetwork: clusters.ContainerNetworkSpec{ + Mode: "eni", + }, + EniNetwork: &clusters.EniNetworkSpec{ + SubnetId: subnet.SubnetID, + Cidr: subnet.CIDR, + }, + Authentication: clusters.AuthenticationSpec{ + Mode: "rbac", + AuthenticatingProxy: make(map[string]string), + }, + KubernetesSvcIpRange: "10.247.0.0/16", }, }) + th.AssertNoErr(t, err) - sharedClusterID = job.Metadata.Id + sharedClusterID = cluster.Metadata.Id stateConf := &resource.StateChangeConf{ Pending: []string{"Creating"}, diff --git a/opentelekomcloud/services/cce/resource_opentelekomcloud_cce_node_v3.go b/opentelekomcloud/services/cce/resource_opentelekomcloud_cce_node_v3.go index ef0125ed8..f1ddb5bce 100644 --- a/opentelekomcloud/services/cce/resource_opentelekomcloud_cce_node_v3.go +++ b/opentelekomcloud/services/cce/resource_opentelekomcloud_cce_node_v3.go @@ -589,7 +589,7 @@ func resourceCCENodeV3Create(ctx context.Context, d *schema.ResourceData, meta i stateCluster := &resource.StateChangeConf{ Target: []string{"Available"}, Refresh: waitForClusterAvailable(client, clusterID), - Timeout: 15 * time.Minute, + Timeout: 20 * time.Minute, Delay: 15 * time.Second, MinTimeout: 3 * time.Second, } @@ -599,7 +599,7 @@ func resourceCCENodeV3Create(ctx context.Context, d *schema.ResourceData, meta i } log.Printf("[DEBUG] Create Options: %#v", createOpts) - node, err := nodes.Create(client, clusterID, createOpts).Extract() + node, err := nodes.Create(client, clusterID, createOpts) switch err.(type) { case golangsdk.ErrDefault403: retryNode, err := recursiveCreate(ctx, client, createOpts, clusterID) @@ -640,7 +640,7 @@ func resourceCCENodeV3Create(ctx context.Context, d *schema.ResourceData, meta i // getNodeIDFromJob 
wait until job starts (status Running) and returns Node ID func getNodeIDFromJob(ctx context.Context, client *golangsdk.ServiceClient, jobID string, timeout time.Duration) (string, error) { - job, err := nodes.GetJobDetails(client, jobID).ExtractJob() + job, err := nodes.GetJobDetails(client, jobID) if err != nil { return "", fmt.Errorf("error fetching OpenTelekomCloud Job Details: %s", err) } @@ -650,7 +650,7 @@ func getNodeIDFromJob(ctx context.Context, client *golangsdk.ServiceClient, jobI Pending: []string{"Initializing"}, Target: []string{"Running"}, Refresh: func() (interface{}, string, error) { - subJob, err := nodes.GetJobDetails(client, jobResourceId).ExtractJob() + subJob, err := nodes.GetJobDetails(client, jobResourceId) if err != nil { return nil, "ERROR", fmt.Errorf("error fetching OpenTelekomCloud Job Details: %s", err) } @@ -691,7 +691,7 @@ func resourceCCENodeV3Read(ctx context.Context, d *schema.ResourceData, meta int } clusterID := d.Get("cluster_id").(string) - node, err := nodes.Get(client, clusterID, d.Id()).Extract() + node, err := nodes.Get(client, clusterID, d.Id()) if err != nil { if _, ok := err.(golangsdk.ErrDefault404); ok { d.SetId("") @@ -856,7 +856,7 @@ func resourceCCENodeV3Update(ctx context.Context, d *schema.ResourceData, meta i updateOpts.Metadata.Name = d.Get("name").(string) clusterID := d.Get("cluster_id").(string) - _, err = nodes.Update(client, clusterID, d.Id(), updateOpts).Extract() + _, err = nodes.Update(client, clusterID, d.Id(), updateOpts) if err != nil { return fmterr.Errorf("error updating OpenTelekomCloud CCE node: %s", err) } @@ -941,7 +941,7 @@ func resourceCCENodeV3Delete(ctx context.Context, d *schema.ResourceData, meta i } clusterID := d.Get("cluster_id").(string) - if err := nodes.Delete(client, clusterID, d.Id()).ExtractErr(); err != nil { + if err := nodes.Delete(client, clusterID, d.Id()); err != nil { return fmterr.Errorf("error deleting OpenTelekomCloud CCE Cluster: %w", err) } stateConf := 
&resource.StateChangeConf{ @@ -1115,7 +1115,7 @@ func checkCCENodeV3PublicIpParams(d *schema.ResourceData) { func waitForCceNodeActive(cceClient *golangsdk.ServiceClient, clusterId, nodeId string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - n, err := nodes.Get(cceClient, clusterId, nodeId).Extract() + n, err := nodes.Get(cceClient, clusterId, nodeId) if err != nil { return nil, "", err } @@ -1128,7 +1128,7 @@ func waitForCceNodeDelete(cceClient *golangsdk.ServiceClient, clusterId, nodeId return func() (interface{}, string, error) { log.Printf("[DEBUG] Attempting to delete OpenTelekomCloud CCE Node %s.\n", nodeId) - r, err := nodes.Get(cceClient, clusterId, nodeId).Extract() + r, err := nodes.Get(cceClient, clusterId, nodeId) if err != nil { if _, ok := err.(golangsdk.ErrDefault404); ok { @@ -1156,7 +1156,7 @@ func waitForClusterAvailable(cceClient *golangsdk.ServiceClient, clusterId strin } } -func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts nodes.CreateOptsBuilder, clusterID string) (*nodes.Nodes, string) { +func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts nodes.CreateOpts, clusterID string) (*nodes.Nodes, string) { stateCluster := &resource.StateChangeConf{ Target: []string{"Available"}, Refresh: waitForClusterAvailable(client, clusterID), @@ -1168,7 +1168,7 @@ func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts if stateErr != nil { log.Printf("[INFO] Cluster Unavailable %s.\n", stateErr) } - node, err := nodes.Create(client, clusterID, opts).Extract() + node, err := nodes.Create(client, clusterID, opts) if err != nil { if _, ok := err.(golangsdk.ErrDefault403); ok { return recursiveCreate(ctx, client, opts, clusterID) diff --git a/releasenotes/notes/cce-refactor-nodes-64c5205dc4a2b762.yaml b/releasenotes/notes/cce-refactor-nodes-64c5205dc4a2b762.yaml new file mode 100644 index 000000000..446b989fe --- /dev/null +++ 
b/releasenotes/notes/cce-refactor-nodes-64c5205dc4a2b762.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + **[CCE]** Refactored v3 node tests and fixed the resource to match changes in gophertelekomcloud (`#2756 `_)