Skip to content

Commit

Permalink
test: fix v3 migration e2e tests
Browse files Browse the repository at this point in the history
Signed-off-by: Carlos Salas <carlos.salas@suse.com>
  • Loading branch information
salasberryfin committed Jul 15, 2024
1 parent ac3aa5c commit 19c32e2
Show file tree
Hide file tree
Showing 3 changed files with 67 additions and 25 deletions.
78 changes: 56 additions & 22 deletions test/e2e/specs/v1v3migrate_gitops.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ type MigrateV1V3UsingGitOpsSpecInput struct {
}

// MigrateV1V3UsingGitOpsSpec implements a spec that will create a cluster via Fleet and test that it
// automatically imports into Rancher Manager, then it will enbale v3 cluster feature and check that migration
// automatically imports into Rancher Manager, then it will enable v3 cluster feature and check that migration
// happened successfully.
func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV1V3UsingGitOpsSpecInput) {
var (
Expand Down Expand Up @@ -151,7 +151,7 @@ func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV
Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
}

validateV3RancherCluster := func() {
validateV3RancherCluster := func(migration bool) {
By("Waiting for the rancher cluster record to appear")
rancherClusters := &managementv3.ClusterList{}
selectors := []client.ListOption{
Expand Down Expand Up @@ -181,26 +181,31 @@ func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV
}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue())

By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig")
secretName := fmt.Sprintf("%s-kubeconfig", v3rancherCluster.Name)
if migration {
secretName = fmt.Sprintf("%s-capi-kubeconfig", capiCluster.Name)
}
turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
Getter: input.BootstrapClusterProxy.GetClient(),
SecretName: fmt.Sprintf("%s-kubeconfig", v3rancherCluster.Name),
SecretName: secretName,
Namespace: v3rancherCluster.Spec.FleetWorkspaceName,
RancherServerURL: input.RancherServerURL,
WriteToTempFile: true,
}, rancherKubeconfig)

turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
Command: "kubectl",
Args: []string{
"--kubeconfig",
rancherKubeconfig.TempFilePath,
"get",
"nodes",
"--insecure-skip-tls-verify",
},
}, rancherConnectRes)
Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig")
Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code")
Eventually(func() bool {
turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
Command: "kubectl",
Args: []string{
"--kubeconfig",
rancherKubeconfig.TempFilePath,
"get",
"nodes",
"--insecure-skip-tls-verify",
},
}, rancherConnectRes)
return rancherConnectRes.Error == nil && rancherConnectRes.ExitCode == 0
}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue(), "Failed to connect to Rancher cluster using kubeconfig")
}

BeforeEach(func() {
Expand Down Expand Up @@ -368,7 +373,7 @@ func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV
rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
} else {
// NOTE: this was the default previously in the chart locally and ok as
// we where loading the image into kind manually.
// we were loading the image into kind manually.
rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
}
testenv.UpgradeRancherTurtles(ctx, rtInput)
Expand All @@ -390,12 +395,42 @@ func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV
}), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())

// Annotate v1 cluster as migrated
Eventually(komega.Update(v1rancherCluster, func() {
v3rancherCluster.Annotations[input.V1ClusterMigratedAnnotation] = "true"
}), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
Eventually(komega.Get(v1rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
annotations := v1rancherCluster.GetAnnotations()
annotations[input.V1ClusterMigratedAnnotation] = "true"
v1rancherCluster.SetAnnotations(annotations)
err := input.BootstrapClusterProxy.GetClient().Update(ctx, v1rancherCluster)
Expect(err).NotTo(HaveOccurred(), "Failed to add 'migrated' annotation to Rancher v1 cluster")

By("Rancher V1 cluster should have the 'migrated' annotation")
Eventually(func() bool {
Eventually(komega.Get(v1rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
annotations := v1rancherCluster.GetAnnotations()

return annotations[input.V1ClusterMigratedAnnotation] == "true"
}, capiClusterCreateWait...).Should(BeTrue(), "Failed to detect 'migrated' annotation on CAPI cluster")

By("Redeploying Rancher after controller migration")
Eventually(func() error {
restartRes := new(turtlesframework.RunCommandResult)
turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
Command: "kubectl",
Args: []string{
"rollout",
"restart",
"deployment",
"--namespace",
e2e.RancherNamespace,
"rancher",
},
}, restartRes)

return restartRes.Error
}, capiClusterCreateWait...).Should(Succeed(), "Failed to restart Rancher deployment after controller migration")

// Check v3 cluster
validateV3RancherCluster()
By("Running checks on Rancher cluster after controller migration, check that v3 cluster is created")
validateV3RancherCluster(true)

if input.TestClusterReimport {
By("Deleting Rancher cluster record to simulate unimporting the cluster")
Expand Down Expand Up @@ -425,13 +460,12 @@ func MigrateV1V3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateV
Eventually(func() bool {
Eventually(komega.Get(capiCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed())
annotations := capiCluster.GetAnnotations()
fmt.Printf("Annotations: %v\n", annotations)

return annotations["imported"] != "true"
}, capiClusterCreateWait...).Should(BeTrue(), "CAPI cluster still contains the 'imported' annotation")

By("Rancher should be available after removing 'imported' annotation")
validateV3RancherCluster()
validateV3RancherCluster(false)
}
})

Expand Down
8 changes: 8 additions & 0 deletions test/e2e/suites/v1v3migration/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,14 @@ var _ = BeforeSuite(func() {
"rancherTurtles.features.managementv3-cluster.enabled": "false", // disable management.cattle.io/v3 controller
},
}

testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{
HelmBinaryPath: flagVals.HelmBinaryPath,
ChartsPath: flagVals.ChartPath,
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
})

if flagVals.UseEKS {
rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
Expand Down
6 changes: 3 additions & 3 deletions test/e2e/suites/v1v3migration/v1v3migration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import (
"github.com/rancher/turtles/test/e2e/specs"
)

var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel), func() {
var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Should be able to migrate to v3 import controller after provisioning a cluster and using namespace auto-import", Label(e2e.ShortTestLabel), func() {
BeforeEach(func() {
komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
komega.SetContext(ctx)
Expand Down Expand Up @@ -66,7 +66,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele
})
})

var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.DontRunLabel), func() {
var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Should be able to migrate to v3 import controller after provisioning a cluster and using namespace auto-import", Label(e2e.DontRunLabel), func() {
BeforeEach(func() {
komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
komega.SetContext(ctx)
Expand Down Expand Up @@ -101,7 +101,7 @@ var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CA
})
})

var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() {
var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Should be able to migrate to v3 import controller after provisioning a cluster and using namespace auto-import", Label(e2e.FullTestLabel), func() {
BeforeEach(func() {
komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient())
komega.SetContext(ctx)
Expand Down

0 comments on commit 19c32e2

Please sign in to comment.