Skip to content

Commit

Permalink
Merge pull request #4292 from faiq/faiq/remove-set-role
Browse files Browse the repository at this point in the history
fix: remove set nodes role
  • Loading branch information
k8s-ci-robot authored May 25, 2023
2 parents 8aa6461 + 6d908b4 commit 75ccd55
Show file tree
Hide file tree
Showing 4 changed files with 10 additions and 12 deletions.
7 changes: 1 addition & 6 deletions pkg/cloud/services/iamauth/reconcile.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ import (
infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)
Expand Down Expand Up @@ -105,11 +104,7 @@ func (s *Service) getARNForRole(role string) (string, error) {
}

func (s *Service) getRolesForWorkers(ctx context.Context) (map[string]struct{}, error) {
// previously this was the default role always added to the IAM authenticator config
// we'll keep this to not break existing behavior for users
allRoles := map[string]struct{}{
fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix): {},
}
allRoles := map[string]struct{}{}
if err := s.getRolesForMachineDeployments(ctx, allRoles); err != nil {
return nil, fmt.Errorf("failed to get roles from machine deployments %w", err)
}
Expand Down
11 changes: 7 additions & 4 deletions test/e2e/suites/managed/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,10 +101,13 @@ func ManagedClusterSpec(ctx context.Context, inputGetter func() ManagedClusterSp
verifySecretExists(ctx, fmt.Sprintf("%s-kubeconfig", input.ClusterName), input.Namespace.Name, bootstrapClient)
verifySecretExists(ctx, fmt.Sprintf("%s-user-kubeconfig", input.ClusterName), input.Namespace.Name, bootstrapClient)

ginkgo.By("Checking that aws-iam-authenticator config map exists")
workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
workloadClient := workloadClusterProxy.GetClient()
verifyConfigMapExists(ctx, "aws-auth", metav1.NamespaceSystem, workloadClient)
// this config map will not be created unless there are worker machines, or it is set via IAMAuthenticatorConfig on the managed control plane spec
if input.WorkerMachineCount > 0 {
ginkgo.By("Checking that aws-iam-authenticator config map exists")
workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
workloadClient := workloadClusterProxy.GetClient()
verifyConfigMapExists(ctx, "aws-auth", metav1.NamespaceSystem, workloadClient)
}
}

// DeleteClusterSpecInput is the input to DeleteClusterSpec.
Expand Down
2 changes: 1 addition & 1 deletion test/e2e/suites/managed/eks_ipv6_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ var _ = ginkgo.Describe("[managed] [general] [ipv6] EKS cluster tests", func() {
ClusterName: clusterName,
Flavour: EKSIPv6ClusterFlavor,
ControlPlaneMachineCount: 1, //NOTE: this cannot be zero as clusterctl returns an error
WorkerMachineCount: 1,
WorkerMachineCount: 0,
}
})

Expand Down
2 changes: 1 addition & 1 deletion test/e2e/suites/managed/upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ var _ = ginkgo.Describe("EKS Cluster upgrade test", func() {
ClusterName: clusterName,
Flavour: EKSControlPlaneOnlyFlavor, // TODO (richardcase) - change in the future when upgrades to machinepools work
ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error
WorkerMachineCount: 1,
WorkerMachineCount: 0,
KubernetesVersion: initialVersion,
}
})
Expand Down

0 comments on commit 75ccd55

Please sign in to comment.