From de95481affea496ceb043113a347f64a61abe42c Mon Sep 17 00:00:00 2001 From: Lukas Krejci Date: Fri, 22 Mar 2024 16:18:57 +0100 Subject: [PATCH] KSPACE-46: Use SpaceProvisionerConfig for space placement decisions (#991) --- ...in.dev.openshift.com_toolchainconfigs.yaml | 11 +- .../deactivation_controller_test.go | 15 +- .../space_completion_controller_test.go | 51 ++-- controllers/toolchainconfig/configuration.go | 8 +- .../toolchainconfig/configuration_test.go | 5 +- .../toolchainconfig_controller_test.go | 15 +- controllers/usersignup/approval_test.go | 117 ++++---- .../usersignup/usersignup_controller_test.go | 280 +++++++++-------- go.mod | 4 +- go.sum | 8 +- main.go | 4 +- pkg/capacity/manager.go | 205 ++++++++----- pkg/capacity/manager_test.go | 283 ++++++++++-------- test/spaceprovisionerconfig/util.go | 35 +++ 14 files changed, 571 insertions(+), 470 deletions(-) create mode 100644 test/spaceprovisionerconfig/util.go diff --git a/config/crd/bases/toolchain.dev.openshift.com_toolchainconfigs.yaml b/config/crd/bases/toolchain.dev.openshift.com_toolchainconfigs.yaml index 5952c8a12..b21b922d6 100644 --- a/config/crd/bases/toolchain.dev.openshift.com_toolchainconfigs.yaml +++ b/config/crd/bases/toolchain.dev.openshift.com_toolchainconfigs.yaml @@ -53,19 +53,20 @@ spec: type: boolean type: object capacityThresholds: - description: Keeps parameters necessary for configuring capacity - limits + description: 'Keeps parameters necessary for configuring capacity + limits Deprecated: This is no longer used for anything.' properties: maxNumberOfSpacesPerMemberCluster: additionalProperties: type: integer - description: Contains a map of maximal number of spaces that + description: 'Contains a map of maximal number of spaces that can be provisioned per member cluster mapped by the cluster - name + name Deprecated: This is no longer used for anything.' 
type: object x-kubernetes-map-type: atomic resourceCapacityThreshold: - description: Contains capacity threshold configuration + description: 'Contains capacity threshold configuration Deprecated: + This is no longer used for anything.' properties: defaultThreshold: description: It is the default capacity threshold (in diff --git a/controllers/deactivation/deactivation_controller_test.go b/controllers/deactivation/deactivation_controller_test.go index 3aa53f81b..c9bcb3183 100644 --- a/controllers/deactivation/deactivation_controller_test.go +++ b/controllers/deactivation/deactivation_controller_test.go @@ -45,9 +45,7 @@ const ( ) func TestReconcile(t *testing.T) { - config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.CapacityThresholds().MaxNumberOfSpaces( - testconfig.PerMemberCluster("member1", 321)), - testconfig.Deactivation().DeactivatingNotificationDays(3)) + config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.Deactivation().DeactivatingNotificationDays(3)) // given logf.SetLogger(zap.New(zap.UseDevMode(true))) @@ -65,7 +63,6 @@ func TestReconcile(t *testing.T) { states.SetDeactivating(userSignupFoobar, true) t.Run("controller should not deactivate user", func(t *testing.T) { - // the time since the mur was provisioned is within the deactivation timeout period for the 'deactivate30' tier t.Run("usersignup should not be deactivated - deactivate30 (30 days)", func(t *testing.T) { // given @@ -137,10 +134,7 @@ func TestReconcile(t *testing.T) { // a user that belongs to the deactivation domain excluded list t.Run("user deactivation excluded", func(t *testing.T) { // given - config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.CapacityThresholds().MaxNumberOfSpaces( - testconfig.PerMemberCluster("member1", 321)), - testconfig.Deactivation().DeactivatingNotificationDays(3), - ) + config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.Deactivation().DeactivatingNotificationDays(3)) restore := 
commontest.SetEnvVarAndRestore(t, "HOST_OPERATOR_DEACTIVATION_DOMAINS_EXCLUDED", "@redhat.com") defer restore() murProvisionedTime := &metav1.Time{Time: time.Now().Add(-time.Duration(expectedDeactivationTimeoutDeactivate30Tier*24) * time.Hour)} @@ -155,11 +149,9 @@ func TestReconcile(t *testing.T) { require.Equal(t, time.Duration(0), res.RequeueAfter, "requeueAfter should not be set") assertThatUserSignupDeactivated(t, cl, username, false) }) - }) // in these tests, the controller should (eventually) deactivate the user t.Run("controller should deactivate user", func(t *testing.T) { - userSignupFoobar := userSignupWithEmail(username, "foo@bar.com") t.Run("usersignup should be marked as deactivating - deactivate30 (30 days)", func(t *testing.T) { // given @@ -288,7 +280,6 @@ func TestReconcile(t *testing.T) { assertThatUserSignupDeactivated(t, cl, username, true) AssertMetricsCounterEquals(t, 1, metrics.UserSignupAutoDeactivatedTotal) }) - }) t.Run("test usersignup deactivating state reset to false", func(t *testing.T) { @@ -355,7 +346,6 @@ func TestReconcile(t *testing.T) { }) t.Run("failures", func(t *testing.T) { - // cannot find UserTier t.Run("unable to get UserTier", func(t *testing.T) { // given @@ -433,7 +423,6 @@ func TestReconcile(t *testing.T) { assertThatUserSignupDeactivated(t, cl, username, false) }) }) - } func prepareReconcile(t *testing.T, name string, initObjs ...runtime.Object) (reconcile.Reconciler, reconcile.Request, *commontest.FakeClient) { diff --git a/controllers/spacecompletion/space_completion_controller_test.go b/controllers/spacecompletion/space_completion_controller_test.go index c949fbc1b..2990b2707 100644 --- a/controllers/spacecompletion/space_completion_controller_test.go +++ b/controllers/spacecompletion/space_completion_controller_test.go @@ -2,6 +2,7 @@ package spacecompletion_test import ( "context" + "errors" "fmt" "os" "testing" @@ -12,13 +13,12 @@ import ( "github.com/codeready-toolchain/host-operator/pkg/capacity" 
"github.com/codeready-toolchain/host-operator/pkg/counter" . "github.com/codeready-toolchain/host-operator/test" - "github.com/codeready-toolchain/toolchain-common/pkg/cluster" - "github.com/codeready-toolchain/toolchain-common/pkg/configuration" + hspc "github.com/codeready-toolchain/host-operator/test/spaceprovisionerconfig" "github.com/codeready-toolchain/toolchain-common/pkg/test" spacetest "github.com/codeready-toolchain/toolchain-common/pkg/test/space" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,14 +26,13 @@ import ( ) func TestCreateSpace(t *testing.T) { - member1 := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) - getMemberClusters := NewGetMemberClusters(member1) + spc := hspc.NewEnabledValidTenantSPC("member1") t.Run("without any field set - then it sets only tierName", func(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "without-fields", spacetest.WithTierName("")) - r, req, cl := prepareReconcile(t, space, getMemberClusters) + r, req, cl := prepareReconcile(t, space, spc) // when _, err := r.Reconcile(context.TODO(), req) @@ -49,7 +48,7 @@ func TestCreateSpace(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "without-targetCluster", spacetest.WithTierName("advanced")) - r, req, cl := prepareReconcile(t, space, getMemberClusters) + r, req, cl := prepareReconcile(t, space, spc) // when _, err := r.Reconcile(context.TODO(), req) @@ -66,7 +65,7 @@ func TestCreateSpace(t *testing.T) { space := spacetest.NewSpace(test.HostOperatorNs, "without-tierName", spacetest.WithTierName(""), spacetest.WithSpecTargetCluster("member2")) - r, req, cl := prepareReconcile(t, space, getMemberClusters) + r, req, cl := prepareReconcile(t, space, spc) // when _, err := r.Reconcile(context.TODO(), req) @@ -84,7 +83,7 
@@ func TestCreateSpace(t *testing.T) { space := spacetest.NewSpace(test.HostOperatorNs, "with-fields", spacetest.WithTierName("advanced"), spacetest.WithSpecTargetCluster("member2")) - r, req, cl := prepareReconcile(t, space, getMemberClusters) + r, req, cl := prepareReconcile(t, space, spc) // when _, err := r.Reconcile(context.TODO(), req) @@ -101,7 +100,7 @@ func TestCreateSpace(t *testing.T) { space := spacetest.NewSpace(test.HostOperatorNs, "without-fields", spacetest.WithTierName(""), spacetest.WithDeletionTimestamp()) - r, req, cl := prepareReconcile(t, space, getMemberClusters) + r, req, cl := prepareReconcile(t, space, spc) // when _, err := r.Reconcile(context.TODO(), req) @@ -117,7 +116,7 @@ func TestCreateSpace(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "without-members", spacetest.WithTierName("advanced")) - r, req, cl := prepareReconcile(t, space, NewGetMemberClusters()) + r, req, cl := prepareReconcile(t, space, nil) // when _, err := r.Reconcile(context.TODO(), req) @@ -133,7 +132,7 @@ func TestCreateSpace(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "not-found", spacetest.WithTierName("advanced")) - r, req, _ := prepareReconcile(t, space, NewGetMemberClusters()) + r, req, _ := prepareReconcile(t, space, nil) empty := test.NewFakeClient(t) empty.MockUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { return fmt.Errorf("shouldn't be called") @@ -151,7 +150,7 @@ func TestCreateSpace(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "get-fails", spacetest.WithTierName("advanced")) - r, req, cl := prepareReconcile(t, space, NewGetMemberClusters()) + r, req, cl := prepareReconcile(t, space, nil) cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { return fmt.Errorf("some error") } @@ -171,7 +170,7 @@ func TestCreateSpace(t *testing.T) { 
// given space := spacetest.NewSpace(test.HostOperatorNs, "oddity", spacetest.WithTierName("")) - r, req, cl := prepareReconcile(t, space, NewGetMemberClusters()) + r, req, cl := prepareReconcile(t, space, nil) cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { if key.Name == "config" { return fmt.Errorf("some error") @@ -189,16 +188,16 @@ func TestCreateSpace(t *testing.T) { HasSpecTargetCluster("") }) - t.Run("when Get ToolchainConfig fails and only targetCluster is missing", func(t *testing.T) { + t.Run("when listing SpaceProvisionerConfig fails and only targetCluster is missing", func(t *testing.T) { // given space := spacetest.NewSpace(test.HostOperatorNs, "oddity", spacetest.WithTierName("advanced")) - r, req, cl := prepareReconcile(t, space, NewGetMemberClusters()) - cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { - if key.Name == "config" { - return fmt.Errorf("some error") + r, req, cl := prepareReconcile(t, space, nil) + cl.MockList = func(ctx context.Context, list runtimeclient.ObjectList, opts ...runtimeclient.ListOption) error { + if _, ok := list.(*toolchainv1alpha1.SpaceProvisionerConfigList); ok { + return errors.New("some error") } - return cl.Client.Get(ctx, key, obj) + return cl.Client.List(ctx, list, opts...) 
} // when @@ -213,7 +212,7 @@ func TestCreateSpace(t *testing.T) { }) } -func prepareReconcile(t *testing.T, space *toolchainv1alpha1.Space, getMemberClusters cluster.GetMemberClustersFunc) (*spacecompletion.Reconciler, reconcile.Request, *test.FakeClient) { +func prepareReconcile(t *testing.T, space *toolchainv1alpha1.Space, member1SpaceProvisionerConfig *toolchainv1alpha1.SpaceProvisionerConfig) (*spacecompletion.Reconciler, reconcile.Request, *test.FakeClient) { require.NoError(t, os.Setenv("WATCH_NAMESPACE", test.HostOperatorNs)) s := scheme.Scheme err := apis.AddToScheme(s) @@ -224,14 +223,16 @@ func prepareReconcile(t *testing.T, space *toolchainv1alpha1.Space, getMemberClu t.Cleanup(counter.Reset) InitializeCounters(t, toolchainStatus) - conf := configuration.NewToolchainConfigObjWithReset(t) - - fakeClient := test.NewFakeClient(t, toolchainStatus, space, conf) + objs := []runtime.Object{toolchainStatus, space} + if member1SpaceProvisionerConfig != nil { + objs = append(objs, member1SpaceProvisionerConfig) + } + fakeClient := test.NewFakeClient(t, objs...) 
r := &spacecompletion.Reconciler{ Client: fakeClient, Namespace: test.HostOperatorNs, - ClusterManager: capacity.NewClusterManager(getMemberClusters, fakeClient), + ClusterManager: capacity.NewClusterManager(test.HostOperatorNs, fakeClient), } req := reconcile.Request{ NamespacedName: types.NamespacedName{ diff --git a/controllers/toolchainconfig/configuration.go b/controllers/toolchainconfig/configuration.go index 5eb286a83..921901668 100644 --- a/controllers/toolchainconfig/configuration.go +++ b/controllers/toolchainconfig/configuration.go @@ -82,7 +82,8 @@ func (c *ToolchainConfig) Environment() string { } func (c *ToolchainConfig) GitHubSecret() GitHubSecret { - return GitHubSecret{s: c.cfg.Host.ToolchainStatus.GitHubSecret, + return GitHubSecret{ + s: c.cfg.Host.ToolchainStatus.GitHubSecret, secrets: c.secrets, } } @@ -91,6 +92,7 @@ func (c *ToolchainConfig) AutomaticApproval() AutoApprovalConfig { return AutoApprovalConfig{c.cfg.Host.AutomaticApproval} } +// Deprecated: This is no longer used for anything. func (c *ToolchainConfig) CapacityThresholds() CapacityThresholdsConfig { return CapacityThresholdsConfig{c.cfg.Host.CapacityThresholds} } @@ -150,18 +152,22 @@ func (s SpaceConfig) SpaceBindingRequestIsEnabled() bool { return commonconfig.GetBool(s.spaceConfig.SpaceBindingRequestEnabled, false) } +// Deprecated: This is no longer used for anything. type CapacityThresholdsConfig struct { capacityThresholds toolchainv1alpha1.CapacityThresholds } +// Deprecated: This is no longer used for anything. func (c CapacityThresholdsConfig) MaxNumberOfSpacesSpecificPerMemberCluster() map[string]int { return c.capacityThresholds.MaxNumberOfSpacesPerMemberCluster } +// Deprecated: This is no longer used for anything. func (c CapacityThresholdsConfig) ResourceCapacityThresholdDefault() int { return commonconfig.GetInt(c.capacityThresholds.ResourceCapacityThreshold.DefaultThreshold, 80) } +// Deprecated: This is no longer used for anything. 
func (c CapacityThresholdsConfig) ResourceCapacityThresholdSpecificPerMemberCluster() map[string]int { return c.capacityThresholds.ResourceCapacityThreshold.SpecificPerMemberCluster } diff --git a/controllers/toolchainconfig/configuration_test.go b/controllers/toolchainconfig/configuration_test.go index 48d6eef94..0c3a57340 100644 --- a/controllers/toolchainconfig/configuration_test.go +++ b/controllers/toolchainconfig/configuration_test.go @@ -181,7 +181,6 @@ func TestForceLoadToolchainConfig(t *testing.T) { assert.Equal(t, "unit-tests", toolchainCfg.Environment()) // returns actual value assert.Equal(t, "def456", toolchainCfg.Notifications().MailgunAPIKey()) // returns actual value }) - }) t.Run("error retrieving config object", func(t *testing.T) { @@ -260,7 +259,7 @@ func TestCapacityThresholdsConfig(t *testing.T) { assert.Empty(t, toolchainCfg.CapacityThresholds().ResourceCapacityThresholdSpecificPerMemberCluster()) }) t.Run("non-default", func(t *testing.T) { - cfg := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)).ResourceCapacityThreshold(456, testconfig.PerMemberCluster("member1", 654))) + cfg := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)).ResourceCapacityThreshold(456, testconfig.PerMemberCluster("member1", 654))) //nolint:staticcheck // this will be removed once we also remove the deprecated method toolchainCfg := newToolchainConfig(cfg, map[string]map[string]string{}) assert.Equal(t, cfg.Spec.Host.CapacityThresholds.MaxNumberOfSpacesPerMemberCluster, toolchainCfg.CapacityThresholds().MaxNumberOfSpacesSpecificPerMemberCluster()) @@ -336,7 +335,7 @@ func TestNotifications(t *testing.T) { assert.Empty(t, toolchainCfg.Notifications().MailgunReplyToEmail()) assert.Equal(t, "mailgun", toolchainCfg.Notifications().NotificationDeliveryService()) assert.Equal(t, 
24*time.Hour, toolchainCfg.Notifications().DurationBeforeNotificationDeletion()) - assert.Equal(t, "sandbox", toolchainCfg.Notifications().TemplateSetName()) //default notificationEnv + assert.Equal(t, "sandbox", toolchainCfg.Notifications().TemplateSetName()) // default notificationEnv }) t.Run("non-default", func(t *testing.T) { cfg := commonconfig.NewToolchainConfigObjWithReset(t, diff --git a/controllers/toolchainconfig/toolchainconfig_controller_test.go b/controllers/toolchainconfig/toolchainconfig_controller_test.go index 62275ebfd..7197a8771 100644 --- a/controllers/toolchainconfig/toolchainconfig_controller_test.go +++ b/controllers/toolchainconfig/toolchainconfig_controller_test.go @@ -33,7 +33,6 @@ func TestReconcile(t *testing.T) { specificMemberConfig := testconfig.NewMemberOperatorConfigObj(testconfig.MemberStatus().RefreshPeriod("10s")) t.Run("success", func(t *testing.T) { - t.Run("config not found", func(t *testing.T) { hostCl := test.NewFakeClient(t) member1 := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) @@ -61,13 +60,12 @@ func TestReconcile(t *testing.T) { _, err = getMemberConfig(member2) assert.Error(t, err) assert.True(t, errors.IsNotFound(err)) - }) t.Run("config exists", func(t *testing.T) { config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), //nolint:staticcheck // this will be removed once we also remove the deprecated method testconfig.Members().Default(defaultMemberConfig.Spec), testconfig.Members().SpecificPerMemberCluster("member1", specificMemberConfig.Spec)) hostCl := test.NewFakeClient(t, config) @@ -181,12 +179,11 @@ func TestReconcile(t *testing.T) { }) t.Run("failures", func(t *testing.T) { - t.Run("error getting the toolchainconfig resource", func(t *testing.T) { 
// given config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), //nolint:staticcheck // this will be removed once we also remove the deprecated method testconfig.Members().Default(defaultMemberConfig.Spec), testconfig.Members().SpecificPerMemberCluster("member1", specificMemberConfig.Spec)) hostCl := test.NewFakeClient(t, config) @@ -215,7 +212,7 @@ func TestReconcile(t *testing.T) { // given config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321)), //nolint:staticcheck // this will be removed once we also remove the deprecated method testconfig.Members().Default(defaultMemberConfig.Spec), testconfig.Members().SpecificPerMemberCluster("member1", specificMemberConfig.Spec)) hostCl := test.NewFakeClient(t, config) @@ -247,7 +244,7 @@ func TestReconcile(t *testing.T) { // given config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) //nolint:staticcheck // this will be removed once we also remove the deprecated method hostCl := test.NewFakeClient(t, config) hostCl.MockCreate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.CreateOption) error { return fmt.Errorf("create error") @@ -275,7 +272,7 @@ func TestReconcile(t *testing.T) { t.Run("sync failed", func(t *testing.T) { // given config := 
commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), testconfig.Members().Default(defaultMemberConfig.Spec), testconfig.Members().SpecificPerMemberCluster("missing-member", specificMemberConfig.Spec), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) //nolint:staticcheck // this will be removed once we also remove the deprecated method hostCl := test.NewFakeClient(t, config) members := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) controller := newController(t, hostCl, members) @@ -304,7 +301,7 @@ func TestReconcile(t *testing.T) { func TestWrapErrorWithUpdateStatus(t *testing.T) { // given config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) + testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 321))) //nolint:staticcheck // this will be removed once we also remove the deprecated method hostCl := test.NewFakeClient(t, config) members := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) controller := newController(t, hostCl, members) diff --git a/controllers/usersignup/approval_test.go b/controllers/usersignup/approval_test.go index 43d5ddba3..edde2d8e6 100644 --- a/controllers/usersignup/approval_test.go +++ b/controllers/usersignup/approval_test.go @@ -9,14 +9,16 @@ import ( "github.com/codeready-toolchain/host-operator/pkg/capacity" "github.com/codeready-toolchain/host-operator/pkg/metrics" . 
"github.com/codeready-toolchain/host-operator/test" + hspc "github.com/codeready-toolchain/host-operator/test/spaceprovisionerconfig" commonconfig "github.com/codeready-toolchain/toolchain-common/pkg/configuration" + "github.com/codeready-toolchain/toolchain-common/pkg/test" commontest "github.com/codeready-toolchain/toolchain-common/pkg/test" testconfig "github.com/codeready-toolchain/toolchain-common/pkg/test/config" + spc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig" commonsignup "github.com/codeready-toolchain/toolchain-common/pkg/test/usersignup" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,15 +47,14 @@ func TestGetClusterIfApproved(t *testing.T) { toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval(). Enabled(true), - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + ) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -64,19 +65,17 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("with two clusters available, the second one has required cluster-role label", func(t *testing.T) { // given signup := commonsignup.NewUserSignup() + spc1 := hspc.NewEnabledValidSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval(). 
Enabled(true), ) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters( - NewMemberClusterWithoutClusterRoles(t, "member1", corev1.ConditionTrue), // no cluster-role label on this member - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue), // by default all member clusters will have the 'tenant' cluster-role - ) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -93,16 +92,13 @@ func TestGetClusterIfApproved(t *testing.T) { toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval(). Enabled(true)) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters( - // member1 doesn't have the cluster-role tenant but it's preferred one - NewMemberClusterWithoutClusterRoles(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue), // by default all member clusters will have the 'tenant' cluster-role - ) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -117,10 +113,9 @@ func TestGetClusterIfApproved(t 
*testing.T) { Enabled(true)) fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters() // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -130,12 +125,13 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("automatic approval not enabled and user not approved", func(t *testing.T) { // given - fakeClient := commontest.NewFakeClient(t, toolchainStatus, commonconfig.NewToolchainConfigObjWithReset(t)) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + fakeClient := commontest.NewFakeClient(t, toolchainStatus, commonconfig.NewToolchainConfigObjWithReset(t), spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -145,12 +141,13 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("ToolchainConfig not found and user not approved", func(t *testing.T) { // given - fakeClient := commontest.NewFakeClient(t, toolchainStatus) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, 
"member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -160,13 +157,14 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("ToolchainConfig not found and user manually approved without target cluster", func(t *testing.T) { // given - fakeClient := commontest.NewFakeClient(t, toolchainStatus) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) signup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -176,16 +174,14 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("automatic approval not enabled, user manually approved but no cluster has capacity", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds().ResourceCapacityThreshold(50), - ) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxMemoryUtilizationPercent(50)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(50)) + 
fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) signup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -195,17 +191,14 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("automatic approval not enabled, user manually approved and second cluster has capacity", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 2000)). 
- ResourceCapacityThreshold(62)) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(2000), spc.MaxMemoryUtilizationPercent(62)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(62)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) signup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -215,15 +208,14 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("automatic approval not enabled, user manually approved, no cluster has capacity but targetCluster is specified", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000)) + spc2 := hspc.NewEnabledValidTenantSPC("member2") + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) signup := commonsignup.NewUserSignup(commonsignup.ApprovedManually(), commonsignup.WithTargetCluster("member1")) // when - approved, clusterName, 
err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.NoError(t, err) @@ -235,15 +227,13 @@ func TestGetClusterIfApproved(t *testing.T) { t.Run("unable to get ToolchainConfig", func(t *testing.T) { // given fakeClient := commontest.NewFakeClient(t, toolchainStatus) - InitializeCounters(t, toolchainStatus) fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { return fmt.Errorf("some error") } InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.EqualError(t, err, "unable to get ToolchainConfig: some error") @@ -261,15 +251,34 @@ func TestGetClusterIfApproved(t *testing.T) { return fakeClient.Client.Get(ctx, key, obj, opts...) 
} InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) // when - approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(clusters, fakeClient)) + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) // then require.EqualError(t, err, "unable to get the optimal target cluster: unable to read ToolchainStatus resource: some error") assert.False(t, approved) assert.Equal(t, unknown, clusterName) }) + + t.Run("unable to read SpaceProvisionerConfig", func(t *testing.T) { + // given + fakeClient := commontest.NewFakeClient(t, toolchainStatus, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + fakeClient.MockList = func(ctx context.Context, list runtimeclient.ObjectList, opts ...runtimeclient.ListOption) error { + if _, ok := list.(*toolchainv1alpha1.SpaceProvisionerConfigList); ok { + return fmt.Errorf("some error") + } + return fakeClient.Client.List(ctx, list, opts...) 
+ } + InitializeCounters(t, toolchainStatus) + + // when + approved, clusterName, err := getClusterIfApproved(ctx, fakeClient, signup, capacity.NewClusterManager(test.HostOperatorNs, fakeClient)) + + // then + require.EqualError(t, err, "unable to get the optimal target cluster: failed to find the optimal space provisioner config: some error") + assert.False(t, approved) + assert.Equal(t, unknown, clusterName) + }) }) } diff --git a/controllers/usersignup/usersignup_controller_test.go b/controllers/usersignup/usersignup_controller_test.go index fe1231ea9..5eb8580eb 100644 --- a/controllers/usersignup/usersignup_controller_test.go +++ b/controllers/usersignup/usersignup_controller_test.go @@ -23,6 +23,7 @@ import ( tiertest "github.com/codeready-toolchain/host-operator/test/nstemplatetier" segmenttest "github.com/codeready-toolchain/host-operator/test/segment" spacebindingtest "github.com/codeready-toolchain/host-operator/test/spacebinding" + hspc "github.com/codeready-toolchain/host-operator/test/spaceprovisionerconfig" testusertier "github.com/codeready-toolchain/host-operator/test/usertier" "github.com/codeready-toolchain/toolchain-common/pkg/cluster" "github.com/codeready-toolchain/toolchain-common/pkg/condition" @@ -34,6 +35,7 @@ import ( murtest "github.com/codeready-toolchain/toolchain-common/pkg/test/masteruserrecord" testsocialevent "github.com/codeready-toolchain/toolchain-common/pkg/test/socialevent" spacetest "github.com/codeready-toolchain/toolchain-common/pkg/test/space" + spc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig" commonsignup "github.com/codeready-toolchain/toolchain-common/pkg/test/usersignup" "github.com/gofrs/uuid" @@ -50,16 +52,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var baseNSTemplateTier = tiertest.NewNSTemplateTier("base", "dev", "stage") -var base2NSTemplateTier = tiertest.NewNSTemplateTier("base2", "dev", "stage") -var deactivate30Tier = 
testusertier.NewUserTier("deactivate30", 30) -var deactivate80Tier = testusertier.NewUserTier("deactivate80", 80) -var event = testsocialevent.NewSocialEvent(test.HostOperatorNs, commonsocialevent.NewName(), - testsocialevent.WithUserTier(deactivate80Tier.Name), - testsocialevent.WithSpaceTier(base2NSTemplateTier.Name)) +var ( + baseNSTemplateTier = tiertest.NewNSTemplateTier("base", "dev", "stage") + base2NSTemplateTier = tiertest.NewNSTemplateTier("base2", "dev", "stage") + deactivate30Tier = testusertier.NewUserTier("deactivate30", 30) + deactivate80Tier = testusertier.NewUserTier("deactivate80", 80) + event = testsocialevent.NewSocialEvent(test.HostOperatorNs, commonsocialevent.NewName(), + testsocialevent.WithUserTier(deactivate80Tier.Name), + testsocialevent.WithSpaceTier(base2NSTemplateTier.Name)) +) func TestUserSignupCreateMUROk(t *testing.T) { - member := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") logf.SetLogger(zap.New(zap.UseDevMode(true))) for testname, userSignup := range map[string]*toolchainv1alpha1.UserSignup{ "manually approved with valid activation annotation": commonsignup.NewUserSignup( @@ -107,7 +111,7 @@ func TestUserSignupCreateMUROk(t *testing.T) { // given defer counter.Reset() config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), config, userSignup, baseNSTemplateTier, deactivate30Tier, deactivate80Tier, event) + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, config, userSignup, baseNSTemplateTier, deactivate30Tier, deactivate80Tier, event) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,internal": 0, @@ -177,7 +181,7 @@ func TestUserSignupCreateMUROk(t *testing.T) { } func 
TestUserSignupCreateSpaceAndSpaceBindingOk(t *testing.T) { - member := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") logf.SetLogger(zap.New(zap.UseDevMode(true))) for testname, userSignup := range map[string]*toolchainv1alpha1.UserSignup{ "without skip space creation annotation": commonsignup.NewUserSignup( @@ -209,7 +213,7 @@ func TestUserSignupCreateSpaceAndSpaceBindingOk(t *testing.T) { mur := newMasterUserRecord(userSignup, "member1", deactivate30Tier.Name, "foo") mur.Labels = map[string]string{toolchainv1alpha1.MasterUserRecordOwnerLabelKey: userSignup.Name} - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, mur, baseNSTemplateTier, base2NSTemplateTier, deactivate30Tier, deactivate80Tier, event) + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, mur, baseNSTemplateTier, base2NSTemplateTier, deactivate30Tier, deactivate80Tier, event) // when res, err := r.Reconcile(context.TODO(), req) @@ -273,7 +277,7 @@ func TestUserSignupCreateSpaceAndSpaceBindingOk(t *testing.T) { func TestDeletingUserSignupShouldNotUpdateMetrics(t *testing.T) { // given - member := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") defer counter.Reset() logf.SetLogger(zap.New(zap.UseDevMode(true))) userSignup := commonsignup.NewUserSignup( @@ -281,7 +285,7 @@ func TestDeletingUserSignupShouldNotUpdateMetrics(t *testing.T) { commonsignup.BeingDeleted(), commonsignup.WithStateLabel(toolchainv1alpha1.UserSignupStateLabelValueNotReady), commonsignup.WithAnnotation(toolchainv1alpha1.UserSignupActivationCounterAnnotationKey, "2")) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, 
baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,internal": 1, @@ -310,12 +314,11 @@ func TestDeletingUserSignupShouldNotUpdateMetrics(t *testing.T) { HaveMasterUserRecordsPerDomain(toolchainv1alpha1.Metric{ string(metrics.External): 12, }) - } func TestUserSignupVerificationRequiredMetric(t *testing.T) { // given - member := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") defer counter.Reset() logf.SetLogger(zap.New(zap.UseDevMode(true))) userSignup := commonsignup.NewUserSignup( @@ -323,7 +326,7 @@ func TestUserSignupVerificationRequiredMetric(t *testing.T) { ) // set verification required to true in spec only, status will be added during reconcile states.SetVerificationRequired(userSignup, true) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, baseNSTemplateTier) AssertMetricsCounterEquals(t, 0, metrics.UserSignupVerificationRequiredTotal) // nothing yet since not reconciled yet // when @@ -365,9 +368,9 @@ func TestUserSignupVerificationRequiredMetric(t *testing.T) { func TestUserSignupWithAutoApprovalWithoutTargetCluster(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, 
testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,external": 1, @@ -508,8 +511,8 @@ func TestUserSignupWithMissingEmailAddressFails(t *testing.T) { userSignup := commonsignup.NewUserSignup() userSignup.Spec.IdentityClaims.Email = "" - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ @@ -558,8 +561,8 @@ func TestUserSignupWithInvalidEmailHashLabelFails(t *testing.T) { userSignup.Spec.IdentityClaims.Email = "foo@redhat.com" - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,external": 1, @@ -601,8 +604,8 @@ func TestUpdateOfApprovedLabelFails(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - ready := 
NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, fakeClient := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") + r, req, fakeClient := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) fakeClient.MockUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { return fmt.Errorf("some error") } @@ -649,8 +652,8 @@ func TestUserSignupWithMissingEmailHashLabelFails(t *testing.T) { userSignup.Spec.IdentityClaims.Email = "foo@redhat.com" userSignup.Labels = map[string]string{"toolchain.dev.openshift.com/approved": "false"} - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,external": 1, @@ -689,14 +692,13 @@ func TestUserSignupWithMissingEmailHashLabelFails(t *testing.T) { } func TestNonDefaultNSTemplateTier(t *testing.T) { - // given customNSTemplateTier := tiertest.NewNSTemplateTier("custom", "dev", "stage") customUserTier := testusertier.NewUserTier("custom", 120) config := 
commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), testconfig.Tiers().DefaultUserTier("custom"), testconfig.Tiers().DefaultSpaceTier("custom")) userSignup := commonsignup.NewUserSignup() - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, config, customNSTemplateTier, customUserTier) // use custom tier + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spaceProvisionerConfig, userSignup, config, customNSTemplateTier, customUserTier) // use custom tier commonconfig.ResetCache() // reset the config cache so that the update config is picked up InitializeCounters(t, NewToolchainStatus( @@ -773,12 +775,10 @@ func TestNonDefaultNSTemplateTier(t *testing.T) { Exists(). HasSpecTargetCluster("member1"). HasTier(customUserTier.Name) - }) } func TestUserSignupFailedMissingTier(t *testing.T) { - type variation struct { description string config *toolchainv1alpha1.ToolchainConfig @@ -823,14 +823,14 @@ func TestUserSignupFailedMissingTier(t *testing.T) { Status: corev1.ConditionTrue, Reason: "ApprovedAutomatically", }) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1") - objs := []runtime.Object{userSignup, v.config} + objs := []runtime.Object{userSignup, v.config, spaceProvisionerConfig} if strings.Contains(v.description, "spacetier") { // when testing missing spacetier then create mur and usertier so that the error is about space tier objs = append(objs, newMasterUserRecord(userSignup, "member-1", deactivate30Tier.Name, "foo")) objs = append(objs, deactivate30Tier) } - r, req, _ := prepareReconcile(t, userSignup.Name, ready, objs...) // the tier does not exist + r, req, _ := prepareReconcile(t, userSignup.Name, objs...) 
// the tier does not exist commonconfig.ResetCache() // reset the config cache so that the update config is picked up InitializeCounters(t, NewToolchainStatus( @@ -893,12 +893,13 @@ func TestUnapprovedUserSignupWhenNoClusterReady(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - notReady := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionFalse), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionFalse)) - config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1))) - r, req, _ := prepareReconcile(t, userSignup.Name, notReady, userSignup, config, baseNSTemplateTier) + spc1 := spc.NewSpaceProvisionerConfig("member1Spc", test.HostOperatorNs, + spc.ReferencingToolchainCluster("member1"), spc.Enabled(true), spc.MaxNumberOfSpaces(1), spc.WithReadyConditionInvalid("intentionally invalid")) + spc2 := spc.NewSpaceProvisionerConfig("member2Spc", test.HostOperatorNs, + spc.ReferencingToolchainCluster("member2"), spc.Enabled(true), spc.WithReadyConditionInvalid("intentionally invalid")) + + config := commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, spc2, userSignup, config, baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "1,external": 2, @@ -956,13 +957,12 @@ func TestUnapprovedUserSignupWhenNoClusterReady(t *testing.T) { func TestUserSignupFailedNoClusterWithCapacityAvailable(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - noCapacity := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) + + spc1 := 
hspc.NewEnabledValidTenantSPC("member1", spc.MaxMemoryUtilizationPercent(60)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(60)) config := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.AutomaticApproval().Enabled(true), - testconfig.CapacityThresholds().ResourceCapacityThreshold(60)) - r, req, _ := prepareReconcile(t, userSignup.Name, noCapacity, userSignup, config, baseNSTemplateTier) + testconfig.AutomaticApproval().Enabled(true)) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, spc2, userSignup, config, baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1020,9 +1020,9 @@ func TestUserSignupFailedNoClusterWithCapacityAvailable(t *testing.T) { func TestUserSignupWithManualApprovalApproved(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.ApprovedManuallyAgo(time.Minute)) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spc1 := hspc.NewEnabledValidTenantSPC("member1") - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1155,9 +1155,9 @@ func TestUserSignupWithNoApprovalPolicyTreatedAsManualApproved(t *testing.T) { config := commonconfig.NewToolchainConfigObjWithReset(t) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spc1 := 
hspc.NewEnabledValidTenantSPC("member1") - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, baseNSTemplateTier, config, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, baseNSTemplateTier, config, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1229,7 +1229,7 @@ func TestUserSignupWithNoApprovalPolicyTreatedAsManualApproved(t *testing.T) { HasTier(baseNSTemplateTier.Name) t.Run("third reconcile", func(t *testing.T) { - //given + // given err = r.setSpaceToReady(mur.Name) require.NoError(t, err) // when @@ -1292,9 +1292,9 @@ func TestUserSignupWithNoApprovalPolicyTreatedAsManualApproved(t *testing.T) { func TestUserSignupWithManualApprovalNotApproved(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spc1 := hspc.NewEnabledValidTenantSPC("member1") - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1355,8 +1355,8 @@ func TestUserSignupWithAutoApprovalWithTargetCluster(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.WithTargetCluster("east")) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + 
spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1493,7 +1493,7 @@ func TestUserSignupWithMissingApprovalPolicyTreatedAsManual(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.WithTargetCluster("east")) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1553,7 +1553,6 @@ type MurOrSpaceCreateFails struct { } func TestUserSignupMUROrSpaceOrSpaceBindingCreateFails(t *testing.T) { - for _, testcase := range []MurOrSpaceCreateFails{ { testName: "create mur error", @@ -1569,7 +1568,6 @@ func TestUserSignupMUROrSpaceOrSpaceBindingCreateFails(t *testing.T) { }, } { t.Run(testcase.testName, func(t *testing.T) { - // given userSignup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) @@ -1578,8 +1576,8 @@ func TestUserSignupMUROrSpaceOrSpaceBindingCreateFails(t *testing.T) { space := NewSpace(userSignup, "member1", "foo", "base") - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - initObjs := []runtime.Object{userSignup, baseNSTemplateTier, deactivate30Tier} + spc1 := hspc.NewEnabledValidTenantSPC("member1") + initObjs := []runtime.Object{userSignup, baseNSTemplateTier, deactivate30Tier, spc1} if testcase.testName == "create space error" { // mur must exist first, space is created on the reconcile after the mur is 
created initObjs = append(initObjs, mur) @@ -1587,7 +1585,7 @@ func TestUserSignupMUROrSpaceOrSpaceBindingCreateFails(t *testing.T) { // mur and space must exist first, spacebinding is created on the reconcile after the space is created initObjs = append(initObjs, mur, space) } - r, req, fakeClient := prepareReconcile(t, userSignup.Name, ready, initObjs...) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, initObjs...) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1648,8 +1646,8 @@ func TestUserSignupMURReadFails(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, fakeClient := prepareReconcile(t, userSignup.Name, ready, userSignup) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, fakeClient := prepareReconcile(t, userSignup.Name, spc1, userSignup) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1694,8 +1692,8 @@ func TestUserSignupSetStatusApprovedByAdminFails(t *testing.T) { userSignup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) userSignup.Labels[toolchainv1alpha1.UserSignupStateLabelKey] = "approved" - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, fakeClient := prepareReconcile(t, userSignup.Name, ready, userSignup) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, fakeClient := prepareReconcile(t, userSignup.Name, spc1, userSignup) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1740,8 +1738,8 @@ func 
TestUserSignupSetStatusApprovedAutomaticallyFails(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, fakeClient := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, fakeClient := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1780,14 +1778,13 @@ func TestUserSignupSetStatusApprovedAutomaticallyFails(t *testing.T) { AssertMetricsCounterEquals(t, 0, metrics.UserSignupApprovedTotal) AssertMetricsCounterEquals(t, 1, metrics.UserSignupUniqueTotal) assert.Empty(t, userSignup.Status.Conditions) - } func TestUserSignupSetStatusNoClustersAvailableFails(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1830,7 +1827,6 @@ func TestUserSignupSetStatusNoClustersAvailableFails(t *testing.T) { assert.Equal(t, "pending", userSignup.Labels[toolchainv1alpha1.UserSignupStateLabelKey]) AssertMetricsCounterEquals(t, 0, metrics.UserSignupApprovedTotal) AssertMetricsCounterEquals(t, 1, 
metrics.UserSignupUniqueTotal) - } func TestUserSignupWithExistingMUROK(t *testing.T) { @@ -1859,8 +1855,8 @@ func TestUserSignupWithExistingMUROK(t *testing.T) { spacebinding := spacebindingtest.NewSpaceBinding("foo", "foo", "admin", userSignup.Name) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -1918,7 +1914,6 @@ func TestUserSignupWithExistingMUROK(t *testing.T) { "1,external": 1, "1,internal": 1, }) - }) } @@ -1937,8 +1932,8 @@ func TestUserSignupWithExistingMURDifferentUserIDOK(t *testing.T) { }, } - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, mur, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -2036,9 +2031,9 @@ func 
TestUserSignupWithExistingMURDifferentUserIDOK(t *testing.T) { func TestUserSignupPropagatedClaimsSynchronizedToMURWhenModified(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) + spc1 := hspc.NewEnabledValidTenantSPC("member1") - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( @@ -2084,7 +2079,7 @@ func TestUserSignupPropagatedClaimsSynchronizedToMURWhenModified(t *testing.T) { userSignup.Spec.IdentityClaims.PropagatedClaims.UserID = "314159265358979" // Reconcile the UserSignup again - r, req, _ = prepareReconcile(t, userSignup.Name, ready, userSignup, deactivate30Tier) + r, req, _ = prepareReconcile(t, userSignup.Name, spc1, userSignup, deactivate30Tier) res, err = r.Reconcile(context.TODO(), req) require.NoError(t, err) require.Equal(t, reconcile.Result{}, res) @@ -2101,8 +2096,8 @@ func TestUserSignupWithSpecialCharOK(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.WithUsername("foo#$%^bar@redhat.com")) - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, 
toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -2172,7 +2167,7 @@ func TestUserSignupDeactivatedAfterMURCreated(t *testing.T) { spacebinding := spacebindingtest.NewSpaceBinding("john-doe", "john-doe", "admin", userSignup.Name) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, space, spacebinding, + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ @@ -2230,7 +2225,7 @@ func TestUserSignupDeactivatedAfterMURCreated(t *testing.T) { t.Run("when MUR doesn't exist, then the condition should be set to Deactivated", func(t *testing.T) { // given - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 2, @@ -2322,7 +2317,7 @@ func TestUserSignupDeactivatedAfterMURCreated(t *testing.T) { }, } - r, req, _ := prepareReconcile(t, userSignup2.Name, NewGetMemberClusters(), userSignup2, + r, req, _ := prepareReconcile(t, userSignup2.Name, userSignup2, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, existingNotification) @@ -2405,7 +2400,7 @@ func TestUserSignupFailedToCreateDeactivationNotification(t *testing.T) { t.Run("when the deactivation notification cannot be created", func(t *testing.T) { // given - 
r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ @@ -2517,8 +2512,8 @@ func TestUserSignupReactivateAfterDeactivated(t *testing.T) { Reason: "NotificationCRCreated", }, } - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.UserSignupsPerActivationAndDomainMetricKey, toolchainv1alpha1.Metric{ "2,internal": 11, // 11 users signed-up 2 times, including our user above, even though she is not active at the moment @@ -2607,7 +2602,7 @@ func TestUserSignupReactivateAfterDeactivated(t *testing.T) { Reason: "NotificationCRCreated", }, } - r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, 
toolchainv1alpha1.Metric{ string(metrics.External): 2, @@ -2722,7 +2717,7 @@ func TestUserSignupDeactivatedWhenMURAndSpaceAndSpaceBindingExists(t *testing.T) key := test.NamespacedName(test.HostOperatorNs, userSignup.Name) t.Run("when MUR exists and not deactivated, nothing should happen", func(t *testing.T) { - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) err := r.setSpaceToReady(mur.Name) // given space is ready require.NoError(t, err) @@ -2767,11 +2762,10 @@ func TestUserSignupDeactivatedWhenMURAndSpaceAndSpaceBindingExists(t *testing.T) }) t.Run("when UserSignup deactivated and MUR and Space and SpaceBinding exists, then they should be deleted", func(t *testing.T) { - // Given states.SetDeactivated(userSignup, true) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -2919,7 +2913,7 @@ func TestUserSignupDeactivatingNotificationCreated(t *testing.T) { userSignup.Labels[toolchainv1alpha1.UserSignupStateLabelKey] = "approved" key := test.NamespacedName(test.HostOperatorNs, userSignup.Name) - r, req, _ := 
prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, mur, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ @@ -2983,7 +2977,7 @@ func TestUserSignupDeactivatingNotificationCreated(t *testing.T) { } // Prepare the reconciliation again, but this time include the notification that was previously created - r, req, _ = prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, ¬ifications.Items[0], + r, req, _ = prepareReconcile(t, userSignup.Name, userSignup, mur, ¬ifications.Items[0], commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) // Reconcile again @@ -3047,7 +3041,7 @@ func TestUserSignupBannedWithoutMURAndSpace(t *testing.T) { }, } - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, bannedUser, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, bannedUser, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3098,7 +3092,7 @@ func TestUserSignupVerificationRequired(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.VerificationRequired(0)) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := 
prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3194,7 +3188,7 @@ func TestUserSignupBannedMURAndSpaceExists(t *testing.T) { spacebinding := spacebindingtest.NewSpaceBinding("foo", "foo", "admin", userSignup.Name) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, space, spacebinding, bannedUser, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, mur, space, spacebinding, bannedUser, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3297,7 +3291,7 @@ func TestUserSignupListBannedUsersFails(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3371,7 +3365,7 @@ func TestUserSignupDeactivatedButMURDeleteFails(t *testing.T) { spacebinding := spacebindingtest.NewSpaceBinding("alice-mayweather", "alice-mayweather", "admin", 
userSignup.Name) - r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3391,7 +3385,6 @@ func TestUserSignupDeactivatedButMURDeleteFails(t *testing.T) { } t.Run("first reconcile", func(t *testing.T) { - // when _, err := r.Reconcile(context.TODO(), req) require.Error(t, err) @@ -3485,7 +3478,7 @@ func TestUserSignupDeactivatedButStatusUpdateFails(t *testing.T) { mur := murtest.NewMasterUserRecord(t, "john-doe", murtest.MetaNamespace(test.HostOperatorNs)) mur.Labels = map[string]string{toolchainv1alpha1.MasterUserRecordOwnerLabelKey: userSignup.Name} - r, req, fakeClient := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, mur, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, fakeClient := prepareReconcile(t, userSignup.Name, userSignup, mur, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3552,14 +3545,19 @@ func TestDeathBy100Signups(t *testing.T) { "Username length greater than maxlengthWithSuffix characters": {username: "longer-user-names@redhat.com", compliantUsername: "longer-user-names", replacedCompliantUsername: "longer-user-name"}, } { t.Run(testcase, func(t *testing.T) { - 
logf.SetLogger(zap.New(zap.UseDevMode(true))) userSignup := commonsignup.NewUserSignup( commonsignup.WithName(testusername.username), commonsignup.ApprovedManually()) + spc1 := hspc.NewEnabledValidTenantSPC("member1") initObjs := make([]runtime.Object, 0, 110) - initObjs = append(initObjs, userSignup, deactivate30Tier) - initObjs = append(initObjs, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + initObjs = append(initObjs, + userSignup, + deactivate30Tier, + commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), + baseNSTemplateTier, + spc1, + ) // create 100 MURs and Spaces that follow the naming pattern used by `generateCompliantUsername()`: `foo`, `foo-2`, ..., `foo-100` initObjs = append(initObjs, &toolchainv1alpha1.MasterUserRecord{ @@ -3580,10 +3578,7 @@ func TestDeathBy100Signups(t *testing.T) { spacetest.NewSpace(test.HostOperatorNs, fmt.Sprintf("%s-%d", testusername.replacedCompliantUsername, i))) } - initObjs = append(initObjs, baseNSTemplateTier) - - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, initObjs...) + r, req, _ := prepareReconcile(t, userSignup.Name, initObjs...) 
InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 100, @@ -3699,8 +3694,8 @@ func TestGenerateUniqueCompliantUsername(t *testing.T) { userSignup.Annotations[toolchainv1alpha1.SkipAutoCreateSpaceAnnotationKey] = params.skipSpaceCreation - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, baseNSTemplateTier, + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, baseNSTemplateTier, deactivate30Tier, params.conflictingObject, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus()) @@ -3763,8 +3758,8 @@ func TestUserSignupWithMultipleExistingMURNotOK(t *testing.T) { }, } - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, mur, mur2, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, mur2, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3822,7 +3817,7 @@ func TestApprovedManuallyUserSignupWhenNoMembersAvailable(t *testing.T) { // given userSignup := commonsignup.NewUserSignup(commonsignup.ApprovedManually()) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, commonconfig.NewToolchainConfigObjWithReset(t, 
testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3874,10 +3869,9 @@ func TestApprovedManuallyUserSignupWhenNoMembersAvailable(t *testing.T) { Status: corev1.ConditionFalse, Reason: "UserIsActive", }) - } -func prepareReconcile(t *testing.T, name string, getMemberClusters cluster.GetMemberClustersFunc, initObjs ...runtime.Object) (*Reconciler, reconcile.Request, *test.FakeClient) { +func prepareReconcile(t *testing.T, name string, initObjs ...runtime.Object) (*Reconciler, reconcile.Request, *test.FakeClient) { os.Setenv("WATCH_NAMESPACE", test.HostOperatorNs) metrics.Reset() @@ -3908,7 +3902,7 @@ func prepareReconcile(t *testing.T, name string, getMemberClusters cluster.GetMe Client: fakeClient, }, Scheme: s, - ClusterManager: capacity.NewClusterManager(getMemberClusters, fakeClient), + ClusterManager: capacity.NewClusterManager(test.HostOperatorNs, fakeClient), SegmentClient: segment.NewClient(segmenttest.NewClient()), } return r, newReconcileRequest(name), fakeClient @@ -3944,7 +3938,7 @@ func TestUsernameWithForbiddenPrefix(t *testing.T) { for _, name := range names { userSignup.Spec.IdentityClaims.PreferredUsername = fmt.Sprintf("%s%s", prefix, name) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -3989,7 +3983,7 @@ func TestUsernameWithForbiddenSuffixes(t 
*testing.T) { for _, name := range names { userSignup.Spec.IdentityClaims.PreferredUsername = fmt.Sprintf("%s%s", name, suffix) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -4041,7 +4035,7 @@ func TestChangedCompliantUsername(t *testing.T) { CompliantUsername: "foo-old", // outdated UserSignup CompliantUsername } // create the initial resources - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, baseNSTemplateTier, deactivate30Tier) InitializeCounters(t, NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.External): 1, @@ -4124,7 +4118,7 @@ func TestMigrateMur(t *testing.T) { t.Run("mur should be migrated", func(t *testing.T) { // given - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(), userSignup, baseNSTemplateTier, oldMur, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, userSignup, baseNSTemplateTier, oldMur, deactivate30Tier) InitializeCounters(t, NewToolchainStatus()) // when @@ -4136,7 +4130,6 @@ func TestMigrateMur(t *testing.T) { Exists(). HasTier(*deactivate30Tier). 
// tier name should be set HasLabelWithValue(toolchainv1alpha1.MasterUserRecordOwnerLabelKey, userSignup.Name) // other labels unchanged - }) } @@ -4333,14 +4326,12 @@ func TestUpdateMetricsByState(t *testing.T) { } func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { - t.Run("last target cluster annotation is not initially set but added when mur is created", func(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() - members := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, members, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, spc2, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus()) // when @@ -4360,11 +4351,10 @@ func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() userSignup.Annotations[toolchainv1alpha1.UserSignupLastTargetClusterAnnotationKey] = "member2" - members := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - // member2 cluster lacks capacity because the prepareReconcile only sets up the resource consumption for member1 so member2 is automatically excluded - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, members, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := 
hspc.NewEnabledValidTenantSPC("member1", spc.MaxMemoryUtilizationPercent(70)) + // member2 cluster lacks capacity because the prepareReconcile only sets up the resource consumption for member1 so member2 is automatically excluded + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(75)) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, spc2, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus()) // when @@ -4382,10 +4372,9 @@ func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() userSignup.Annotations[toolchainv1alpha1.UserSignupLastTargetClusterAnnotationKey] = "member2" - members := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, members, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, spc2, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus()) // set acceptable capacity for member2 cluster @@ -4413,8 +4402,8 @@ func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() userSignup.Annotations[toolchainv1alpha1.UserSignupLastTargetClusterAnnotationKey] = "member2" - members := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, members, userSignup, 
baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) InitializeCounters(t, NewToolchainStatus()) // when @@ -4434,8 +4423,8 @@ func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { // given userSignup := commonsignup.NewUserSignup() userSignupName := userSignup.Name - members := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, cl := prepareReconcile(t, userSignup.Name, members, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, cl := prepareReconcile(t, userSignup.Name, spc1, userSignup, baseNSTemplateTier, deactivate30Tier, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) cl.MockUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { s, ok := obj.(*toolchainv1alpha1.UserSignup) if ok && s.Annotations[toolchainv1alpha1.UserSignupLastTargetClusterAnnotationKey] == "member1" { @@ -4465,7 +4454,7 @@ func TestUserSignupLastTargetClusterAnnotation(t *testing.T) { } func TestUserSignupStatusNotReady(t *testing.T) { - member := NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue) + spc1 := hspc.NewEnabledValidTenantSPC("member1") logf.SetLogger(zap.New(zap.UseDevMode(true))) setup := func() (*toolchainv1alpha1.UserSignup, *toolchainv1alpha1.MasterUserRecord, *toolchainv1alpha1.Space, *toolchainv1alpha1.SpaceBinding) { @@ -4523,9 +4512,9 @@ func TestUserSignupStatusNotReady(t *testing.T) { } t.Run("until Space is provisioned", 
func(t *testing.T) { - //given + // given userSignup, mur, space, spacebinding := setup() - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) // when res, err := r.Reconcile(context.TODO(), req) require.NoError(t, err) @@ -4534,7 +4523,6 @@ func TestUserSignupStatusNotReady(t *testing.T) { err = r.Client.Get(context.TODO(), types.NamespacedName{Name: userSignup.Name, Namespace: req.Namespace}, userSignup) require.NoError(t, err) test.AssertConditionsMatch(t, userSignup.Status.Conditions, signupIncomplete...) - }) t.Run("when space is provisioned", func(t *testing.T) { @@ -4545,7 +4533,7 @@ func TestUserSignupStatusNotReady(t *testing.T) { Status: corev1.ConditionTrue, Reason: toolchainv1alpha1.SpaceProvisionedReason, }) - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) // when res, err := r.Reconcile(context.TODO(), req) // then @@ -4567,7 +4555,7 @@ func TestUserSignupStatusNotReady(t *testing.T) { Status: corev1.ConditionFalse, Reason: toolchainv1alpha1.SpaceUpdatingReason, }} - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, mur, space, spacebinding, 
commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) // when res, err := r.Reconcile(context.TODO(), req) require.NoError(t, err) @@ -4584,7 +4572,7 @@ func TestUserSignupStatusNotReady(t *testing.T) { userSignup, mur, space, spacebinding := setup() space.Status.Conditions = signupIncomplete userSignup.Status.Conditions = []toolchainv1alpha1.Condition{} - r, req, _ := prepareReconcile(t, userSignup.Name, NewGetMemberClusters(member), userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, mur, space, spacebinding, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier) // when res, err := r.Reconcile(context.TODO(), req) require.NoError(t, err) @@ -4662,8 +4650,8 @@ func TestUserReactivatingWhileOldSpaceExists(t *testing.T) { Reason: "UserIsActive", }, } - ready := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - r, req, _ := prepareReconcile(t, userSignup.Name, ready, userSignup, + spc1 := hspc.NewEnabledValidTenantSPC("member1") + r, req, _ := prepareReconcile(t, userSignup.Name, spc1, userSignup, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true)), baseNSTemplateTier, deactivate30Tier, mur, space) diff --git a/go.mod b/go.mod index 533da39a6..d7fd37b7f 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/codeready-toolchain/host-operator require ( - github.com/codeready-toolchain/api 
v0.0.0-20240227210924-371ddb054d87 - github.com/codeready-toolchain/toolchain-common v0.0.0-20240313081501-5cafefaa6598 + github.com/codeready-toolchain/api v0.0.0-20240322110702-5ab3840476e9 + github.com/codeready-toolchain/toolchain-common v0.0.0-20240322131000-8d44f7428e83 github.com/davecgh/go-spew v1.1.1 // indirect github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible diff --git a/go.sum b/go.sum index 8f2b17368..5b673a9f8 100644 --- a/go.sum +++ b/go.sum @@ -136,10 +136,10 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/codeready-toolchain/api v0.0.0-20240227210924-371ddb054d87 h1:eQLsrMqfjAzGfuO9t6pVxO4K6cUDKOMxEvl0ujQq/2I= -github.com/codeready-toolchain/api v0.0.0-20240227210924-371ddb054d87/go.mod h1:FO7kgXH1x1LqkF327D5a36u0WIrwjVCbeijPkzgwaZc= -github.com/codeready-toolchain/toolchain-common v0.0.0-20240313081501-5cafefaa6598 h1:06nit/nCQFVKp51ZtIOyY49ncmxEK5shJGTaM+Ogicw= -github.com/codeready-toolchain/toolchain-common v0.0.0-20240313081501-5cafefaa6598/go.mod h1:c2JxboVI7keMD5fx5bB7LwzowFYYTwbepJhzPWSYXVs= +github.com/codeready-toolchain/api v0.0.0-20240322110702-5ab3840476e9 h1:Lm7bFLrzfJzrUiRGVqtsSaZMpj+akLiR/fvAFjjE9gM= +github.com/codeready-toolchain/api v0.0.0-20240322110702-5ab3840476e9/go.mod h1:cfNN6YPX4TORvhhZXMSjSPesqAHlB3nD/WAfGe4WLKQ= +github.com/codeready-toolchain/toolchain-common v0.0.0-20240322131000-8d44f7428e83 h1:j+3snE8RGTyB5MdwPUqIfmAm9C2aScCni+ma1EveC4c= +github.com/codeready-toolchain/toolchain-common v0.0.0-20240322131000-8d44f7428e83/go.mod h1:OJ3L9aaTRMGjxr2WeH/9l6m5OjExwEK3Bp/+P+efoGg= github.com/coreos/bbolt 
v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= diff --git a/main.go b/main.go index 193c898c1..d69accafd 100644 --- a/main.go +++ b/main.go @@ -280,7 +280,7 @@ func main() { // nolint:gocyclo Namespace: namespace, Scheme: mgr.GetScheme(), SegmentClient: segmentClient, - ClusterManager: capacity.NewClusterManager(commoncluster.GetMemberClusters, mgr.GetClient()), + ClusterManager: capacity.NewClusterManager(namespace, mgr.GetClient()), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "UserSignup") os.Exit(1) @@ -335,7 +335,7 @@ func main() { // nolint:gocyclo if err = (&spacecompletion.Reconciler{ Client: mgr.GetClient(), Namespace: namespace, - ClusterManager: capacity.NewClusterManager(commoncluster.GetMemberClusters, mgr.GetClient()), + ClusterManager: capacity.NewClusterManager(namespace, mgr.GetClient()), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "SpaceCompletion") os.Exit(1) diff --git a/pkg/capacity/manager.go b/pkg/capacity/manager.go index 309b94785..bea0f1a3c 100644 --- a/pkg/capacity/manager.go +++ b/pkg/capacity/manager.go @@ -2,48 +2,50 @@ package capacity import ( "context" + "fmt" "sort" toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "github.com/codeready-toolchain/host-operator/controllers/toolchainconfig" "github.com/codeready-toolchain/host-operator/pkg/counter" "github.com/codeready-toolchain/toolchain-common/pkg/cluster" + "github.com/codeready-toolchain/toolchain-common/pkg/condition" - "github.com/pkg/errors" "k8s.io/apimachinery/pkg/types" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func hasNotReachedMaxNumberOfSpacesThreshold(config toolchainconfig.ToolchainConfig, counts 
counter.Counts) cluster.Condition { - return func(cluster *cluster.CachedToolchainCluster) bool { - numberOfSpaces := counts.SpacesPerClusterCounts[cluster.Name] - threshold := config.CapacityThresholds().MaxNumberOfSpacesSpecificPerMemberCluster()[cluster.Name] +type ( + spaceProvisionerConfigPredicate func(*toolchainv1alpha1.SpaceProvisionerConfig) bool +) + +func hasNotReachedMaxNumberOfSpacesThreshold(counts counter.Counts) spaceProvisionerConfigPredicate { + return func(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool { + numberOfSpaces := uint(counts.SpacesPerClusterCounts[spc.Spec.ToolchainCluster]) + threshold := spc.Spec.CapacityThresholds.MaxNumberOfSpaces return threshold == 0 || numberOfSpaces < threshold } } -func hasEnoughResources(config toolchainconfig.ToolchainConfig, status *toolchainv1alpha1.ToolchainStatus) cluster.Condition { - return func(cluster *cluster.CachedToolchainCluster) bool { - threshold, found := config.CapacityThresholds().ResourceCapacityThresholdSpecificPerMemberCluster()[cluster.Name] - if !found { - threshold = config.CapacityThresholds().ResourceCapacityThresholdDefault() - } +func hasEnoughMemoryCapacity(status *toolchainv1alpha1.ToolchainStatus) spaceProvisionerConfigPredicate { + return func(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool { + threshold := spc.Spec.CapacityThresholds.MaxMemoryUtilizationPercent if threshold == 0 { return true } for _, memberStatus := range status.Status.Members { - if memberStatus.ClusterName == cluster.Name { - return hasMemberREnoughResources(memberStatus, threshold) + if memberStatus.ClusterName == spc.Spec.ToolchainCluster { + return hasMemberEnoughMemoryCapacity(memberStatus, threshold) } } return false } } -func hasMemberREnoughResources(memberStatus toolchainv1alpha1.Member, threshold int) bool { +func hasMemberEnoughMemoryCapacity(memberStatus toolchainv1alpha1.Member, threshold uint) bool { if len(memberStatus.MemberStatus.ResourceUsage.MemoryUsagePerNodeRole) > 0 { for _, 
usagePerNode := range memberStatus.MemberStatus.ResourceUsage.MemoryUsagePerNodeRole { - if usagePerNode >= threshold { + if uint(usagePerNode) >= threshold { return false } } @@ -52,17 +54,52 @@ func hasMemberREnoughResources(memberStatus toolchainv1alpha1.Member, threshold return false } -func NewClusterManager(getMemberClusters cluster.GetMemberClustersFunc, cl runtimeclient.Client) *ClusterManager { +func isProvisioningEnabled() spaceProvisionerConfigPredicate { + return func(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool { + return spc.Spec.Enabled + } +} + +func isReady() spaceProvisionerConfigPredicate { + return func(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool { + return condition.IsTrue(spc.Status.Conditions, toolchainv1alpha1.ConditionReady) + } +} + +func hasPlacementRoles(placementRoles []string) spaceProvisionerConfigPredicate { + return func(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool { + if len(placementRoles) == 0 { + // by default it should pick the `tenant` placement role, if no specific placement role was provided + placementRoles = []string{cluster.RoleLabel(cluster.Tenant)} + } + + // filter member cluster having the required placement role + placementCheck: + for _, placement := range placementRoles { + for _, requiredPlacement := range spc.Spec.PlacementRoles { + if requiredPlacement == placement { + continue placementCheck + } + } + return false + } + + // all placement roles were matched + return true + } +} + +func NewClusterManager(namespace string, cl runtimeclient.Client) *ClusterManager { return &ClusterManager{ - getMemberClusters: getMemberClusters, - client: cl, + namespace: namespace, + client: cl, } } type ClusterManager struct { - getMemberClusters cluster.GetMemberClustersFunc - client runtimeclient.Client - lastUsed string + namespace string + client runtimeclient.Client + lastUsed string } // OptimalTargetClusterFilter is used by GetOptimalTargetCluster @@ -89,41 +126,55 @@ type OptimalTargetClusterFilter 
struct { // If the preferredCluster is provided and it is also one of the available clusters, then the same name is returned. // In case the preferredCluster was not provided or not found/available and the clusterRoles are provided then the candidates optimal cluster pool will be made out by only those matching the labels, if any available. func (b *ClusterManager) GetOptimalTargetCluster(ctx context.Context, optimalClusterFilter OptimalTargetClusterFilter) (string, error) { - config, err := toolchainconfig.GetToolchainConfig(b.client) - if err != nil { - return "", errors.Wrapf(err, "unable to get ToolchainConfig") - } - counts, err := counter.GetCounts() if err != nil { - return "", errors.Wrapf(err, "unable to get the number of provisioned spaces") + return "", fmt.Errorf("unable to get the number of provisioned spaces: %w", err) } status := &toolchainv1alpha1.ToolchainStatus{} if err := b.client.Get(ctx, types.NamespacedName{Namespace: optimalClusterFilter.ToolchainStatusNamespace, Name: toolchainconfig.ToolchainStatusName}, status); err != nil { - return "", errors.Wrapf(err, "unable to read ToolchainStatus resource") + return "", fmt.Errorf("unable to read ToolchainStatus resource: %w", err) + } + optimalSpaceProvisioners, err := b.getOptimalTargetClusters( + ctx, + optimalClusterFilter.PreferredCluster, + isReady(), + isProvisioningEnabled(), + hasPlacementRoles(optimalClusterFilter.ClusterRoles), + hasNotReachedMaxNumberOfSpacesThreshold(counts), + hasEnoughMemoryCapacity(status)) + if err != nil { + return "", fmt.Errorf("failed to find the optimal space provisioner config: %w", err) + } + + if len(optimalSpaceProvisioners) == 0 { + return "", nil } - optimalTargetClusters := getOptimalTargetClusters(optimalClusterFilter.PreferredCluster, b.getMemberClusters, optimalClusterFilter.ClusterRoles, hasNotReachedMaxNumberOfSpacesThreshold(config, counts), hasEnoughResources(config, status)) - if len(optimalTargetClusters) == 1 { - return 
optimalTargetClusters[0], nil + if len(optimalSpaceProvisioners) == 1 { + return optimalSpaceProvisioners[0].Spec.ToolchainCluster, nil } - for _, cluster := range optimalTargetClusters { - if cluster == b.lastUsed { - provisioned := counts.SpacesPerClusterCounts[cluster] + for _, spc := range optimalSpaceProvisioners { + clusterName := spc.Spec.ToolchainCluster + if clusterName == b.lastUsed { + provisioned := counts.SpacesPerClusterCounts[clusterName] if provisioned%50 != 0 { - return cluster, nil + return clusterName, nil } } } - sort.Slice(optimalTargetClusters, func(i, j int) bool { - provisioned1 := counts.SpacesPerClusterCounts[optimalTargetClusters[i]] - threshold1 := config.CapacityThresholds().MaxNumberOfSpacesSpecificPerMemberCluster()[optimalTargetClusters[i]] + sort.Slice(optimalSpaceProvisioners, func(i, j int) bool { + spc1 := optimalSpaceProvisioners[i] + cluster1 := spc1.Spec.ToolchainCluster + provisioned1 := counts.SpacesPerClusterCounts[cluster1] + threshold1 := spc1.Spec.CapacityThresholds.MaxNumberOfSpaces - provisioned2 := counts.SpacesPerClusterCounts[optimalTargetClusters[j]] - threshold2 := config.CapacityThresholds().MaxNumberOfSpacesSpecificPerMemberCluster()[optimalTargetClusters[j]] + spc2 := optimalSpaceProvisioners[j] + cluster2 := spc2.Spec.ToolchainCluster + provisioned2 := counts.SpacesPerClusterCounts[cluster2] + threshold2 := spc2.Spec.CapacityThresholds.MaxNumberOfSpaces // Let's round the number of provisioned users down to the closest multiple of 50 // This is a trick we need to do before comparing the capacity, so we can distribute the users in batches by 50 (if the clusters have the same limit) @@ -134,64 +185,52 @@ func (b *ClusterManager) GetOptimalTargetCluster(ctx context.Context, optimalClu return float64(provisioned1By50)/float64(threshold1) < float64(provisioned2By50)/float64(threshold2) }) - b.lastUsed = optimalTargetClusters[0] - return optimalTargetClusters[0], nil + b.lastUsed = 
optimalSpaceProvisioners[0].Spec.ToolchainCluster + return b.lastUsed, nil +} + +func matches(spc *toolchainv1alpha1.SpaceProvisionerConfig, predicates []spaceProvisionerConfigPredicate) bool { + for _, p := range predicates { + if !p(spc) { + return false + } + } + + return true } // getOptimalTargetClusters checks if a preferred target cluster was provided and available from the cluster pool. // If the preferred target cluster was not provided or not available, but a list of clusterRoles was provided, then it filters only the available clusters matching all those roles. // If no cluster roles were provided then it returns all the available clusters. -// The function returns a slice with an empty string if not optimal target clusters where found. -func getOptimalTargetClusters(preferredCluster string, getMemberClusters cluster.GetMemberClustersFunc, clusterRoles []string, conditions ...cluster.Condition) []string { - emptyTargetCluster := []string{""} - // Automatic cluster selection based on cluster readiness - members := getMemberClusters(append(conditions, cluster.Ready)...) - if len(members) == 0 { - return emptyTargetCluster - } - - // extract only names of the available clusters - var memberNames []string - for _, member := range members { - // if cluster-role labels were provided, it will check for matching on the member labels - // if no clusterRoles labels are required, then the function will return all member cluster with the `tenant` cluster role label - if hasClusterRoles(clusterRoles, member) { - memberNames = append(memberNames, member.Name) +// The function returns a slice of matching SpaceProvisionerConfigs. If there are no matches, the empty slice is represented by a nil value (which is the default value in Go). 
+func (b *ClusterManager) getOptimalTargetClusters(ctx context.Context, preferredCluster string, predicates ...spaceProvisionerConfigPredicate) ([]toolchainv1alpha1.SpaceProvisionerConfig, error) { + list := &toolchainv1alpha1.SpaceProvisionerConfigList{} + if err := b.client.List(ctx, list, runtimeclient.InNamespace(b.namespace)); err != nil { + return nil, err + } + + matching := make([]toolchainv1alpha1.SpaceProvisionerConfig, 0, len(list.Items)) + + for _, spc := range list.Items { + spc := spc + if matches(&spc, predicates) { + matching = append(matching, spc) } } - // return empty string if no members available with roles - if len(memberNames) == 0 { - return emptyTargetCluster + if len(matching) == 0 { + return nil, nil } // if the preferred cluster is provided and it is also one of the available clusters, then the same name is returned, otherwise, it returns the first available one if preferredCluster != "" { - for _, memberName := range memberNames { - if preferredCluster == memberName { - return []string{memberName} + for _, member := range matching { + if preferredCluster == member.Spec.ToolchainCluster { + return []toolchainv1alpha1.SpaceProvisionerConfig{member}, nil } } } // return the member names in case some were found - return memberNames -} - -func hasClusterRoles(clusterRoles []string, member *cluster.CachedToolchainCluster) bool { - if len(clusterRoles) == 0 { - // by default it should pick the `tenant` cluster role, if no specific cluster role was provided - clusterRoles = []string{cluster.RoleLabel(cluster.Tenant)} - } - - // filter member cluster having the required cluster role - for _, clusterRoleLabel := range clusterRoles { - if _, hasRole := member.Labels[clusterRoleLabel]; !hasRole { - // missing cluster role - return false - } - } - - // all cluster roles were matched - return true + return matching, nil } diff --git a/pkg/capacity/manager_test.go b/pkg/capacity/manager_test.go index 52ba9f14f..d63167f6a 100644 --- 
a/pkg/capacity/manager_test.go +++ b/pkg/capacity/manager_test.go @@ -2,6 +2,7 @@ package capacity_test import ( "context" + "errors" "fmt" "testing" @@ -10,14 +11,15 @@ import ( "github.com/codeready-toolchain/host-operator/pkg/counter" "github.com/codeready-toolchain/host-operator/pkg/metrics" . "github.com/codeready-toolchain/host-operator/test" + hspc "github.com/codeready-toolchain/host-operator/test/spaceprovisionerconfig" "github.com/codeready-toolchain/toolchain-common/pkg/cluster" commonconfig "github.com/codeready-toolchain/toolchain-common/pkg/configuration" commontest "github.com/codeready-toolchain/toolchain-common/pkg/test" testconfig "github.com/codeready-toolchain/toolchain-common/pkg/test/config" + spc "github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -40,17 +42,12 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with one cluster and enough capacity", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spaceProvisionerConfig := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spaceProvisionerConfig) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) - // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -64,16 +61,15 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with three clusters and enough capacity in all of them so it returns the with more capacity (the first one)", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 10000), testconfig.PerMemberCluster("member2", 300), testconfig.PerMemberCluster("member3", 400)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(10000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(300), spc.MaxMemoryUtilizationPercent(75)) + spc3 := hspc.NewEnabledValidTenantSPC("member3", spc.MaxNumberOfSpaces(400), spc.MaxMemoryUtilizationPercent(80)) + + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2, spc3) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member3", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -87,16 +83,15 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with three clusters and enough capacity in all of them so it returns the with more capacity (the third one)", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000), testconfig.PerMemberCluster("member3", 2000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + spc3 := hspc.NewEnabledValidTenantSPC("member3", spc.MaxNumberOfSpaces(2000), spc.MaxMemoryUtilizationPercent(80)) + + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2, spc3) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member3", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -109,16 +104,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters and enough capacity in both of them, but the second one is the preferred", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ PreferredCluster: "member2", @@ -133,16 +125,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters where the first one reaches resource threshold", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 60), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(60)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -156,16 +145,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters where the first one reaches max number of Spaces, so the second one is returned even when the first is defined as the preferred one", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 700), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 90), testconfig.PerMemberCluster("member2", 95))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(700), spc.MaxMemoryUtilizationPercent(90)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(95)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ PreferredCluster: "member1", @@ -180,16 +166,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters, none of them is returned since it reaches max number of Spaces, no matter what is defined as preferred", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1), testconfig.PerMemberCluster("member2", 1)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 60), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1), spc.MaxMemoryUtilizationPercent(60)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1), spc.MaxMemoryUtilizationPercent(95)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ PreferredCluster: "member2", @@ -204,16 +187,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters but only the second one has enough capacity - using the default values", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(). 
- ResourceCapacityThreshold(62)) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxMemoryUtilizationPercent(62)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(62)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -227,16 +207,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters but none of them has enough capacity - using the default memory values", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(). 
- ResourceCapacityThreshold(1)) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxMemoryUtilizationPercent(1)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxMemoryUtilizationPercent(1)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -250,16 +227,13 @@ func TestGetOptimalTargetCluster(t *testing.T) { t.Run("with two clusters and enough capacity in both of them but first one is not ready", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionFalse), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -271,21 +245,15 @@ func TestGetOptimalTargetCluster(t *testing.T) { assert.Equal(t, "member2", clusterName) }) - t.Run("with two clusters and enough capacity in both of them but passing specific cluster-role label", func(t *testing.T) { + t.Run("with two clusters and enough capacity in both of them but passing specific placement-role", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1000), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 70), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithoutClusterRoles(t, "member2", corev1.ConditionTrue), // member2 has capacity but doesn't have the required cluster role - ) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -298,21 +266,15 @@ func TestGetOptimalTargetCluster(t *testing.T) { assert.Equal(t, "member1", clusterName) // only member one has required label }) - t.Run("with two clusters and not enough capacity on the cluster with specific cluster-role label", func(t *testing.T) { + t.Run("with two clusters and not enough capacity on the cluster with specific placement-role", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). - MaxNumberOfSpaces(testconfig.PerMemberCluster("member1", 1), testconfig.PerMemberCluster("member2", 1000)). 
- ResourceCapacityThreshold(80, testconfig.PerMemberCluster("member1", 1), testconfig.PerMemberCluster("member2", 75))) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1), spc.MaxMemoryUtilizationPercent(1)) + spc2 := hspc.NewEnabledValidSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithoutClusterRoles(t, "member2", corev1.ConditionTrue), // member2 has capacity but doesn't have the required cluster role - ) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -325,21 +287,41 @@ func TestGetOptimalTargetCluster(t *testing.T) { assert.Equal(t, "", clusterName) // only member one has required label but no capacity }) - t.Run("with two clusters, the preferred one is returned if it has the required cluster-roles", func(t *testing.T) { + t.Run("with two clusters, the preferred one is returned if it has the required placement-roles", func(t *testing.T) { // given - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(75)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) InitializeCounters(t, toolchainStatus) - 
clusters := NewGetMemberClusters( - NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), - NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue)) // this is set as preferred // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ - PreferredCluster: "member2", // request specifically this member eve if it doesn't match the cluster-roles from below - ClusterRoles: []string{cluster.RoleLabel(cluster.Tenant)}, // set + PreferredCluster: "member2", // request specifically this member even if it doesn't match the cluster-roles from below + ClusterRoles: []string{spc.PlacementRole("tenant")}, // set + ToolchainStatusNamespace: commontest.HostOperatorNs, + }, + ) + + // then + require.NoError(t, err) + assert.Equal(t, "member2", clusterName) + }) + + // given + t.Run("choose one of the configured clusters because the preferred one is missing the SPC", func(t *testing.T) { + spc1 := hspc.NewEnabledValidTenantSPC("member1", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(1000), spc.MaxMemoryUtilizationPercent(70)) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2) + InitializeCounters(t, toolchainStatus) + + // when + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( + ctx, + capacity.OptimalTargetClusterFilter{ + PreferredCluster: "member3", // request specifically this member even if it doesn't match the cluster-roles from below + ClusterRoles: []string{spc.PlacementRole("tenant")}, // set ToolchainStatusNamespace: commontest.HostOperatorNs, }, ) @@ -350,18 +332,20 @@ func TestGetOptimalTargetCluster(t *testing.T) { }) t.Run("failures", func(t *testing.T) { - t.Run("unable to 
get ToolchainConfig", func(t *testing.T) { + t.Run("unable to list SpaceProvisionerConfigs", func(t *testing.T) { // given fakeClient := commontest.NewFakeClient(t, toolchainStatus) InitializeCounters(t, toolchainStatus) - fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { - return fmt.Errorf("some error") + fakeClient.MockList = func(ctx context.Context, list runtimeclient.ObjectList, opts ...runtimeclient.ListOption) error { + if _, ok := list.(*toolchainv1alpha1.SpaceProvisionerConfigList); ok { + return errors.New("some error") + } + return fakeClient.Client.List(ctx, list, opts...) } InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -369,7 +353,7 @@ func TestGetOptimalTargetCluster(t *testing.T) { ) // then - require.EqualError(t, err, "unable to get ToolchainConfig: some error") + require.EqualError(t, err, "failed to find the optimal space provisioner config: some error") assert.Equal(t, "", clusterName) }) @@ -383,10 +367,9 @@ func TestGetOptimalTargetCluster(t *testing.T) { return fakeClient.Client.Get(ctx, key, obj, opts...) 
} InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( ctx, capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -403,12 +386,10 @@ func TestGetOptimalTargetCluster(t *testing.T) { func TestGetOptimalTargetClusterInBatchesBy50WhenTwoClusterHaveTheSameUsage(t *testing.T) { // given ctx := context.TODO() - for _, limit := range []int{800, 1000, 1234, 2500, 10000} { + for _, limit := range []uint{800, 1000, 1234, 2500, 10000} { t.Run(fmt.Sprintf("for the given limit of max number of spaces per cluster: %d", limit), func(t *testing.T) { - for _, numberOfSpaces := range []int{0, 8, 50, 88, 100, 123, 555} { t.Run(fmt.Sprintf("when there is a number of spaces at the very beginning %d", numberOfSpaces), func(t *testing.T) { - toolchainStatus := NewToolchainStatus( WithMetric(toolchainv1alpha1.MasterUserRecordsPerDomainMetricKey, toolchainv1alpha1.Metric{ string(metrics.Internal): 1000, @@ -421,14 +402,13 @@ func TestGetOptimalTargetClusterInBatchesBy50WhenTwoClusterHaveTheSameUsage(t *t WithMember("member3", WithSpaceCount(numberOfSpaces), WithNodeRoleUsage("worker", 55), WithNodeRoleUsage("master", 50))) // member2 and member3 have the same capacity left and the member1 is full, so no one can be provisioned there - toolchainConfig := commonconfig.NewToolchainConfigObjWithReset(t, - testconfig.CapacityThresholds(). 
- MaxNumberOfSpaces(testconfig.PerMemberCluster("member2", limit), testconfig.PerMemberCluster("member3", limit))) + spc1 := hspc.NewEnabledValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2", spc.MaxNumberOfSpaces(limit)) + spc3 := hspc.NewEnabledValidTenantSPC("member3", spc.MaxNumberOfSpaces(limit)) - fakeClient := commontest.NewFakeClient(t, toolchainStatus, toolchainConfig) + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2, spc3) InitializeCounters(t, toolchainStatus) - clusters := NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member2", corev1.ConditionTrue), NewMemberClusterWithTenantRole(t, "member3", corev1.ConditionTrue)) - clusterBalancer := capacity.NewClusterManager(clusters, fakeClient) + clusterBalancer := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient) // now run in 4 cycles and expect that the users will be provisioned in batches of 50 member2CurrentCount := numberOfSpaces @@ -439,7 +419,7 @@ func TestGetOptimalTargetClusterInBatchesBy50WhenTwoClusterHaveTheSameUsage(t *t // this 50 users should go into member2 - it will be always 50 for i := 0; i < member2MissingTo50; i++ { t.Run(fmt.Sprintf("cycle %d user %d for member2", cycle, i), func(t *testing.T) { - //given + // given // even when the counter of the other member is decremented, it should still use the last used one // but we can decrement it only in the second cycle when the member3 has at least 50 Spaces if i == 2 && cycle > 1 { @@ -542,10 +522,9 @@ func TestGetOptimalTargetClusterWhenCounterIsNotInitialized(t *testing.T) { toolchainStatus := NewToolchainStatus( WithMember("member1", WithNodeRoleUsage("worker", 68), WithNodeRoleUsage("master", 65))) fakeClient := commontest.NewFakeClient(t, toolchainStatus, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) - clusters := 
NewGetMemberClusters(NewMemberClusterWithTenantRole(t, "member1", corev1.ConditionTrue)) // when - clusterName, err := capacity.NewClusterManager(clusters, fakeClient).GetOptimalTargetCluster( + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( context.TODO(), capacity.OptimalTargetClusterFilter{ ToolchainStatusNamespace: commontest.HostOperatorNs, @@ -556,3 +535,61 @@ func TestGetOptimalTargetClusterWhenCounterIsNotInitialized(t *testing.T) { require.EqualError(t, err, "unable to get the number of provisioned spaces: counter is not initialized") assert.Equal(t, "", clusterName) } + +func TestGetOptimalTargetClusterWithSpaceProvisionerConfig(t *testing.T) { + t.Run("explicitly disabled", func(t *testing.T) { + // given + toolchainStatus := NewToolchainStatus( + WithMember("member1", WithNodeRoleUsage("worker", 68), WithNodeRoleUsage("master", 65)), + WithMember("member2", WithNodeRoleUsage("worker", 68), WithNodeRoleUsage("master", 65))) + + spc1 := hspc.NewValidTenantSPC("member1") + spc2 := hspc.NewEnabledValidTenantSPC("member2") + + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + InitializeCounters(t, toolchainStatus) + + // when + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( + context.TODO(), + capacity.OptimalTargetClusterFilter{ + ToolchainStatusNamespace: commontest.HostOperatorNs, + }, + ) + + // then + require.NoError(t, err) + assert.Equal(t, "member2", clusterName) + }) + t.Run("not ready", func(t *testing.T) { + // given + toolchainStatus := NewToolchainStatus( + WithMember("member1", WithNodeRoleUsage("worker", 68), WithNodeRoleUsage("master", 65)), + WithMember("member2", WithNodeRoleUsage("worker", 68), WithNodeRoleUsage("master", 65))) + + spc1 := spc.NewSpaceProvisionerConfig( + "member1Spc", + 
commontest.HostOperatorNs, + spc.ReferencingToolchainCluster("member1"), + spc.WithReadyConditionInvalid("because we're testing it"), + spc.Enabled(true), + spc.WithPlacementRoles(cluster.RoleLabel(cluster.Role("tenant")))) + + spc2 := hspc.NewEnabledValidTenantSPC("member2") + + fakeClient := commontest.NewFakeClient(t, toolchainStatus, spc1, spc2, commonconfig.NewToolchainConfigObjWithReset(t, testconfig.AutomaticApproval().Enabled(true))) + InitializeCounters(t, toolchainStatus) + + // when + clusterName, err := capacity.NewClusterManager(commontest.HostOperatorNs, fakeClient).GetOptimalTargetCluster( + context.TODO(), + capacity.OptimalTargetClusterFilter{ + ToolchainStatusNamespace: commontest.HostOperatorNs, + }, + ) + + // then + require.NoError(t, err) + assert.Equal(t, "member2", clusterName) + }) +} diff --git a/test/spaceprovisionerconfig/util.go b/test/spaceprovisionerconfig/util.go new file mode 100644 index 000000000..dcb8fff18 --- /dev/null +++ b/test/spaceprovisionerconfig/util.go @@ -0,0 +1,35 @@ +package spaceprovisionerconfig + +import ( + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-common/pkg/test" + . 
"github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig" +) + +func NewEnabledValidTenantSPC(referencedToolchainCluster string, opts ...CreateOption) *toolchainv1alpha1.SpaceProvisionerConfig { + return NewSpaceProvisionerConfig(referencedToolchainCluster+"Spc", test.HostOperatorNs, + append(opts, + Enabled(true), + WithReadyConditionValid(), + ReferencingToolchainCluster(referencedToolchainCluster), + WithPlacementRoles(PlacementRole("tenant")))..., + ) +} + +func NewValidTenantSPC(referencedToolchainCluster string, opts ...CreateOption) *toolchainv1alpha1.SpaceProvisionerConfig { + return NewSpaceProvisionerConfig(referencedToolchainCluster+"Spc", test.HostOperatorNs, + append(opts, + WithReadyConditionValid(), + ReferencingToolchainCluster(referencedToolchainCluster), + WithPlacementRoles(PlacementRole("tenant")))..., + ) +} + +func NewEnabledValidSPC(referencedToolchainCluster string, opts ...CreateOption) *toolchainv1alpha1.SpaceProvisionerConfig { + return NewSpaceProvisionerConfig(referencedToolchainCluster+"Spc", test.HostOperatorNs, + append(opts, + Enabled(true), + WithReadyConditionValid(), + ReferencingToolchainCluster(referencedToolchainCluster))..., + ) +}