From 555f4df0ac5bddaeef680acb95ef919d70f11d35 Mon Sep 17 00:00:00 2001
From: erdrix
Date: Mon, 9 Mar 2020 12:02:57 +0100
Subject: [PATCH 1/3] implement data configuration at DC level & unit tests

---
 .../db.orange.com_cassandraclusters_crd.yaml  |  9 ++
 .../db/v1alpha1/cassandracluster_types.go     | 75 ++++++++++++++
 pkg/controller/cassandracluster/generator.go  | 14 +--
 .../cassandracluster/generator_test.go        | 49 +++++++---
 pkg/controller/cassandracluster/reconcile.go  | 38 ++++---
 .../cassandracluster-2DC-default.yaml         | 98 -------------------
 .../testdata/cassandracluster-2DC.yaml        |  2 +
 7 files changed, 151 insertions(+), 134 deletions(-)
 delete mode 100644 pkg/controller/cassandracluster/testdata/cassandracluster-2DC-default.yaml

diff --git a/deploy/crds/db.orange.com_cassandraclusters_crd.yaml b/deploy/crds/db.orange.com_cassandraclusters_crd.yaml
index 131653e40..2ed0202d7 100644
--- a/deploy/crds/db.orange.com_cassandraclusters_crd.yaml
+++ b/deploy/crds/db.orange.com_cassandraclusters_crd.yaml
@@ -1460,6 +1460,15 @@ spec:
                     description: DC allow to configure Cassandra RC according to kubernetes
                       nodeselector labels
                     properties:
+                      dataCapacity:
+                        description: Define the Capacity for Persistent Volume Claims
+                          in the local storage
+                        pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$
+                        type: string
+                      dataStorageClass:
+                        description: Define StorageClass for Persistent Volume Claims
+                          in the local storage.
+                        type: string
                       labels:
                         additionalProperties:
                           type: string
diff --git a/pkg/apis/db/v1alpha1/cassandracluster_types.go b/pkg/apis/db/v1alpha1/cassandracluster_types.go
index 0a9c09d65..016a688eb 100644
--- a/pkg/apis/db/v1alpha1/cassandracluster_types.go
+++ b/pkg/apis/db/v1alpha1/cassandracluster_types.go
@@ -502,6 +502,74 @@ func (cc *CassandraCluster) InitCassandraRackList() int {
 	return nbRack
 }
 
+// GetDataCapacityForDC sends back the data capacity of cassandra nodes to use for this dc
+func (cc *CassandraCluster) GetDataCapacityForDC(dcName string) string {
+	dataCapacity := cc.GetDataCapacityFromDCName(dcName)
+	return dataCapacity
+}
+
+// GetDataCapacityFromDCName sends back DataCapacity used for the given dcName
+func (cc *CassandraCluster) GetDataCapacityFromDCName(dcName string) string {
+	dcSize := cc.GetDCSize()
+	if dcSize < 1 {
+		return cc.Spec.DataCapacity
+	}
+
+	for dc := 0; dc < dcSize; dc++ {
+		if dcName == cc.GetDCName(dc) {
+			return cc.getDCDataCapacityFromIndex(dc)
+		}
+	}
+
+	return cc.Spec.DataCapacity
+}
+
+// getDCDataCapacityFromIndex sends back DataCapacity used for the given index
+func (cc *CassandraCluster) getDCDataCapacityFromIndex(dc int) string {
+	if dc >= cc.GetDCSize() {
+		return cc.Spec.DataCapacity
+	}
+	storeDC := cc.Spec.Topology.DC[dc]
+	if storeDC.DataCapacity == "" {
+		return cc.Spec.DataCapacity
+	}
+	return storeDC.DataCapacity
+}
+
+// GetDataStorageClassForDC sends back the data storage class of cassandra nodes to use for this dc
+func (cc *CassandraCluster) GetDataStorageClassForDC(dcName string) string {
+	dataStorageClass := cc.GetDataStorageClassFromDCName(dcName)
+	return dataStorageClass
+}
+
+// GetDataStorageClassFromDCName sends back DataStorageClass used for the given dcName
+func (cc *CassandraCluster) GetDataStorageClassFromDCName(dcName string) string {
+	dcSize := cc.GetDCSize()
+	if dcSize < 1 {
+		return cc.Spec.DataStorageClass
+	}
+
+	for dc := 0; dc < dcSize; dc++ {
+		if dcName == cc.GetDCName(dc) {
+			return cc.getDCDataStorageClassFromIndex(dc)
+		}
+	}
+
+	return cc.Spec.DataStorageClass
+}
+
+// getDCDataStorageClassFromIndex sends back DataStorageClass used for the given index
+func (cc *CassandraCluster) getDCDataStorageClassFromIndex(dc int) string {
+	if dc >= cc.GetDCSize() {
+		return cc.Spec.DataStorageClass
+	}
+	storeDC := cc.Spec.Topology.DC[dc]
+	if storeDC.DataStorageClass == "" {
+		return cc.Spec.DataStorageClass
+	}
+	return storeDC.DataStorageClass
+}
+
 // GetNodesPerRacks sends back the number of cassandra nodes to uses for this dc-rack
 func (cc *CassandraCluster) GetNodesPerRacks(dcRackName string) int32 {
 	nodesPerRacks := cc.GetDCNodesPerRacksFromDCRackName(dcRackName)
@@ -833,6 +901,13 @@ type DC struct {
 
 	//NumTokens : configure the CASSANDRA_NUM_TOKENS parameter which can be different for each DD
 	NumTokens *int32 `json:"numTokens,omitempty"`
+
+	//Define the Capacity for Persistent Volume Claims in the local storage
+	// +kubebuilder:validation:Pattern=^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$
+	DataCapacity string `json:"dataCapacity,omitempty"`
+
+	//Define StorageClass for Persistent Volume Claims in the local storage.
+	DataStorageClass string `json:"dataStorageClass,omitempty"`
 }
 
 // Rack allow to configure Cassandra Rack according to kubernetes nodeselector labels
diff --git a/pkg/controller/cassandracluster/generator.go b/pkg/controller/cassandracluster/generator.go
index fbce23534..a6ba6c607 100644
--- a/pkg/controller/cassandracluster/generator.go
+++ b/pkg/controller/cassandracluster/generator.go
@@ -221,11 +221,13 @@ func generateStorageConfigVolumeClaimTemplates(cc *api.CassandraCluster, labels
 	return pvcs, nil
 }
 
-func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]string) ([]v1.PersistentVolumeClaim, error) {
+func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]string, dcName string) ([]v1.PersistentVolumeClaim, error) {
 	var pvc []v1.PersistentVolumeClaim
+	dataCapacity := cc.GetDataCapacityForDC(dcName)
+	dataStorageClass := cc.GetDataStorageClassForDC(dcName)
 
-	if cc.Spec.DataCapacity == "" {
+	if dataCapacity == "" {
 		logrus.Warnf("[%s]: No Spec.DataCapacity was specified -> You Cluster WILL NOT HAVE PERSISTENT DATA!!!!!", cc.Name)
 		return pvc, nil
 	}
@@ -243,15 +245,15 @@ func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]str
 
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					"storage": generateResourceQuantity(cc.Spec.DataCapacity),
+					"storage": generateResourceQuantity(dataCapacity),
 				},
 			},
 		},
 	}
 
-	if cc.Spec.DataStorageClass != "" {
-		pvc[0].Spec.StorageClassName = &cc.Spec.DataStorageClass
+	if dataStorageClass != "" {
+		pvc[0].Spec.StorageClassName = &dataStorageClass
 	}
 
 	storageConfigPvcs, err := generateStorageConfigVolumeClaimTemplates(cc, labels)
@@ -271,7 +273,7 @@ func generateCassandraStatefulSet(cc *api.CassandraCluster, status *api.Cassandr
 	namespace := cc.Namespace
 
 	volumes := generateCassandraVolumes(cc)
-	volumeClaimTemplate, err := generateVolumeClaimTemplate(cc, labels)
+	volumeClaimTemplate, err := generateVolumeClaimTemplate(cc, labels, dcName)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/controller/cassandracluster/generator_test.go b/pkg/controller/cassandracluster/generator_test.go
index d9b230e58..b48f379d9 100644
--- a/pkg/controller/cassandracluster/generator_test.go
+++ b/pkg/controller/cassandracluster/generator_test.go
@@ -142,14 +142,11 @@ func TestGenerateCassandraStatefulSet(t *testing.T) {
 	dcRackName := fmt.Sprintf("%s-%s", dcName, rackName)
 
 	_, cc := helperInitCluster(t, "cassandracluster-2DC.yaml")
+	ccDefault := cc.DeepCopy()
 	cc.CheckDefaults()
 	labels, nodeSelector := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(cc, 0, 0)
 	sts, _ := generateCassandraStatefulSet(cc, &cc.Status, dcName, dcRackName, labels, nodeSelector, nil)
 
-	_, ccDefault := helperInitCluster(t, "cassandracluster-2DC-default.yaml")
-	ccDefault.CheckDefaults()
-	labelsDefault, nodeSelectorDefault := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(ccDefault, 0, 0)
-	stsDefault, _ := generateCassandraStatefulSet(ccDefault, &ccDefault.Status, dcName, dcRackName, labelsDefault, nodeSelectorDefault, nil)
 
 	assert.Equal(map[string]string{
 		"app": "cassandracluster",
@@ -167,18 +164,42 @@ func TestGenerateCassandraStatefulSet(t *testing.T) {
 			Effect: v1.TaintEffectNoSchedule}},
 		sts.Spec.Template.Spec.Tolerations)
 
-
+	checkVolumeClaimTemplates(t, labels, sts.Spec.VolumeClaimTemplates, "10Gi", "test-storage")
 	checkLiveAndReadiNessProbe(t, sts.Spec.Template.Spec.Containers,
 		1010, 201, 32, 7, 9, 1205, 151, 17, 50, 30)
-	checkLiveAndReadiNessProbe(t, stsDefault.Spec.Template.Spec.Containers,
-		60, 10, 10, 0, 0, 120, 20, 10, 0, 0)
-	checkVolumeClaimTemplates(t, labels, sts.Spec.VolumeClaimTemplates)
 	checkVolumeMount(t, sts.Spec.Template.Spec.Containers)
 	checkVarEnv(t, sts.Spec.Template.Spec.Containers, cc, dcRackName)
 
 	cc.Spec.StorageConfigs[0].PVCSpec = nil
 	_, err := generateCassandraStatefulSet(cc, &cc.Status, dcName, dcRackName, labels, nodeSelector, nil)
 	assert.NotEqual(t, err, nil)
+
+	// Test default setup
+	dcNameDefault := "dc2"
+	rackNameDefault := "rack1"
+	dcRackNameDefault := fmt.Sprintf("%s-%s", dcNameDefault, rackNameDefault)
+	setupForDefaultTest(ccDefault)
+
+	ccDefault.CheckDefaults()
+	labelsDefault, nodeSelectorDefault := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(ccDefault, 0, 0)
+	stsDefault, _ := generateCassandraStatefulSet(ccDefault, &ccDefault.Status, dcNameDefault, dcRackNameDefault, labelsDefault, nodeSelectorDefault, nil)
+
+	checkVolumeClaimTemplates(t, labels, stsDefault.Spec.VolumeClaimTemplates, "3Gi", "local-storage")
+	checkLiveAndReadiNessProbe(t, stsDefault.Spec.Template.Spec.Containers,
+		60, 10, 10, 0, 0, 120, 20, 10, 0, 0)
+}
+
+func setupForDefaultTest(cc *api.CassandraCluster) {
+	cc.Spec.LivenessFailureThreshold = nil
+	cc.Spec.LivenessSuccessThreshold = nil
+	cc.Spec.LivenessHealthCheckPeriod = nil
+	cc.Spec.LivenessHealthCheckTimeout = nil
+	cc.Spec.LivenessInitialDelaySeconds = nil
+	cc.Spec.ReadinessHealthCheckPeriod = nil
+	cc.Spec.ReadinessHealthCheckTimeout = nil
+	cc.Spec.ReadinessInitialDelaySeconds = nil
+	cc.Spec.ReadinessFailureThreshold = nil
+	cc.Spec.ReadinessSuccessThreshold = nil
 }
 
 func checkLiveAndReadiNessProbe(t *testing.T, containers []v1.Container,
@@ -213,12 +234,13 @@ func checkLiveAndReadiNessProbe(t *testing.T, containers []v1.Container,
 
-func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, pvcs []v1.PersistentVolumeClaim) {
+func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, pvcs []v1.PersistentVolumeClaim,
+	dataCapacity, dataStorageClass string) {
 	assert.Equal(t, len(pvcs), 3)
 	for _, pvc := range pvcs {
 		switch pvc.Name {
 		case "data":
-			assert.Equal(t, generateExpectedDataStoragePVC(expectedlabels), pvc)
+			assert.Equal(t, generateExpectedDataStoragePVC(expectedlabels, dataCapacity, dataStorageClass), pvc)
 		case "gc-logs":
 			assert.Equal(t, generateExpectedGcLogsStoragePVC(expectedlabels), pvc)
 		case "cassandra-logs":
@@ -229,10 +251,9 @@ func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, p
 	}
 }
 
-func generateExpectedDataStoragePVC(expectedlabels map[string]string) v1.PersistentVolumeClaim {
+func generateExpectedDataStoragePVC(expectedlabels map[string]string, dataCapacity, dataStorageClass string) v1.PersistentVolumeClaim {
 
-	expectedDataStorageQuantity, _ := resource.ParseQuantity("3Gi")
-	expectedDataStorageClassName := "local-storage"
+	expectedDataStorageQuantity, _ := resource.ParseQuantity(dataCapacity)
 
 	return v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
@@ -249,7 +270,7 @@ func generateExpectedDataStoragePVC(expectedlabels map[string]string) v1.Persist
 					"storage": expectedDataStorageQuantity,
 				},
 			},
-			StorageClassName: &expectedDataStorageClassName,
+			StorageClassName: &dataStorageClass,
 		},
 	}
 }
diff --git a/pkg/controller/cassandracluster/reconcile.go b/pkg/controller/cassandracluster/reconcile.go
index f5a660e40..d8a0cb1d7 100644
--- a/pkg/controller/cassandracluster/reconcile.go
+++ b/pkg/controller/cassandracluster/reconcile.go
@@ -138,23 +138,30 @@ func (rcc *ReconcileCassandraCluster) CheckNonAllowedChanges(cc *api.CassandraCl
 		cc.Spec.NodesPerRacks = oldCRD.Spec.NodesPerRacks
 		needUpdate = true
 	}
-	//DataCapacity change is forbidden
-	if cc.Spec.DataCapacity != oldCRD.Spec.DataCapacity {
-		logrus.WithFields(logrus.Fields{"cluster": cc.Name}).
-			Warningf("The Operator has refused the change on DataCapacity from [%s] to NewValue[%s]",
-				oldCRD.Spec.DataCapacity, cc.Spec.DataCapacity)
-		cc.Spec.DataCapacity = oldCRD.Spec.DataCapacity
-		needUpdate = true
-	}
-	//DataStorage change is forbidden
-	if cc.Spec.DataStorageClass != oldCRD.Spec.DataStorageClass {
-		logrus.WithFields(logrus.Fields{"cluster": cc.Name}).
-			Warningf("The Operator has refused the change on DataStorageClass from [%s] to NewValue[%s]",
-				oldCRD.Spec.DataStorageClass, cc.Spec.DataStorageClass)
-		cc.Spec.DataStorageClass = oldCRD.Spec.DataStorageClass
-		needUpdate = true
+
+	for dc := 0; dc < cc.GetDCSize(); dc++ {
+		dcName := cc.GetDCName(dc)
+		//DataCapacity change is forbidden
+		if cc.GetDataCapacityForDC(dcName) != oldCRD.GetDataCapacityForDC(dcName) {
+			logrus.WithFields(logrus.Fields{"cluster": cc.Name, "dcName": dcName}).
+				Warningf("The Operator has refused the change on DataCapacity from [%s] to NewValue[%s]",
+					oldCRD.GetDataCapacityForDC(dcName), cc.GetDataCapacityForDC(dcName))
+			cc.Spec.DataCapacity = oldCRD.Spec.DataCapacity
+			cc.Spec.Topology.DC[dc].DataCapacity = oldCRD.Spec.Topology.DC[dc].DataCapacity
+			needUpdate = true
+		}
+		//DataStorageClass change is forbidden
+		if cc.GetDataStorageClassForDC(dcName) != oldCRD.GetDataStorageClassForDC(dcName) {
+			logrus.WithFields(logrus.Fields{"cluster": cc.Name, "dcName": dcName}).
+				Warningf("The Operator has refused the change on DataStorageClass from [%s] to NewValue[%s]",
+					oldCRD.GetDataStorageClassForDC(dcName), cc.GetDataStorageClassForDC(dcName))
+			cc.Spec.DataStorageClass = oldCRD.Spec.DataStorageClass
+			cc.Spec.Topology.DC[dc].DataStorageClass = oldCRD.Spec.Topology.DC[dc].DataStorageClass
+			needUpdate = true
+		}
 	}
+
 	if needUpdate {
 		status.LastClusterAction = api.ActionCorrectCRDConfig.Name
 		ClusterActionMetric.set(api.ActionCorrectCRDConfig, cc.Name)
@@ -208,7 +215,6 @@ func (rcc *ReconcileCassandraCluster) CheckNonAllowedChanges(cc *api.CassandraCl
 				status.CassandraRackStatus[dcRackName].CassandraLastAction.EndTime = nil
 			}
 		}
-
 	}
 
 	return false
diff --git a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC-default.yaml b/pkg/controller/cassandracluster/testdata/cassandracluster-2DC-default.yaml
deleted file mode 100644
index c0f101efe..000000000
--- a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC-default.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-apiVersion: "db.orange.com/v1alpha1"
-kind: "CassandraCluster"
-metadata:
-  name: cassandra-demo
-  labels:
-    cluster: k8s.pic
-  namespace: ns
-spec:
-  nodesPerRacks: 1
-  cassandraImage: cassandra:latest
-  restartCountBeforePodDeletion: 3
-  imagePullSecret:
-    name: advisedev # To authenticate on docker registry
-  rollingPartition: 0
-  service:
-    annotations:
-      external-dns.alpha.kubernetes.io/hostname: my.custom.domain.com.
-  pod:
-    annotations:
-      exemple.com/test: my.custom.annotation
-  tolerations:
-  - key: my_custom_taint
-    operator: Exists
-    effect: NoSchedule
-  dataCapacity: "3Gi"
-  dataStorageClass: "local-storage"
-  hardAntiAffinity: false
-  deletePVC: true
-  storageConfigs:
-    - mountPath: "/var/lib/cassandra/log"
-      name: "gc-logs"
-      pvcSpec:
-        accessModes:
-          - ReadWriteOnce
-        storageClassName: standard-wait
-        resources:
-          requests:
-            storage: 10Gi
-    - mountPath: "/var/log/cassandra"
-      name: "cassandra-logs"
-      pvcSpec:
-        accessModes:
-          - ReadWriteOnce
-        storageClassName: standard-wait
-        resources:
-          requests:
-            storage: 10Gi
-  sidecarConfigs:
-    - args: ["tail", "-F", "/var/log/cassandra/system.log"]
-      image: alpine
-      imagePullPolicy: Always
-      name: cassandra-logs
-      resources: &sidecar_resources
-        limits:
-          cpu: 50m
-          memory: 50Mi
-        requests:
-          cpu: 10m
-          memory: 10Mi
-      volumeMounts:
-        - mountPath: /var/log/cassandra
-          name: cassandra-logs
-    - args: ["tail", "-F", "/var/log/cassandra/gc.log.0.current"]
-      image: alpine
-      imagePullPolicy: Always
-      name: gc-logs
-      <<: *sidecar_resources
-      volumeMounts:
-        - mountPath: /var/log/cassandra
-          name: gc-logs
-  autoPilot: true
-  resources:
-    requests:
-      cpu: '1'
-      memory: 2Gi
-    limits:
-      cpu: '1'
-      memory: 2Gi
-  topology:
-    dc:
-      - name: dc1
-        labels:
-          location.dfy.orange.com/site : mts
-        rack:
-          - name: rack1
-            labels:
-              location.dfy.orange.com/street : street1
-          - name: rack2
-            labels:
-              location.dfy.orange.com/street : street2
-      - name: dc2
-        nodesPerRacks: 1
-        labels:
-          location.dfy.orange.com/site : mts
-        rack:
-          - name: rack1
-            labels:
-              location.dfy.orange.com/street : street3
diff --git a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml b/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
index 3a18eca31..071a3ec42 100644
--- a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
+++ b/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
@@ -89,6 +89,8 @@ spec:
   topology:
     dc:
       - name: dc1
+        dataCapacity: "10Gi"
+        dataStorageClass: "test-storage"
         labels:
          location.dfy.orange.com/site : mts
        rack:
From d362dd0fb847de4d16ce352cbc24787c8fc3084f Mon Sep 17 00:00:00 2001
From: erdrix
Date: Mon, 9 Mar 2020 13:31:47 +0100
Subject: [PATCH 2/3] update documentation about data storage configuration

---
 documentation/description.md | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/documentation/description.md b/documentation/description.md
index d3a92af83..92b7e865e 100644
--- a/documentation/description.md
+++ b/documentation/description.md
@@ -366,7 +366,7 @@ Jolokia, and uses it to connect.
 Cassandra is a stateful application. It needs to store data on disks. CassKop allows you to configure the type of
 storage you want to use.
 
-Storage can be configured using the `storage` property in `CassandraCluster.spec`
+Storage can be configured using the `storage` property in `CassandraCluster.spec` for a global, cluster-level configuration, or can be overridden at the `CassandraCluster.spec.topology.dc` level.
 
 > **Important:** Once the Cassandra cluster is deployed, the storage cannot be changed.
@@ -381,10 +381,23 @@ CassandraCluster fragment of persistent storage definition :
 
 ```
 # ...
+  # Global configuration
   dataCapacity: "300Gi"
   dataStorageClass: "local-storage"
   deletePVC: true
+  # ...
+  topology:
+    dc:
+      - name: dc1
+        # DC level configuration
+        dataCapacity: "10Gi"
+        dataStorageClass: "test-storage"
+        # ...
+      - name: dc2
+        # ...
+    # ...
 # ...
+
 ```
 
 - `dataCapacity` (required): Defines the size of the persistent volume claim, for example, "1000Gi".
@@ -393,6 +406,16 @@ CassandraCluster fragment of persistent storage definition :
   it can be any storage with high ssd througput.
 - `deletePVC`(optional): Boolean value which specifies if the Persistent Volume Claim has to be deleted when the
   cluster is deleted. Default is `false`.
+
+In this example, all StatefulSets related to `dc2` will use the default configuration for the `data` PV:
+
+- `dataCapacity`: "300Gi"
+- `dataStorageClass`: "local-storage"
+
+All StatefulSets related to `dc1` will use the DC-level configuration for the `data` PV:
+
+- `dataCapacity`: "10Gi"
+- `dataStorageClass`: "test-storage"
 
 > **WARNING**: Resizing persistent storage for existing CassandraCluster is not currently supported. You must decide the
 > necessary storage size before deploying the cluster.

From 3c3eb6a8a6da9997e4893c04b26ba8e9a450233c Mon Sep 17 00:00:00 2001
From: erdrix
Date: Tue, 10 Mar 2020 10:37:50 +0100
Subject: [PATCH 3/3] review corrections

---
 documentation/description.md                  | 22 +++----
 .../db/v1alpha1/cassandracluster_types.go     | 64 ++++++++-----------
 .../cassandracluster/generator_test.go        |  2 +-
 .../testdata/cassandracluster-2DC.yaml        |  8 +--
 4 files changed, 43 insertions(+), 53 deletions(-)

diff --git a/documentation/description.md b/documentation/description.md
index 92b7e865e..c532ec2e8 100644
--- a/documentation/description.md
+++ b/documentation/description.md
@@ -380,23 +380,23 @@ The `PersistentVolumes` are acquired using a `PersistentVolumeClaim` which is ma
 CassandraCluster fragment of persistent storage definition :
 
 ```
-# ...
+...
   # Global configuration
   dataCapacity: "300Gi"
   dataStorageClass: "local-storage"
   deletePVC: true
-  # ...
+  ...
   topology:
     dc:
       - name: dc1
         # DC level configuration
         dataCapacity: "10Gi"
         dataStorageClass: "test-storage"
-        # ...
-      - name: dc2
-        # ...
-    # ...
-# ...
+        ...
+      - name: dc2
+        ...
+    ...
+...
 ```
@@ -409,13 +409,13 @@ CassandraCluster fragment of persistent storage definition :
 
 In this example, all StatefulSets related to `dc2` will use the default configuration for the `data` PV:
 
-- `dataCapacity`: "300Gi"
-- `dataStorageClass`: "local-storage"
+- `dataCapacity`: 300Gi
+- `dataStorageClass`: local-storage
 
 All StatefulSets related to `dc1` will use the DC-level configuration for the `data` PV:
 
-- `dataCapacity`: "10Gi"
-- `dataStorageClass`: "test-storage"
+- `dataCapacity`: 10Gi
+- `dataStorageClass`: test-storage
 
 > **WARNING**: Resizing persistent storage for existing CassandraCluster is not currently supported. You must decide the
 > necessary storage size before deploying the cluster.
diff --git a/pkg/apis/db/v1alpha1/cassandracluster_types.go b/pkg/apis/db/v1alpha1/cassandracluster_types.go
index 016a688eb..ec10a17bd 100644
--- a/pkg/apis/db/v1alpha1/cassandracluster_types.go
+++ b/pkg/apis/db/v1alpha1/cassandracluster_types.go
@@ -504,70 +504,60 @@ func (cc *CassandraCluster) InitCassandraRackList() int {
 
 // GetDataCapacityForDC sends back the data capacity of cassandra nodes to use for this dc
 func (cc *CassandraCluster) GetDataCapacityForDC(dcName string) string {
-	dataCapacity := cc.GetDataCapacityFromDCName(dcName)
-	return dataCapacity
+	return cc.GetDataCapacityFromDCName(dcName)
 }
 
 // GetDataCapacityFromDCName sends back DataCapacity used for the given dcName
 func (cc *CassandraCluster) GetDataCapacityFromDCName(dcName string) string {
-	dcSize := cc.GetDCSize()
-	if dcSize < 1 {
-		return cc.Spec.DataCapacity
-	}
-
-	for dc := 0; dc < dcSize; dc++ {
-		if dcName == cc.GetDCName(dc) {
-			return cc.getDCDataCapacityFromIndex(dc)
+	dcIndex := cc.GetDCIndexFromDCName(dcName)
+	if dcIndex >= 0 {
+		dc := cc.getDCFromIndex(dcIndex)
+		if dc != nil && dc.DataCapacity != "" {
+			return dc.DataCapacity
 		}
 	}
-
 	return cc.Spec.DataCapacity
 }
 
-// getDCDataCapacityFromIndex sends back DataCapacity used for the given index
-func (cc *CassandraCluster) getDCDataCapacityFromIndex(dc int) string {
-	if dc >= cc.GetDCSize() {
-		return cc.Spec.DataCapacity
-	}
-	storeDC := cc.Spec.Topology.DC[dc]
-	if storeDC.DataCapacity == "" {
-		return cc.Spec.DataCapacity
-	}
-	return storeDC.DataCapacity
-}
-
 // GetDataStorageClassForDC sends back the data storage class of cassandra nodes to use for this dc
 func (cc *CassandraCluster) GetDataStorageClassForDC(dcName string) string {
-	dataStorageClass := cc.GetDataStorageClassFromDCName(dcName)
-	return dataStorageClass
+	return cc.GetDataStorageClassFromDCName(dcName)
 }
 
 // GetDataStorageClassFromDCName sends back DataStorageClass used for the given dcName
 func (cc *CassandraCluster) GetDataStorageClassFromDCName(dcName string) string {
-	dcSize := cc.GetDCSize()
-	if dcSize < 1 {
-		return cc.Spec.DataStorageClass
-	}
-
-	for dc := 0; dc < dcSize; dc++ {
-		if dcName == cc.GetDCName(dc) {
-			return cc.getDCDataStorageClassFromIndex(dc)
+	dcIndex := cc.GetDCIndexFromDCName(dcName)
+	if dcIndex >= 0 {
+		dc := cc.getDCFromIndex(dcIndex)
+		if dc != nil && dc.DataStorageClass != "" {
+			return dc.DataStorageClass
 		}
 	}
-
 	return cc.Spec.DataStorageClass
 }
 
-// getDCDataStorageClassFromIndex sends back DataStorageClass used for the given index
-func (cc *CassandraCluster) getDCDataStorageClassFromIndex(dc int) string {
-	if dc >= cc.GetDCSize() {
-		return cc.Spec.DataStorageClass
-	}
-	storeDC := cc.Spec.Topology.DC[dc]
-	if storeDC.DataStorageClass == "" {
-		return cc.Spec.DataStorageClass
-	}
-	return storeDC.DataStorageClass
+// GetDCIndexFromDCName sends back the index of the DC with the given name, or -1 if it is not found
+func (cc *CassandraCluster) GetDCIndexFromDCName(dcName string) int {
+	dcSize := cc.GetDCSize()
+	if dcSize < 1 {
+		return -1
+	}
+	for dc := 0; dc < dcSize; dc++ {
+		if dcName == cc.GetDCName(dc) {
+			return dc
+		}
+	}
+	return -1
+}
+
+// getDCFromIndex sends back the DC for the given index, or nil if the index is out of range
+func (cc *CassandraCluster) getDCFromIndex(dc int) *DC {
+	if dc >= cc.GetDCSize() {
+		return nil
+	}
+	return &cc.Spec.Topology.DC[dc]
 }
 
 // GetNodesPerRacks sends back the number of cassandra nodes to uses for this dc-rack
diff --git a/pkg/controller/cassandracluster/generator_test.go b/pkg/controller/cassandracluster/generator_test.go
index b48f379d9..c6e62c3b4 100644
--- a/pkg/controller/cassandracluster/generator_test.go
+++ b/pkg/controller/cassandracluster/generator_test.go
@@ -236,7 +236,7 @@ func checkLiveAndReadiNessProbe(t *testing.T, containers []v1.Container,
 func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, pvcs []v1.PersistentVolumeClaim,
 	dataCapacity, dataStorageClass string) {
-	assert.Equal(t, len(pvcs), 3)
+	assert.Equal(t, 3, len(pvcs))
 	for _, pvc := range pvcs {
 		switch pvc.Name {
 		case "data":
diff --git a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml b/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
index 071a3ec42..63fd9d1cc 100644
--- a/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
+++ b/pkg/controller/cassandracluster/testdata/cassandracluster-2DC.yaml
@@ -32,8 +32,8 @@ spec:
   - key: my_custom_taint
     operator: Exists
     effect: NoSchedule
-  dataCapacity: "3Gi"
-  dataStorageClass: "local-storage"
+  dataCapacity: 3Gi
+  dataStorageClass: local-storage
   hardAntiAffinity: false
   deletePVC: true
   storageConfigs:
@@ -89,8 +89,8 @@ spec:
   topology:
     dc:
       - name: dc1
-        dataCapacity: "10Gi"
-        dataStorageClass: "test-storage"
+        dataCapacity: 10Gi
+        dataStorageClass: test-storage
         labels:
           location.dfy.orange.com/site : mts
         rack:
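The storage-class fallback in `GetDataStorageClassFromDCName` is easy to regress in the mixed case where a DC overrides `dataCapacity` but not `dataStorageClass`: the DC must still inherit the cluster-level storage class rather than an empty string. A regression test along the following lines could pin that down. This is a sketch only: the test name is hypothetical, and any struct-literal field or type names not visible in the diffs above (such as `Name` on `DC`, or the `CassandraClusterSpec`/`Topology` literals) are assumptions about the `v1alpha1` package.

```go
package v1alpha1

import "testing"

// Sketch of a regression test for the mixed-override fallback; struct-literal
// shapes beyond the fields shown in this patch series are assumptions.
func TestGetDataStorageClassForDCFallsBackToSpec(t *testing.T) {
	cc := CassandraCluster{
		Spec: CassandraClusterSpec{
			DataCapacity:     "3Gi",
			DataStorageClass: "local-storage",
			Topology: Topology{
				DC: []DC{
					{Name: "dc1", DataCapacity: "10Gi"}, // capacity overridden, storage class not
				},
			},
		},
	}

	// The DC-level capacity must win...
	if got := cc.GetDataCapacityForDC("dc1"); got != "10Gi" {
		t.Errorf("expected DC-level capacity %q, got %q", "10Gi", got)
	}
	// ...while the storage class must fall back to the cluster-level value.
	if got := cc.GetDataStorageClassForDC("dc1"); got != "local-storage" {
		t.Errorf("expected cluster-level storage class %q, got %q", "local-storage", got)
	}
}
```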