
Merge pull request #203 from erdrix/feature_volume_size
Implement data configuration at DC level & unit tests
Alexandre Guitton authored Mar 10, 2020
2 parents 3576fd4 + 3c3eb6a commit 1709643
Showing 8 changed files with 170 additions and 140 deletions.
9 changes: 9 additions & 0 deletions deploy/crds/db.orange.com_cassandraclusters_crd.yaml
@@ -1460,6 +1460,15 @@ spec:
description: DC allow to configure Cassandra RC according to kubernetes
nodeselector labels
properties:
dataCapacity:
description: Define the Capacity for Persistent Volume Claims
in the local storage
pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$
type: string
dataStorageClass:
description: Define StorageClass for Persistent Volume Claims
in the local storage.
type: string
labels:
additionalProperties:
type: string
29 changes: 26 additions & 3 deletions documentation/description.md
@@ -366,7 +366,7 @@ Jolokia, and uses it to connect.
Cassandra is a stateful application. It needs to store data on disks. CassKop allows you to configure the type of
storage you want to use.

Storage can be configured using the `storage` property in `CassandraCluster.spec`
Storage can be configured using the `storage` property in `CassandraCluster.spec` to set a global configuration for all data centers, or it can be overridden at the `CassandraCluster.spec.topology.dc` level.

> **Important:** Once the Cassandra cluster is deployed, the storage cannot be changed.
@@ -380,11 +380,24 @@ The `PersistentVolumes` are acquired using a `PersistentVolumeClaim` which is ma
CassandraCluster fragment of a persistent storage definition:

```
# ...
...
# Global configuration
dataCapacity: "300Gi"
dataStorageClass: "local-storage"
deletePVC: true
# ...
...
topology:
dc:
- name: dc1
# DC level configuration
dataCapacity: "10Gi"
dataStorageClass: "test-storage"
...
- name: dc2
...
...
...
```

- `dataCapacity` (required): Defines the size of the persistent volume claim, for example, "1000Gi".
@@ -393,6 +406,16 @@ CassandraCluster fragment of persistent storage definition :
it can be any storage with high SSD throughput.
- `deletePVC` (optional): Boolean value which specifies whether the Persistent Volume Claims have to be deleted when the cluster
is deleted. Defaults to `false`.

In this example, all statefulsets related to `dc2` will use the default configuration for the `data` PV:

- `dataCapacity`: 300Gi
- `dataStorageClass`: local-storage

All statefulsets related to `dc1` will use the DC-specific configuration for the `data` PV:

- `dataCapacity`: 10Gi
- `dataStorageClass`: test-storage

> **WARNING**: Resizing persistent storage for existing CassandraCluster is not currently supported. You must decide the
> necessary storage size before deploying the cluster.
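
For reference, here is a minimal, illustrative manifest combining the global and DC-level storage settings described above. Only `dataCapacity`, `dataStorageClass` and `deletePVC` are the fields introduced here; the surrounding fields (`nodesPerRacks`, `rack`, names and sizes) are example values to adapt to your own cluster:

```yaml
apiVersion: db.orange.com/v1alpha1
kind: CassandraCluster
metadata:
  name: cassandra-demo
spec:
  nodesPerRacks: 1
  # Global storage configuration, used by every DC that does not override it
  dataCapacity: "300Gi"
  dataStorageClass: "local-storage"
  deletePVC: true
  topology:
    dc:
      - name: dc1
        # DC-level override: dc1 statefulsets get 10Gi PVCs on test-storage
        dataCapacity: "10Gi"
        dataStorageClass: "test-storage"
        rack:
          - name: rack1
      - name: dc2
        # No override: dc2 falls back to the global 300Gi / local-storage values
        rack:
          - name: rack1
```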
65 changes: 65 additions & 0 deletions pkg/apis/db/v1alpha1/cassandracluster_types.go
@@ -502,6 +502,64 @@ func (cc *CassandraCluster) InitCassandraRackList() int {
return nbRack
}

// GetDataCapacityForDC sends back the data capacity of cassandra nodes to use for this dc
func (cc *CassandraCluster) GetDataCapacityForDC(dcName string) string {
return cc.GetDataCapacityFromDCName(dcName)
}

// GetDataCapacityFromDCName sends back the DataCapacity used for the given dcName
func (cc *CassandraCluster) GetDataCapacityFromDCName(dcName string) string {
dcIndex := cc.GetDCIndexFromDCName(dcName)
if dcIndex >= 0 {
dc := cc.getDCFromIndex(dcIndex)
if dc != nil && dc.DataCapacity != "" {
return dc.DataCapacity
}
return cc.Spec.DataCapacity
}
return cc.Spec.DataCapacity
}

// GetDataStorageClassForDC sends back the data storage class of cassandra nodes to use for this dc
func (cc *CassandraCluster) GetDataStorageClassForDC(dcName string) string {
return cc.GetDataStorageClassFromDCName(dcName)
}

// GetDataStorageClassFromDCName sends back the DataStorageClass used for the given dcName
func (cc *CassandraCluster) GetDataStorageClassFromDCName(dcName string) string {
dcIndex := cc.GetDCIndexFromDCName(dcName)
if dcIndex >= 0 {
dc := cc.getDCFromIndex(dcIndex)
if dc != nil && dc.DataStorageClass != "" {
return dc.DataStorageClass
}
return cc.Spec.DataStorageClass
}
return cc.Spec.DataStorageClass
}

func (cc *CassandraCluster) GetDCIndexFromDCName(dcName string) int {
dcSize := cc.GetDCSize()
if dcSize < 1 {
return -1
}

for dc := 0; dc < dcSize; dc++ {
if dcName == cc.GetDCName(dc) {
return dc
}
}
return -1
}

// getDCFromIndex sends back the DC at the given index
func (cc *CassandraCluster) getDCFromIndex(dc int) *DC {
if dc >= cc.GetDCSize() {
return nil
}
return &cc.Spec.Topology.DC[dc]
}
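
A minimal sketch of how these getters resolve values, assuming it sits in the same `v1alpha1` package; the helper name `exampleDataCapacityFallback` and the two-DC setup are purely illustrative:

```go
// Illustrative only: shows the DC-level override / cluster-level fallback behaviour
// of the getters above. Assumes cc.Spec.Topology.DC already holds two DCs named
// dc1 and dc2 (e.g. loaded from the manifest in documentation/description.md).
func exampleDataCapacityFallback(cc *CassandraCluster) {
	cc.Spec.DataCapacity = "300Gi"
	cc.Spec.DataStorageClass = "local-storage"

	// Override only dc1 at the DC level.
	cc.Spec.Topology.DC[0].DataCapacity = "10Gi"
	cc.Spec.Topology.DC[0].DataStorageClass = "test-storage"

	_ = cc.GetDataCapacityForDC("dc1")     // "10Gi": the DC-level value wins
	_ = cc.GetDataStorageClassForDC("dc1") // "test-storage"
	_ = cc.GetDataCapacityForDC("dc2")     // "300Gi": falls back to Spec.DataCapacity
	_ = cc.GetDataCapacityForDC("unknown") // "300Gi": unknown DC names also fall back
}
```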

// GetNodesPerRacks sends back the number of cassandra nodes to uses for this dc-rack
func (cc *CassandraCluster) GetNodesPerRacks(dcRackName string) int32 {
nodesPerRacks := cc.GetDCNodesPerRacksFromDCRackName(dcRackName)
@@ -833,6 +891,13 @@ type DC struct {

//NumTokens : configure the CASSANDRA_NUM_TOKENS parameter which can be different for each DC
NumTokens *int32 `json:"numTokens,omitempty"`

//Define the Capacity for Persistent Volume Claims in the local storage
// +kubebuilder:validation:Pattern=^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$
DataCapacity string `json:"dataCapacity,omitempty"`

//Define StorageClass for Persistent Volume Claims in the local storage.
DataStorageClass string `json:"dataStorageClass,omitempty"`
}

// Rack allow to configure Cassandra Rack according to kubernetes nodeselector labels
14 changes: 8 additions & 6 deletions pkg/controller/cassandracluster/generator.go
@@ -221,11 +221,13 @@ func generateStorageConfigVolumeClaimTemplates(cc *api.CassandraCluster, labels
return pvcs, nil
}

func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]string) ([]v1.PersistentVolumeClaim, error) {
func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]string, dcName string) ([]v1.PersistentVolumeClaim, error) {

var pvc []v1.PersistentVolumeClaim
dataCapacity := cc.GetDataCapacityForDC(dcName)
dataStorageClass := cc.GetDataStorageClassForDC(dcName)

if cc.Spec.DataCapacity == "" {
if dataCapacity == "" {
logrus.Warnf("[%s]: No Spec.DataCapacity was specified -> Your Cluster WILL NOT HAVE PERSISTENT DATA!!!!!", cc.Name)
return pvc, nil
}
@@ -243,15 +245,15 @@ func generateVolumeClaimTemplate(cc *api.CassandraCluster, labels map[string]str

Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": generateResourceQuantity(cc.Spec.DataCapacity),
"storage": generateResourceQuantity(dataCapacity),
},
},
},
},
}

if cc.Spec.DataStorageClass != "" {
pvc[0].Spec.StorageClassName = &cc.Spec.DataStorageClass
if dataStorageClass != "" {
pvc[0].Spec.StorageClassName = &dataStorageClass
}

storageConfigPvcs, err := generateStorageConfigVolumeClaimTemplates(cc, labels)
@@ -271,7 +273,7 @@ func generateCassandraStatefulSet(cc *api.CassandraCluster, status *api.Cassandr
namespace := cc.Namespace
volumes := generateCassandraVolumes(cc)

volumeClaimTemplate, err := generateVolumeClaimTemplate(cc, labels)
volumeClaimTemplate, err := generateVolumeClaimTemplate(cc, labels, dcName)
if err != nil {
return nil, err
}
51 changes: 36 additions & 15 deletions pkg/controller/cassandracluster/generator_test.go
@@ -142,14 +142,11 @@ func TestGenerateCassandraStatefulSet(t *testing.T) {
dcRackName := fmt.Sprintf("%s-%s", dcName, rackName)

_, cc := helperInitCluster(t, "cassandracluster-2DC.yaml")
ccDefault := cc.DeepCopy()
cc.CheckDefaults()
labels, nodeSelector := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(cc, 0, 0)
sts, _ := generateCassandraStatefulSet(cc, &cc.Status, dcName, dcRackName, labels, nodeSelector, nil)

_, ccDefault := helperInitCluster(t, "cassandracluster-2DC-default.yaml")
ccDefault.CheckDefaults()
labelsDefault, nodeSelectorDefault := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(ccDefault, 0, 0)
stsDefault, _ := generateCassandraStatefulSet(ccDefault, &ccDefault.Status, dcName, dcRackName, labelsDefault, nodeSelectorDefault, nil)

assert.Equal(map[string]string{
"app": "cassandracluster",
@@ -167,18 +164,42 @@ func TestGenerateCassandraStatefulSet(t *testing.T) {
Effect: v1.TaintEffectNoSchedule}},
sts.Spec.Template.Spec.Tolerations)


checkVolumeClaimTemplates(t, labels, sts.Spec.VolumeClaimTemplates, "10Gi", "test-storage")
checkLiveAndReadiNessProbe(t, sts.Spec.Template.Spec.Containers,
1010, 201, 32, 7, 9, 1205, 151, 17, 50, 30)
checkLiveAndReadiNessProbe(t, stsDefault.Spec.Template.Spec.Containers,
60, 10, 10, 0,0, 120, 20, 10, 0, 0)
checkVolumeClaimTemplates(t, labels, sts.Spec.VolumeClaimTemplates)
checkVolumeMount(t, sts.Spec.Template.Spec.Containers)
checkVarEnv(t, sts.Spec.Template.Spec.Containers, cc, dcRackName)

cc.Spec.StorageConfigs[0].PVCSpec = nil
_, err := generateCassandraStatefulSet(cc, &cc.Status, dcName, dcRackName, labels, nodeSelector, nil)
assert.NotEqual(t, err, nil)

// Test default setup
dcNameDefault := "dc2"
rackNameDefault := "rack1"
dcRackNameDefault := fmt.Sprintf("%s-%s", dcNameDefault, rackNameDefault)
setupForDefaultTest(ccDefault)

ccDefault.CheckDefaults()
labelsDefault, nodeSelectorDefault := k8s.GetDCRackLabelsAndNodeSelectorForStatefulSet(ccDefault, 0, 0)
stsDefault, _ := generateCassandraStatefulSet(ccDefault, &ccDefault.Status, dcNameDefault, dcRackNameDefault, labelsDefault, nodeSelectorDefault, nil)

checkVolumeClaimTemplates(t, labels, stsDefault.Spec.VolumeClaimTemplates, "3Gi", "local-storage")
checkLiveAndReadiNessProbe(t, stsDefault.Spec.Template.Spec.Containers,
60, 10, 10, 0, 0, 120, 20, 10, 0, 0)
}

func setupForDefaultTest(cc *api.CassandraCluster) {
cc.Spec.LivenessFailureThreshold = nil
cc.Spec.LivenessSuccessThreshold = nil
cc.Spec.LivenessHealthCheckPeriod = nil
cc.Spec.LivenessHealthCheckTimeout = nil
cc.Spec.LivenessInitialDelaySeconds = nil
cc.Spec.ReadinessHealthCheckPeriod = nil
cc.Spec.ReadinessHealthCheckTimeout = nil
cc.Spec.ReadinessInitialDelaySeconds = nil
cc.Spec.ReadinessFailureThreshold = nil
cc.Spec.ReadinessSuccessThreshold = nil
}

func checkLiveAndReadiNessProbe(t *testing.T, containers []v1.Container,
@@ -213,12 +234,13 @@ func checkLiveAndReadiNessProbe(t *testing.T, containers []v1.Container,



func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, pvcs []v1.PersistentVolumeClaim) {
assert.Equal(t, len(pvcs), 3)
func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, pvcs []v1.PersistentVolumeClaim,
dataCapacity, dataClassStorage string) {
assert.Equal(t, 3, len(pvcs))
for _, pvc := range pvcs {
switch pvc.Name {
case "data":
assert.Equal(t, generateExpectedDataStoragePVC(expectedlabels), pvc)
assert.Equal(t, generateExpectedDataStoragePVC(expectedlabels, dataCapacity, dataClassStorage), pvc)
case "gc-logs":
assert.Equal(t, generateExpectedGcLogsStoragePVC(expectedlabels), pvc)
case "cassandra-logs":
@@ -229,10 +251,9 @@ func checkVolumeClaimTemplates(t *testing.T, expectedlabels map[string]string, p
}
}

func generateExpectedDataStoragePVC(expectedlabels map[string]string) v1.PersistentVolumeClaim {
func generateExpectedDataStoragePVC(expectedlabels map[string]string, dataCapacity, dataClassStorage string) v1.PersistentVolumeClaim {

expectedDataStorageQuantity, _ := resource.ParseQuantity("3Gi")
expectedDataStorageClassName := "local-storage"
expectedDataStorageQuantity, _ := resource.ParseQuantity(dataCapacity)

return v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@@ -249,7 +270,7 @@ func generateExpectedDataStoragePVC(expectedlabels map[string]string) v1.Persist
"storage": expectedDataStorageQuantity,
},
},
StorageClassName: &expectedDataStorageClassName,
StorageClassName: &dataClassStorage,
},
}
}
38 changes: 22 additions & 16 deletions pkg/controller/cassandracluster/reconcile.go
@@ -138,23 +138,30 @@ func (rcc *ReconcileCassandraCluster) CheckNonAllowedChanges(cc *api.CassandraCl
cc.Spec.NodesPerRacks = oldCRD.Spec.NodesPerRacks
needUpdate = true
}
//DataCapacity change is forbidden
if cc.Spec.DataCapacity != oldCRD.Spec.DataCapacity {
logrus.WithFields(logrus.Fields{"cluster": cc.Name}).
Warningf("The Operator has refused the change on DataCapacity from [%s] to NewValue[%s]",
oldCRD.Spec.DataCapacity, cc.Spec.DataCapacity)
cc.Spec.DataCapacity = oldCRD.Spec.DataCapacity
needUpdate = true
}
//DataStorage
if cc.Spec.DataStorageClass != oldCRD.Spec.DataStorageClass {
logrus.WithFields(logrus.Fields{"cluster": cc.Name}).
Warningf("The Operator has refused the change on DataStorageClass from [%s] to NewValue[%s]",
oldCRD.Spec.DataStorageClass, cc.Spec.DataStorageClass)
cc.Spec.DataStorageClass = oldCRD.Spec.DataStorageClass
needUpdate = true

for dc := 0; dc < cc.GetDCSize(); dc++ {
dcName := cc.GetDCName(dc)
//DataCapacity change is forbidden
if cc.GetDataCapacityForDC(dcName) != oldCRD.GetDataCapacityForDC(dcName) {
logrus.WithFields(logrus.Fields{"cluster": cc.Name, "dcName": dcName}).
Warningf("The Operator has refused the change on DataCapacity from [%s] to NewValue[%s]",
oldCRD.GetDataCapacityForDC(dcName), cc.GetDataCapacityForDC(dcName))
cc.Spec.DataCapacity = oldCRD.Spec.DataCapacity
cc.Spec.Topology.DC[dc].DataCapacity = oldCRD.Spec.Topology.DC[dc].DataCapacity
needUpdate = true
}
//DataStorage
if cc.GetDataStorageClassForDC(dcName) != oldCRD.GetDataStorageClassForDC(dcName) {
logrus.WithFields(logrus.Fields{"cluster": cc.Name, "dcName": dcName}).
Warningf("The Operator has refused the change on DataStorageClass from [%s] to NewValue[%s]",
oldCRD.GetDataStorageClassForDC(dcName), cc.GetDataStorageClassForDC(dcName))
cc.Spec.DataStorageClass = oldCRD.Spec.DataStorageClass
cc.Spec.Topology.DC[dc].DataStorageClass = oldCRD.Spec.Topology.DC[dc].DataStorageClass
needUpdate = true
}
}


if needUpdate {
status.LastClusterAction = api.ActionCorrectCRDConfig.Name
ClusterActionMetric.set(api.ActionCorrectCRDConfig, cc.Name)
@@ -208,7 +215,6 @@ func (rcc *ReconcileCassandraCluster) CheckNonAllowedChanges(cc *api.CassandraCl
status.CassandraRackStatus[dcRackName].CassandraLastAction.EndTime = nil
}
}

}

return false
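
A hypothetical helper, shown only to illustrate the per-DC comparison the loop above performs (the name `storageChangedForDC` does not exist in the operator, and the real code additionally reverts the spec fields and flags `needUpdate`; `api` is assumed to be the alias this package already uses for the v1alpha1 types):

```go
// Illustrative sketch: true when a user modified the effective data capacity or
// storage class for the given DC, a change CheckNonAllowedChanges refuses and reverts.
func storageChangedForDC(current, previous *api.CassandraCluster, dcName string) bool {
	return current.GetDataCapacityForDC(dcName) != previous.GetDataCapacityForDC(dcName) ||
		current.GetDataStorageClassForDC(dcName) != previous.GetDataStorageClassForDC(dcName)
}
```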