diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
index 99551f1884..f0916b8301 100644
--- a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
@@ -60,6 +60,10 @@ func setTidbClusterSpecDefault(tc *v1alpha1.TidbCluster) {
 		d := false
 		tc.Spec.EnablePVReclaim = &d
 	}
+	retainPVP := corev1.PersistentVolumeReclaimRetain
+	if tc.Spec.PVReclaimPolicy == nil {
+		tc.Spec.PVReclaimPolicy = &retainPVP
+	}
 }
 
 func setTidbSpecDefault(tc *v1alpha1.TidbCluster) {
diff --git a/pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go b/pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go
index 45e76d3382..5c089e5eec 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go
@@ -48,8 +48,8 @@ type TidbMonitorSpec struct {
 	Initializer InitializerSpec `json:"initializer"`
 
 	// Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster
-	// +kubebuilder:default=Recycle
-	PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
+	// +kubebuilder:default=Retain
+	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
 	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
 
 	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index 827a2e1be6..b4eeca181a 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -152,8 +152,8 @@ type TidbClusterSpec struct {
 	SchedulerName string `json:"schedulerName,omitempty"`
 
 	// Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster
-	// +kubebuilder:default=Recycle
-	PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
+	// +kubebuilder:default=Retain
+	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
 
 	// ImagePullPolicy of TiDB cluster Pods
 	// +kubebuilder:default=IfNotPresent
diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
index 0471ea5855..78a6cbe0d4 100644
--- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
@@ -6616,6 +6616,11 @@ func (in *TidbClusterSpec) DeepCopyInto(out *TidbClusterSpec) {
 		*out = new(HelperSpec)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.PVReclaimPolicy != nil {
+		in, out := &in.PVReclaimPolicy, &out.PVReclaimPolicy
+		*out = new(v1.PersistentVolumeReclaimPolicy)
+		**out = **in
+	}
 	if in.ImagePullSecrets != nil {
 		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
 		*out = make([]v1.LocalObjectReference, len(*in))
@@ -6952,6 +6957,11 @@ func (in *TidbMonitorSpec) DeepCopyInto(out *TidbMonitorSpec) {
 	}
 	in.Reloader.DeepCopyInto(&out.Reloader)
 	in.Initializer.DeepCopyInto(&out.Initializer)
+	if in.PVReclaimPolicy != nil {
+		in, out := &in.PVReclaimPolicy, &out.PVReclaimPolicy
+		*out = new(v1.PersistentVolumeReclaimPolicy)
+		**out = **in
+	}
 	if in.ImagePullSecrets != nil {
 		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
 		*out = make([]v1.LocalObjectReference, len(*in))
diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go
index ae64a31573..9ad300b732 100644
--- a/pkg/controller/controller_utils_test.go
+++ b/pkg/controller/controller_utils_test.go
@@ -341,11 +341,15 @@ func collectEvents(source <-chan string) []string {
 }
 
 func newTidbCluster() *v1alpha1.TidbCluster {
+	retainPVP := corev1.PersistentVolumeReclaimRetain
 	tc := &v1alpha1.TidbCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "demo",
 			Namespace: metav1.NamespaceDefault,
 		},
+		Spec: v1alpha1.TidbClusterSpec{
+			PVReclaimPolicy: &retainPVP,
+		},
 	}
 	return tc
 }
diff --git a/pkg/controller/pv_control_test.go b/pkg/controller/pv_control_test.go
index 921263c82c..7164300cbb 100644
--- a/pkg/controller/pv_control_test.go
+++ b/pkg/controller/pv_control_test.go
@@ -39,7 +39,7 @@ func TestPVControlPatchPVReclaimPolicySuccess(t *testing.T) {
 	fakeClient.AddReactor("patch", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, nil
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(Succeed())
 
 	events := collectEvents(recorder.Events)
@@ -56,7 +56,7 @@ func TestPVControlPatchPVReclaimPolicyFailed(t *testing.T) {
 	fakeClient.AddReactor("patch", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(HaveOccurred())
 
 	events := collectEvents(recorder.Events)
@@ -79,7 +79,7 @@ func TestPVControlPatchPVReclaimPolicyConflictSuccess(t *testing.T) {
 		}
 		return true, nil, nil
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(Succeed())
 
 	events := collectEvents(recorder.Events)
diff --git a/pkg/label/label.go b/pkg/label/label.go
index 457ea2ca64..697c6874cf 100644
--- a/pkg/label/label.go
+++ b/pkg/label/label.go
@@ -314,11 +314,19 @@ func (l Label) Pump() Label {
 	return l
 }
 
+func (l Label) IsPump() bool {
+	return l[ComponentLabelKey] == PumpLabelVal
+}
+
 func (l Label) Monitor() Label {
 	l.Component(TiDBMonitorVal)
 	return l
 }
 
+func (l Label) IsMonitor() bool {
+	return l[ComponentLabelKey] == TiDBMonitorVal
+}
+
 // Discovery assigns discovery to component key in label
 func (l Label) Discovery() Label {
 	l.Component(DiscoveryLabelVal)
diff --git a/pkg/manager/meta/reclaim_policy_manager.go b/pkg/manager/meta/reclaim_policy_manager.go
index fe0625215f..4603f0e65c 100644
--- a/pkg/manager/meta/reclaim_policy_manager.go
+++ b/pkg/manager/meta/reclaim_policy_manager.go
@@ -14,15 +14,12 @@
 package meta
 
 import (
-	"fmt"
-
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/controller"
 	"github.com/pingcap/tidb-operator/pkg/label"
 	"github.com/pingcap/tidb-operator/pkg/manager"
 	"github.com/pingcap/tidb-operator/pkg/monitor"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	corelisters "k8s.io/client-go/listers/core/v1"
 )
@@ -56,32 +53,19 @@ func NewReclaimPolicyMonitorManager(pvcLister corelisters.PersistentVolumeClaimL
 }
 
 func (rpm *reclaimPolicyManager) Sync(tc *v1alpha1.TidbCluster) error {
-	return rpm.sync(v1alpha1.TiDBClusterKind, tc.GetNamespace(), tc.GetInstanceName(), tc.IsPVReclaimEnabled(), tc.Spec.PVReclaimPolicy, tc)
+	return rpm.sync(v1alpha1.TiDBClusterKind, tc.GetNamespace(), tc.GetInstanceName(), tc.IsPVReclaimEnabled(), *tc.Spec.PVReclaimPolicy, tc)
 }
 
 func (rpm *reclaimPolicyManager) SyncMonitor(tm *v1alpha1.TidbMonitor) error {
-	return rpm.sync(v1alpha1.TiDBMonitorKind, tm.GetNamespace(), tm.GetName(), false, tm.Spec.PVReclaimPolicy, tm)
+	return rpm.sync(v1alpha1.TiDBMonitorKind, tm.GetNamespace(), tm.GetName(), false, *tm.Spec.PVReclaimPolicy, tm)
 }
 
 func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaimEnabled bool, policy corev1.PersistentVolumeReclaimPolicy, obj runtime.Object) error {
-	var l labels.Selector
-	var err error
-	switch kind {
-	case v1alpha1.TiDBMonitorKind:
-		l, err = label.NewMonitor().Instance(instanceName).Monitor().Selector()
-		if err != nil {
-			return err
-		}
-	case v1alpha1.TiDBClusterKind:
-		l, err = label.New().Instance(instanceName).Selector()
-		if err != nil {
-			return err
-		}
-	default:
-		return fmt.Errorf("unknown kind = %s", kind)
+	selector, err := label.New().Instance(instanceName).Selector()
+	if err != nil {
+		return err
 	}
-
-	pvcs, err := rpm.pvcLister.PersistentVolumeClaims(ns).List(l)
+	pvcs, err := rpm.pvcLister.PersistentVolumeClaims(ns).List(selector)
 	if err != nil {
 		return err
 	}
@@ -90,6 +74,20 @@ func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaim
 		if pvc.Spec.VolumeName == "" {
 			continue
 		}
+		l := label.Label(pvc.Labels)
+		switch kind {
+		case v1alpha1.TiDBClusterKind:
+			if !l.IsPD() && !l.IsTiDB() && !l.IsTiKV() && !l.IsTiFlash() && !l.IsPump() {
+				continue
+			}
+		case v1alpha1.TiDBMonitorKind:
+			if !l.IsMonitor() {
+				continue
+			}
+		default:
+			continue
+		}
+
 		if isPVReclaimEnabled && len(pvc.Annotations[label.AnnPVCDeferDeleting]) != 0 {
 			// If the pv reclaim function is turned on, and when pv is the candidate pv to be reclaimed, skip patch this pv.
 			continue
@@ -102,7 +100,6 @@ func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaim
 		if pv.Spec.PersistentVolumeReclaimPolicy == policy {
 			continue
 		}
-
 		err = rpm.pvControl.PatchPVReclaimPolicy(obj, pv, policy)
 		if err != nil {
 			return err
diff --git a/pkg/manager/meta/reclaim_policy_manager_test.go b/pkg/manager/meta/reclaim_policy_manager_test.go
index 76236426cd..61911d2c88 100644
--- a/pkg/manager/meta/reclaim_policy_manager_test.go
+++ b/pkg/manager/meta/reclaim_policy_manager_test.go
@@ -165,6 +165,7 @@ func newFakeReclaimPolicyManager() (*reclaimPolicyManager, *controller.FakePVCon
 }
 
 func newTidbClusterForMeta() *v1alpha1.TidbCluster {
+	pvp := corev1.PersistentVolumeReclaimRetain
 	return &v1alpha1.TidbCluster{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "TidbCluster",
@@ -177,7 +178,7 @@ func newTidbClusterForMeta() *v1alpha1.TidbCluster {
 			Labels:    label.New().Instance(controller.TestClusterName),
 		},
 		Spec: v1alpha1.TidbClusterSpec{
-			PVReclaimPolicy: corev1.PersistentVolumeReclaimRetain,
+			PVReclaimPolicy: &pvp,
 		},
 	}
 }
diff --git a/pkg/monitor/monitor/util.go b/pkg/monitor/monitor/util.go
index cf1fde2015..00b85cff79 100644
--- a/pkg/monitor/monitor/util.go
+++ b/pkg/monitor/monitor/util.go
@@ -864,4 +864,8 @@ func defaultTidbMonitor(monitor *v1alpha1.TidbMonitor) {
 		}
 		monitor.Spec.Clusters[id] = tcRef
 	}
+	retainPVP := core.PersistentVolumeReclaimRetain
+	if monitor.Spec.PVReclaimPolicy == nil {
+		monitor.Spec.PVReclaimPolicy = &retainPVP
+	}
 }
diff --git a/tests/actions.go b/tests/actions.go
index ce890b8674..7221f4b3c0 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -1754,7 +1754,7 @@ func (oa *operatorActions) reclaimPolicySyncFn(tc *v1alpha1.TidbCluster) (bool,
 		if pv, err := oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
 			klog.Errorf("failed to get pv: %s, error: %v", pvName, err)
 			return false, nil
-		} else if pv.Spec.PersistentVolumeReclaimPolicy != tc.Spec.PVReclaimPolicy {
+		} else if pv.Spec.PersistentVolumeReclaimPolicy != *tc.Spec.PVReclaimPolicy {
 			klog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
 			return false, nil
 		}
diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go
index 3a717554ed..44d7bdc3a2 100644
--- a/tests/e2e/tidbcluster/tidbcluster.go
+++ b/tests/e2e/tidbcluster/tidbcluster.go
@@ -723,7 +723,8 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
 		framework.ExpectNoError(err, "Expected get tidbcluster")
 
 		tm := fixture.NewTidbMonitor("e2e-monitor", tc.Namespace, tc, true, true)
-		tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimDelete
+		deletePVP := corev1.PersistentVolumeReclaimDelete
+		tm.Spec.PVReclaimPolicy = &deletePVP
 		_, err = cli.PingcapV1alpha1().TidbMonitors(tc.Namespace).Create(tm)
 		framework.ExpectNoError(err, "Expected tidbmonitor deployed success")
 		err = tests.CheckTidbMonitor(tm, cli, c, fw)
@@ -764,7 +765,8 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
 		tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "fetch latest tidbmonitor error")
 		tm.Spec.Prometheus.Service.Type = corev1.ServiceTypeNodePort
-		tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimRetain
+		retainPVP := corev1.PersistentVolumeReclaimRetain
+		tm.Spec.PVReclaimPolicy = &retainPVP
 		tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
 		framework.ExpectNoError(err, "update tidbmonitor service type error")
 
@@ -827,6 +829,27 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
 		})
 		framework.ExpectNoError(err, "second update tidbmonitor service error")
 
+		err = wait.Poll(5*time.Second, 3*time.Minute, func() (done bool, err error) {
+			tc, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+			if err != nil {
+				return false, err
+			}
+			tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
+			if err != nil {
+				return false, err
+			}
+			if *tc.Spec.PVReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
+				framework.Logf("tidbcluster PVReclaimPolicy changed into %v", *tc.Spec.PVReclaimPolicy)
+				return true, nil
+			}
+			if *tm.Spec.PVReclaimPolicy != corev1.PersistentVolumeReclaimRetain {
+				framework.Logf("tidbmonitor PVReclaimPolicy changed into %v", *tm.Spec.PVReclaimPolicy)
+				return true, nil
+			}
+			return false, nil
+		})
+		framework.ExpectEqual(err, wait.ErrWaitTimeout, "verify tidbmonitor and tidbcluster PVReclaimPolicy won't affect each other")
+
 		err = cli.PingcapV1alpha1().TidbMonitors(tm.Namespace).Delete(tm.Name, &metav1.DeleteOptions{})
 		framework.ExpectNoError(err, "delete tidbmonitor failed")
 		err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
diff --git a/tests/pkg/fixture/fixture.go b/tests/pkg/fixture/fixture.go
index b5f58fc7d0..0fda921947 100644
--- a/tests/pkg/fixture/fixture.go
+++ b/tests/pkg/fixture/fixture.go
@@ -84,7 +84,7 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
 	if v, err := semver.NewVersion(version); err == nil && v.LessThan(tikvV4Beta) {
 		tikvStorageConfig = nil
 	}
-
+	deletePVP := corev1.PersistentVolumeReclaimDelete
 	return &v1alpha1.TidbCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -93,7 +93,7 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
 		Spec: v1alpha1.TidbClusterSpec{
 			Version:         version,
 			ImagePullPolicy: corev1.PullIfNotPresent,
-			PVReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
+			PVReclaimPolicy: &deletePVP,
 			SchedulerName:   "tidb-scheduler",
 			Timezone:        "Asia/Shanghai",
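
Note (illustrative, not part of the patch above): because PVReclaimPolicy is now a *corev1.PersistentVolumeReclaimPolicy, an unset field is nil rather than the empty string, so defaulting can distinguish "not specified" (defaulted to Retain) from an explicit policy; callers take the address of a named variable (Go cannot take the address of a constant directly) and dereference after defaulting. A minimal self-contained sketch of that pattern, using a hypothetical stand-in struct and helper rather than the real v1alpha1 types:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// spec stands in for the pointer-typed field on TidbClusterSpec/TidbMonitorSpec.
type spec struct {
	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy
}

// defaultPolicy mirrors the defaulting added in this patch: a nil policy becomes Retain.
func defaultPolicy(s *spec) {
	if s.PVReclaimPolicy == nil {
		retainPVP := corev1.PersistentVolumeReclaimRetain
		s.PVReclaimPolicy = &retainPVP
	}
}

func main() {
	s := &spec{} // unset: nil, distinguishable from an explicit policy
	defaultPolicy(s)
	fmt.Println(*s.PVReclaimPolicy) // Retain

	deletePVP := corev1.PersistentVolumeReclaimDelete
	s = &spec{PVReclaimPolicy: &deletePVP} // an explicit value survives defaulting
	defaultPolicy(s)
	fmt.Println(*s.PVReclaimPolicy) // Delete
}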