Fix pv patch continuously problem #2769

Merged · 14 commits · Jun 19, 2020
4 changes: 4 additions & 0 deletions pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
@@ -60,6 +60,10 @@ func setTidbClusterSpecDefault(tc *v1alpha1.TidbCluster) {
 		d := false
 		tc.Spec.EnablePVReclaim = &d
 	}
+	retainPVP := corev1.PersistentVolumeReclaimRetain
+	if tc.Spec.PVReclaimPolicy == nil {
+		tc.Spec.PVReclaimPolicy = &retainPVP
+	}
 }

 func setTidbSpecDefault(tc *v1alpha1.TidbCluster) {
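With `PVReclaimPolicy` now a pointer, the defaulting hook can tell "unset" apart from any explicit value and fill in Retain exactly once. Below is a minimal, self-contained sketch of this nil-check defaulting pattern; the types are simplified stand-ins, not the operator's actual API package:

```go
package main

import "fmt"

// ReclaimPolicy stands in for corev1.PersistentVolumeReclaimPolicy.
type ReclaimPolicy string

// Retain mirrors corev1.PersistentVolumeReclaimRetain.
const Retain ReclaimPolicy = "Retain"

// Spec models an optional field: a nil pointer means "the user did not set it".
type Spec struct {
	PVReclaimPolicy *ReclaimPolicy
}

// setDefault fills the field only when it is unset, so an explicit value,
// even one equal to the default, is never overwritten on later syncs.
func setDefault(s *Spec) {
	if s.PVReclaimPolicy == nil {
		retain := Retain
		s.PVReclaimPolicy = &retain
	}
}

func main() {
	var s Spec
	setDefault(&s)
	fmt.Println(*s.PVReclaimPolicy) // Retain
}
```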
4 changes: 2 additions & 2 deletions pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go
@@ -48,8 +48,8 @@ type TidbMonitorSpec struct {
 	Initializer InitializerSpec `json:"initializer"`

 	// Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster
-	// +kubebuilder:default=Recycle
-	PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
+	// +kubebuilder:default=Retain
+	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`

 	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
 	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images.
4 changes: 2 additions & 2 deletions pkg/apis/pingcap/v1alpha1/types.go
@@ -152,8 +152,8 @@ type TidbClusterSpec struct {
 	SchedulerName string `json:"schedulerName,omitempty"`

 	// Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster
-	// +kubebuilder:default=Recycle
-	PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`
+	// +kubebuilder:default=Retain
+	PVReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`

 	// ImagePullPolicy of TiDB cluster Pods
 	// +kubebuilder:default=IfNotPresent
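The switch from a value-typed field to `*corev1.PersistentVolumeReclaimPolicy` also changes serialization semantics: with a plain string and `omitempty`, an unset field and an explicitly empty one are indistinguishable in JSON, so schema-level defaults (note the default also changes here from Recycle to Retain) can apply where nothing was set. A small stand-alone illustration, using plain strings in place of the Kubernetes type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type valueSpec struct {
	PVReclaimPolicy string `json:"pvReclaimPolicy,omitempty"`
}

type pointerSpec struct {
	PVReclaimPolicy *string `json:"pvReclaimPolicy,omitempty"`
}

func main() {
	// Value field: "unset" and "explicitly empty" both marshal to {},
	// so a schema default can be applied even when that is not intended.
	v, _ := json.Marshal(valueSpec{})
	fmt.Println(string(v)) // {}

	// Pointer field: nil also marshals to {}, but in Go code nil vs
	// non-nil cleanly separates "unset" from "set", and a set value survives.
	retain := "Retain"
	p, _ := json.Marshal(pointerSpec{PVReclaimPolicy: &retain})
	fmt.Println(string(p)) // {"pvReclaimPolicy":"Retain"}
}
```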
10 changes: 10 additions & 0 deletions pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go

(Generated file; diff not rendered.)

4 changes: 4 additions & 0 deletions pkg/controller/controller_utils_test.go
@@ -341,11 +341,15 @@ func collectEvents(source <-chan string) []string {
 }

 func newTidbCluster() *v1alpha1.TidbCluster {
+	retainPVP := corev1.PersistentVolumeReclaimRetain
 	tc := &v1alpha1.TidbCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "demo",
 			Namespace: metav1.NamespaceDefault,
 		},
+		Spec: v1alpha1.TidbClusterSpec{
+			PVReclaimPolicy: &retainPVP,
+		},
 	}
 	return tc
 }
6 changes: 3 additions & 3 deletions pkg/controller/pv_control_test.go
@@ -39,7 +39,7 @@ func TestPVControlPatchPVReclaimPolicySuccess(t *testing.T) {
 	fakeClient.AddReactor("patch", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, nil
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(Succeed())

 	events := collectEvents(recorder.Events)
@@ -56,7 +56,7 @@ func TestPVControlPatchPVReclaimPolicyFailed(t *testing.T) {
 	fakeClient.AddReactor("patch", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(HaveOccurred())

 	events := collectEvents(recorder.Events)
@@ -79,7 +79,7 @@ func TestPVControlPatchPVReclaimPolicyConflictSuccess(t *testing.T) {
 		}
 		return true, nil, nil
 	})
-	err := control.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
+	err := control.PatchPVReclaimPolicy(tc, pv, *tc.Spec.PVReclaimPolicy)
 	g.Expect(err).To(Succeed())

 	events := collectEvents(recorder.Events)
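All three tests now dereference the pointer directly. One caveat worth noting: `*tc.Spec.PVReclaimPolicy` panics if the spec never went through defaulting, which is presumably why the test fixtures in this PR (see `newTidbCluster` above) set the field explicitly. A hypothetical defensive accessor, not part of this PR, would look like:

```go
package main

import "fmt"

// ReclaimPolicy stands in for corev1.PersistentVolumeReclaimPolicy.
type ReclaimPolicy string

type Spec struct {
	PVReclaimPolicy *ReclaimPolicy
}

// policyOrDefault is a defensive accessor: a bare *s.PVReclaimPolicy
// panics when the spec was never run through defaulting.
func policyOrDefault(s Spec) ReclaimPolicy {
	if s.PVReclaimPolicy == nil {
		return "Retain"
	}
	return *s.PVReclaimPolicy
}

func main() {
	var s Spec // defaulting never ran, so the pointer is nil
	fmt.Println(policyOrDefault(s)) // Retain, instead of a panic
}
```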
8 changes: 8 additions & 0 deletions pkg/label/label.go
@@ -314,11 +314,19 @@ func (l Label) Pump() Label {
 	return l
 }

+func (l Label) IsPump() bool {
+	return l[ComponentLabelKey] == PumpLabelVal
+}
+
 func (l Label) Monitor() Label {
 	l.Component(TiDBMonitorVal)
 	return l
 }

+func (l Label) IsMonitor() bool {
+	return l[ComponentLabelKey] == TiDBMonitorVal
+}
+
 // Discovery assigns discovery to component key in label
 func (l Label) Discovery() Label {
 	l.Component(DiscoveryLabelVal)
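The new `IsPump` and `IsMonitor` predicates follow the shape of the existing helpers: `Label` is a map type, and each predicate compares the component label's value against a constant. A toy version showing the behavior; the key and values here are assumptions for illustration rather than the operator's exact constants:

```go
package main

import "fmt"

// componentKey and the values below are assumptions for illustration;
// the operator defines its own ComponentLabelKey and *LabelVal constants.
const componentKey = "app.kubernetes.io/component"

// Label mirrors the operator's map-backed Label type.
type Label map[string]string

func (l Label) IsPump() bool    { return l[componentKey] == "pump" }
func (l Label) IsMonitor() bool { return l[componentKey] == "monitor" }

func main() {
	pvcLabels := Label{componentKey: "pump"}
	fmt.Println(pvcLabels.IsPump())    // true
	fmt.Println(pvcLabels.IsMonitor()) // false
}
```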
43 changes: 20 additions & 23 deletions pkg/manager/meta/reclaim_policy_manager.go
@@ -14,15 +14,12 @@
 package meta

 import (
-	"fmt"
-
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/controller"
 	"github.com/pingcap/tidb-operator/pkg/label"
 	"github.com/pingcap/tidb-operator/pkg/manager"
 	"github.com/pingcap/tidb-operator/pkg/monitor"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	corelisters "k8s.io/client-go/listers/core/v1"
 )
@@ -56,32 +53,19 @@ func NewReclaimPolicyMonitorManager(pvcLister corelisters.PersistentVolumeClaimL
 }

 func (rpm *reclaimPolicyManager) Sync(tc *v1alpha1.TidbCluster) error {
-	return rpm.sync(v1alpha1.TiDBClusterKind, tc.GetNamespace(), tc.GetInstanceName(), tc.IsPVReclaimEnabled(), tc.Spec.PVReclaimPolicy, tc)
+	return rpm.sync(v1alpha1.TiDBClusterKind, tc.GetNamespace(), tc.GetInstanceName(), tc.IsPVReclaimEnabled(), *tc.Spec.PVReclaimPolicy, tc)
 }

 func (rpm *reclaimPolicyManager) SyncMonitor(tm *v1alpha1.TidbMonitor) error {
-	return rpm.sync(v1alpha1.TiDBMonitorKind, tm.GetNamespace(), tm.GetName(), false, tm.Spec.PVReclaimPolicy, tm)
+	return rpm.sync(v1alpha1.TiDBMonitorKind, tm.GetNamespace(), tm.GetName(), false, *tm.Spec.PVReclaimPolicy, tm)
 }

 func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaimEnabled bool, policy corev1.PersistentVolumeReclaimPolicy, obj runtime.Object) error {
-	var l labels.Selector
-	var err error
-	switch kind {
-	case v1alpha1.TiDBMonitorKind:
-		l, err = label.NewMonitor().Instance(instanceName).Monitor().Selector()
-		if err != nil {
-			return err
-		}
-	case v1alpha1.TiDBClusterKind:
-		l, err = label.New().Instance(instanceName).Selector()
-		if err != nil {
-			return err
-		}
-	default:
-		return fmt.Errorf("unknown kind = %s", kind)
-	}
+	selector, err := label.New().Instance(instanceName).Selector()
+	if err != nil {
+		return err
+	}

-	pvcs, err := rpm.pvcLister.PersistentVolumeClaims(ns).List(l)
+	pvcs, err := rpm.pvcLister.PersistentVolumeClaims(ns).List(selector)
 	if err != nil {
 		return err
 	}
@@ -90,6 +74,20 @@ func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaim
 		if pvc.Spec.VolumeName == "" {
 			continue
 		}
+		l := label.Label(pvc.Labels)
+		switch kind {
+		case v1alpha1.TiDBClusterKind:
+			if !l.IsPD() && !l.IsTiDB() && !l.IsTiKV() && !l.IsTiFlash() && !l.IsPump() {
+				continue
+			}
+		case v1alpha1.TiDBMonitorKind:
+			if !l.IsMonitor() {
+				continue
+			}
+		default:
+			continue
+		}
+
 		if isPVReclaimEnabled && len(pvc.Annotations[label.AnnPVCDeferDeleting]) != 0 {
 			// If the pv reclaim function is turned on, and when pv is the candidate pv to be reclaimed, skip patch this pv.
 			continue
@@ -102,7 +100,6 @@ func (rpm *reclaimPolicyManager) sync(kind, ns, instanceName string, isPVReclaim
 		if pv.Spec.PersistentVolumeReclaimPolicy == policy {
 			continue
 		}
-
 		err = rpm.pvControl.PatchPVReclaimPolicy(obj, pv, policy)
 		if err != nil {
 			return err
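This rewrite is the heart of the fix. Both sync paths now list PVCs with the same instance-level selector and then filter by component label, so the TidbCluster sync no longer touches monitor PVs and vice versa. Before this change, a TidbCluster with policy Delete and a TidbMonitor with policy Retain under the same instance could plausibly keep re-patching the same PVs on every sync loop, which matches the "patch continuously" problem in the title. A self-contained sketch of the new filtering predicate, with the component key and values as illustrative assumptions:

```go
package main

import "fmt"

// componentKey and the component values are illustrative assumptions.
const componentKey = "app.kubernetes.io/component"

type Label map[string]string

func (l Label) is(v string) bool { return l[componentKey] == v }

// componentMatchesKind mirrors the switch added to sync: a TidbCluster
// sync only touches core cluster components, a TidbMonitor sync only
// touches monitor PVCs, and anything unrecognized is skipped.
func componentMatchesKind(kind string, l Label) bool {
	switch kind {
	case "TidbCluster":
		return l.is("pd") || l.is("tidb") || l.is("tikv") || l.is("tiflash") || l.is("pump")
	case "TidbMonitor":
		return l.is("monitor")
	default:
		return false
	}
}

func main() {
	monitorPVC := Label{componentKey: "monitor"}
	fmt.Println(componentMatchesKind("TidbCluster", monitorPVC)) // false: the cluster sync now skips it
	fmt.Println(componentMatchesKind("TidbMonitor", monitorPVC)) // true
}
```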
3 changes: 2 additions & 1 deletion pkg/manager/meta/reclaim_policy_manager_test.go
@@ -165,6 +165,7 @@ func newFakeReclaimPolicyManager() (*reclaimPolicyManager, *controller.FakePVCon
 }

 func newTidbClusterForMeta() *v1alpha1.TidbCluster {
+	pvp := corev1.PersistentVolumeReclaimRetain
 	return &v1alpha1.TidbCluster{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "TidbCluster",
@@ -177,7 +178,7 @@ func newTidbClusterForMeta() *v1alpha1.TidbCluster {
 			Labels:    label.New().Instance(controller.TestClusterName),
 		},
 		Spec: v1alpha1.TidbClusterSpec{
-			PVReclaimPolicy: corev1.PersistentVolumeReclaimRetain,
+			PVReclaimPolicy: &pvp,
 		},
 	}
 }
4 changes: 4 additions & 0 deletions pkg/monitor/monitor/util.go
@@ -864,4 +864,8 @@ func defaultTidbMonitor(monitor *v1alpha1.TidbMonitor) {
 		}
 		monitor.Spec.Clusters[id] = tcRef
 	}
+	retainPVP := core.PersistentVolumeReclaimRetain
+	if monitor.Spec.PVReclaimPolicy == nil {
+		monitor.Spec.PVReclaimPolicy = &retainPVP
+	}
 }
2 changes: 1 addition & 1 deletion tests/actions.go
@@ -1754,7 +1754,7 @@ func (oa *operatorActions) reclaimPolicySyncFn(tc *v1alpha1.TidbCluster) (bool,
 	if pv, err := oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
 		klog.Errorf("failed to get pv: %s, error: %v", pvName, err)
 		return false, nil
-	} else if pv.Spec.PersistentVolumeReclaimPolicy != tc.Spec.PVReclaimPolicy {
+	} else if pv.Spec.PersistentVolumeReclaimPolicy != *tc.Spec.PVReclaimPolicy {
 		klog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
 		return false, nil
 	}
27 changes: 25 additions & 2 deletions tests/e2e/tidbcluster/tidbcluster.go
@@ -723,7 +723,8 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
 			framework.ExpectNoError(err, "Expected get tidbcluster")

 			tm := fixture.NewTidbMonitor("e2e-monitor", tc.Namespace, tc, true, true)
-			tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimDelete
+			deletePVP := corev1.PersistentVolumeReclaimDelete
+			tm.Spec.PVReclaimPolicy = &deletePVP
 			_, err = cli.PingcapV1alpha1().TidbMonitors(tc.Namespace).Create(tm)
 			framework.ExpectNoError(err, "Expected tidbmonitor deployed success")
 			err = tests.CheckTidbMonitor(tm, cli, c, fw)
@@ -764,7 +765,8 @@
 			tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err, "fetch latest tidbmonitor error")
 			tm.Spec.Prometheus.Service.Type = corev1.ServiceTypeNodePort
-			tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimRetain
+			retainPVP := corev1.PersistentVolumeReclaimRetain
+			tm.Spec.PVReclaimPolicy = &retainPVP
 			tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
 			framework.ExpectNoError(err, "update tidbmonitor service type error")

@@ -827,6 +829,27 @@
 			})
 			framework.ExpectNoError(err, "second update tidbmonitor service error")

+			err = wait.Poll(5*time.Second, 3*time.Minute, func() (done bool, err error) {
+				tc, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
+				tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
+				if *tc.Spec.PVReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
+					framework.Logf("tidbcluster PVReclaimPolicy changed into %v", *tc.Spec.PVReclaimPolicy)
+					return true, nil
+				}
+				if *tm.Spec.PVReclaimPolicy != corev1.PersistentVolumeReclaimRetain {
+					framework.Logf("tidbmonitor PVReclaimPolicy changed into %v", *tm.Spec.PVReclaimPolicy)
+					return true, nil
+				}
+				return false, nil
+			})
+			framework.ExpectEqual(err, wait.ErrWaitTimeout, "verify tidbmonitor and tidbcluster PVReclaimPolicy won't affect each other")
+
 			err = cli.PingcapV1alpha1().TidbMonitors(tm.Namespace).Delete(tm.Name, &metav1.DeleteOptions{})
 			framework.ExpectNoError(err, "delete tidbmonitor failed")
 			err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
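Note how the new e2e block inverts the usual `wait.Poll` contract: the condition returns true only if one object's reclaim policy has leaked into the other, so the assertion expects `wait.ErrWaitTimeout`, and timing out is the passing result. A tiny stand-alone sketch of this "expect the condition never to fire" pattern, with a simplified poll helper standing in for `k8s.io/apimachinery`'s `wait.Poll`:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errWaitTimeout = errors.New("timed out waiting for the condition")

// pollUntil is a simplified stand-in for wait.Poll: it runs cond every
// interval and returns errWaitTimeout if cond never reports true
// before the deadline.
func pollUntil(interval, timeout time.Duration, cond func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return errWaitTimeout
}

func main() {
	// The condition models "one object's policy leaked into the other".
	// It should never fire, so timing out is the passing outcome.
	err := pollUntil(10*time.Millisecond, 50*time.Millisecond, func() bool { return false })
	fmt.Println(errors.Is(err, errWaitTimeout)) // true: no cross-contamination observed
}
```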
4 changes: 2 additions & 2 deletions tests/pkg/fixture/fixture.go
@@ -84,7 +84,7 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
 	if v, err := semver.NewVersion(version); err == nil && v.LessThan(tikvV4Beta) {
 		tikvStorageConfig = nil
 	}
-
+	deletePVP := corev1.PersistentVolumeReclaimDelete
 	return &v1alpha1.TidbCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -93,7 +93,7 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
 		Spec: v1alpha1.TidbClusterSpec{
 			Version:         version,
 			ImagePullPolicy: corev1.PullIfNotPresent,
-			PVReclaimPolicy: corev1.PersistentVolumeReclaimDelete,
+			PVReclaimPolicy: &deletePVP,
 			SchedulerName:   "tidb-scheduler",
 			Timezone:        "Asia/Shanghai",
