diff --git a/domain/infosync/info.go b/domain/infosync/info.go
index 9c0ba3a2925bf..d639030c7a4f4 100644
--- a/domain/infosync/info.go
+++ b/domain/infosync/info.go
@@ -1235,14 +1235,20 @@ func SetTiFlashPlacementRule(ctx context.Context, rule placement.TiFlashRule) er
 	return is.tiflashReplicaManager.SetPlacementRule(ctx, rule)
 }
 
-// DeleteTiFlashPlacementRule is to delete placement rule for certain group.
-func DeleteTiFlashPlacementRule(ctx context.Context, group string, ruleID string) error {
+// DeleteTiFlashPlacementRules is a helper function to delete TiFlash placement rules of given physical table IDs.
+func DeleteTiFlashPlacementRules(ctx context.Context, physicalTableIDs []int64) error {
 	is, err := getGlobalInfoSyncer()
 	if err != nil {
 		return errors.Trace(err)
 	}
-	logutil.BgLogger().Info("DeleteTiFlashPlacementRule", zap.String("ruleID", ruleID))
-	return is.tiflashReplicaManager.DeletePlacementRule(ctx, group, ruleID)
+	logutil.BgLogger().Info("DeleteTiFlashPlacementRules", zap.Int64s("physicalTableIDs", physicalTableIDs))
+	rules := make([]placement.TiFlashRule, 0, len(physicalTableIDs))
+	for _, id := range physicalTableIDs {
+		// make a rule with count 0 to delete the rule
+		rule := MakeNewRule(id, 0, nil)
+		rules = append(rules, rule)
+	}
+	return is.tiflashReplicaManager.SetPlacementRuleBatch(ctx, rules)
 }
 
 // GetTiFlashGroupRules to get all placement rule in a certain group.
diff --git a/domain/infosync/info_test.go b/domain/infosync/info_test.go
index 6afa3f63608b3..9d9c04d730734 100644
--- a/domain/infosync/info_test.go
+++ b/domain/infosync/info_test.go
@@ -246,8 +246,8 @@ func TestTiFlashManager(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 1, stats.Count)
 
-	// DeleteTiFlashPlacementRule
-	require.NoError(t, DeleteTiFlashPlacementRule(ctx, "tiflash", rule.ID))
+	// DeleteTiFlashPlacementRules
+	require.NoError(t, DeleteTiFlashPlacementRules(ctx, []int64{1}))
 	rules, err = GetTiFlashGroupRules(ctx, "tiflash")
 	require.NoError(t, err)
 	require.Equal(t, 0, len(rules))
diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go
index 3843b83fcb4d0..41f7cd5f19071 100644
--- a/store/gcworker/gc_worker.go
+++ b/store/gcworker/gc_worker.go
@@ -979,7 +979,7 @@ func (w *GCWorker) redoDeleteRanges(ctx context.Context, safePoint uint64, concu
 	return nil
 }
 
-func (w *GCWorker) doUnsafeDestroyRangeRequest(ctx context.Context, startKey []byte, endKey []byte, concurrency int) error {
+func (w *GCWorker) doUnsafeDestroyRangeRequest(ctx context.Context, startKey []byte, endKey []byte, _ int) error {
 	// Get all stores every time deleting a region. So the store list is less probably to be stale.
 	stores, err := w.getStoresForGC(ctx)
 	if err != nil {
@@ -1970,16 +1970,8 @@ func (w *GCWorker) doGCPlacementRules(se session.Session, safePoint uint64, dr u
 		return
 	}
 
-	for _, id := range physicalTableIDs {
-		// Delete pd rule
-		failpoint.Inject("gcDeletePlacementRuleCounter", func() {})
-		logutil.BgLogger().Info("try delete TiFlash pd rule",
-			zap.Int64("tableID", id), zap.String("endKey", string(dr.EndKey)), zap.Uint64("safePoint", safePoint))
-		ruleID := infosync.MakeRuleID(id)
-		if err := infosync.DeleteTiFlashPlacementRule(context.Background(), "tiflash", ruleID); err != nil {
-			logutil.BgLogger().Error("delete TiFlash pd rule failed when gc",
-				zap.Error(err), zap.String("ruleID", ruleID), zap.Uint64("safePoint", safePoint))
-		}
+	if err := infosync.DeleteTiFlashPlacementRules(context.Background(), physicalTableIDs); err != nil {
+		logutil.BgLogger().Error("delete placement rules failed", zap.Error(err), zap.Int64s("tableIDs", physicalTableIDs))
+	}
 	bundles := make([]*placement.Bundle, 0, len(physicalTableIDs))
 	for _, id := range physicalTableIDs {
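
Not part of the patch: a minimal sketch of how an out-of-tree caller of the removed per-rule API might migrate to the new batch helper, assuming the `domain/infosync` import path used in this diff. The wrapper name `cleanUpTiFlashRules` is hypothetical; only the `infosync` functions shown above are real.

```go
package main

import (
	"context"

	"github.com/pingcap/tidb/domain/infosync"
)

// cleanUpTiFlashRules (hypothetical) removes TiFlash placement rules for the
// given physical table IDs.
func cleanUpTiFlashRules(ctx context.Context, physicalTableIDs []int64) error {
	// Before this change: one PD request per table, e.g.
	//   for _, id := range physicalTableIDs {
	//       ruleID := infosync.MakeRuleID(id)
	//       _ = infosync.DeleteTiFlashPlacementRule(ctx, "tiflash", ruleID)
	//   }
	// After this change: a single call; the helper builds count-0 rules and
	// submits them together via SetPlacementRuleBatch.
	return infosync.DeleteTiFlashPlacementRules(ctx, physicalTableIDs)
}
```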