From b7c949850cf186b7c823fb4739e1e0a2e70c7b83 Mon Sep 17 00:00:00 2001
From: tomato <38561029+qidi1@users.noreply.github.com>
Date: Tue, 11 Jan 2022 17:23:43 +0800
Subject: [PATCH] config: fix bug that hot region configuration can't be changed unless we do a reboot (#4539)

* fix bug in hot_region_history

Signed-off-by: qidi1 <1083369179@qq.com>

* config: make the config changeable

ref #pingcap/tidb/issues/25281

Signed-off-by: qidi1 <1083369179@qq.com>

* server: address comments and add unit test

Signed-off-by: qidi1 <1083369179@qq.com>

Co-authored-by: IcePigZDB

* server: fix data race

Signed-off-by: qidi1 <1083369179@qq.com>

* core: add integration test for config change

Signed-off-by: qidi1 <1083369179@qq.com>

* core: remove config change unit test

Signed-off-by: qidi1 <1083369179@qq.com>

* core: add getCurReservedDays and getCurInterval

Signed-off-by: qidi1 <1083369179@qq.com>

Co-authored-by: IcePigZDB
Co-authored-by: ShuNing
Co-authored-by: Ti Chi Robot
---
 server/config/config.go                      |   9 +-
 server/config/config_test.go                 |   4 +-
 server/config/persist_options.go             |  10 ++
 server/core/hot_region_storage.go            |  84 ++++++++--
 server/core/hot_region_storage_test.go       |  54 ++++--
 server/handler.go                            |  10 ++
 server/server.go                             |   4 +-
 tests/server/core/hot_region_storage_test.go | 168 ++++++++++++++++++-
 8 files changed, 299 insertions(+), 44 deletions(-)

diff --git a/server/config/config.go b/server/config/config.go
index 0ce34cc42e32..2ebbfd140e4b 100644
--- a/server/config/config.go
+++ b/server/config/config.go
@@ -739,7 +739,7 @@ type ScheduleConfig struct {
     HotRegionsWriteInterval typeutil.Duration `toml:"hot-regions-write-interval" json:"hot-regions-write-interval"`
     // The day of hot regions data to be reserved. 0 means close.
-    HotRegionsReservedDays int64 `toml:"hot-regions-reserved-days" json:"hot-regions-reserved-days"`
+    HotRegionsReservedDays uint64 `toml:"hot-regions-reserved-days" json:"hot-regions-reserved-days"`
 }

 // Clone returns a cloned scheduling configuration.
@@ -805,6 +805,7 @@ func (c *ScheduleConfig) adjust(meta *configMetaData, reloading bool) error {
     adjustDuration(&c.SplitMergeInterval, defaultSplitMergeInterval)
     adjustDuration(&c.PatrolRegionInterval, defaultPatrolRegionInterval)
     adjustDuration(&c.MaxStoreDownTime, defaultMaxStoreDownTime)
+    adjustDuration(&c.HotRegionsWriteInterval, defaultHotRegionsWriteInterval)
     if !meta.IsDefined("leader-schedule-limit") {
         adjustUint64(&c.LeaderScheduleLimit, defaultLeaderScheduleLimit)
     }
@@ -868,12 +869,8 @@ func (c *ScheduleConfig) adjust(meta *configMetaData, reloading bool) error {
         c.StoreLimit = make(map[uint64]StoreLimitConfig)
     }

-    if !meta.IsDefined("hot-regions-write-interval") {
-        adjustDuration(&c.HotRegionsWriteInterval, defaultHotRegionsWriteInterval)
-    }
-
     if !meta.IsDefined("hot-regions-reserved-days") {
-        adjustInt64(&c.HotRegionsReservedDays, defaultHotRegionsReservedDays)
+        adjustUint64(&c.HotRegionsReservedDays, defaultHotRegionsReservedDays)
     }

     return c.Validate()

diff --git a/server/config/config_test.go b/server/config/config_test.go
index cb3c29eb3f39..1b459abd6f1e 100644
--- a/server/config/config_test.go
+++ b/server/config/config_test.go
@@ -477,13 +477,13 @@ hot-regions-write-interval= "30m"
     err = cfg.Adjust(&meta, false)
     c.Assert(err, IsNil)
     c.Assert(cfg.Schedule.HotRegionsWriteInterval.Duration, Equals, 30*time.Minute)
-    c.Assert(cfg.Schedule.HotRegionsReservedDays, Equals, int64(30))
+    c.Assert(cfg.Schedule.HotRegionsReservedDays, Equals, uint64(30))
     // Verify default value
     cfg = NewConfig()
     err = cfg.Adjust(nil, false)
     c.Assert(err, IsNil)
     c.Assert(cfg.Schedule.HotRegionsWriteInterval.Duration, Equals, 10*time.Minute)
-    c.Assert(cfg.Schedule.HotRegionsReservedDays, Equals, int64(7))
+    c.Assert(cfg.Schedule.HotRegionsReservedDays, Equals, uint64(7))
 }

 func (s *testConfigSuite) TestConfigClone(c *C) {

diff --git a/server/config/persist_options.go b/server/config/persist_options.go
index 6ca1e0c7f9cd..0b1412f71092 100644
--- a/server/config/persist_options.go
+++ b/server/config/persist_options.go
@@ -526,6 +526,16 @@ func (o *PersistOptions) GetSchedulers() SchedulerConfigs {
     return o.GetScheduleConfig().Schedulers
 }

+// GetHotRegionsWriteInterval gets interval for PD to store Hot Region information.
+func (o *PersistOptions) GetHotRegionsWriteInterval() time.Duration {
+    return o.GetScheduleConfig().HotRegionsWriteInterval.Duration
+}
+
+// GetHotRegionsReservedDays gets days hot region information is kept.
+func (o *PersistOptions) GetHotRegionsReservedDays() uint64 {
+    return o.GetScheduleConfig().HotRegionsReservedDays
+}
+
 // AddSchedulerCfg adds the scheduler configurations.
 func (o *PersistOptions) AddSchedulerCfg(tp string, args []string) {
     v := o.GetScheduleConfig().Clone()

diff --git a/server/core/hot_region_storage.go b/server/core/hot_region_storage.go
index 4800275eb283..e4f5526bd2db 100644
--- a/server/core/hot_region_storage.go
+++ b/server/core/hot_region_storage.go
@@ -34,6 +34,7 @@ import (
     "github.com/tikv/pd/pkg/errs"
     "github.com/tikv/pd/server/encryptionkm"
     "github.com/tikv/pd/server/kv"
+    "go.uber.org/zap"
 )

 // HotRegionStorage is used to storage hot region info,
@@ -43,14 +44,15 @@ type HotRegionStorage struct {
     *kv.LeveldbKV
     encryptionKeyManager    *encryptionkm.KeyManager
-    mu                      sync.RWMutex
     hotRegionLoopWg         sync.WaitGroup
     batchHotInfo            map[string]*HistoryHotRegion
-    remianedDays            int64
-    pullInterval            time.Duration
     hotRegionInfoCtx        context.Context
     hotRegionInfoCancel     context.CancelFunc
     hotRegionStorageHandler HotRegionStorageHandler
+
+    curReservedDays uint64
+    curInterval     time.Duration
+    mu              sync.RWMutex
 }

 // HistoryHotRegions wraps historyHotRegion
@@ -90,6 +92,10 @@ type HotRegionStorageHandler interface {
     PackHistoryHotWriteRegions() ([]HistoryHotRegion, error)
     // IsLeader return true means this server is leader.
     IsLeader() bool
+    // GetHotRegionsWriteInterval gets interval for PD to store Hot Region information.
+    GetHotRegionsWriteInterval() time.Duration
+    // GetHotRegionsReservedDays gets days hot region information is kept.
+    GetHotRegionsReservedDays() uint64
 }

 const (
@@ -129,8 +135,6 @@ func NewHotRegionsStorage(
     path string,
     encryptionKeyManager *encryptionkm.KeyManager,
     hotRegionStorageHandler HotRegionStorageHandler,
-    remianedDays int64,
-    pullInterval time.Duration,
 ) (*HotRegionStorage, error) {
     levelDB, err := kv.NewLeveldbKV(path)
     if err != nil {
@@ -141,17 +145,15 @@ func NewHotRegionsStorage(
         LeveldbKV:               levelDB,
         encryptionKeyManager:    encryptionKeyManager,
         batchHotInfo:            make(map[string]*HistoryHotRegion),
-        remianedDays:            remianedDays,
-        pullInterval:            pullInterval,
         hotRegionInfoCtx:        hotRegionInfoCtx,
         hotRegionInfoCancel:     hotRegionInfoCancle,
         hotRegionStorageHandler: hotRegionStorageHandler,
+        curReservedDays:         hotRegionStorageHandler.GetHotRegionsReservedDays(),
+        curInterval:             hotRegionStorageHandler.GetHotRegionsWriteInterval(),
     }
-    if remianedDays > 0 {
-        h.hotRegionLoopWg.Add(2)
-        go h.backgroundFlush()
-        go h.backgroundDelete()
-    }
+    h.hotRegionLoopWg.Add(2)
+    go h.backgroundFlush()
+    go h.backgroundDelete()
     return &h, nil
 }
@@ -173,11 +175,18 @@ func (h *HotRegionStorage) backgroundDelete() {
     for {
         select {
         case <-ticker.C:
+            h.updateReservedDays()
+            curReservedDays := h.getCurReservedDays()
             if isFirst {
                 ticker.Reset(24 * time.Hour)
                 isFirst = false
             }
-            h.delete()
+            if curReservedDays == 0 {
+                log.Warn(`hot region reserved days is 0, if the previous reserved days is not 0,
+                    there may be residual hot regions, you can remove them manually from [pd-dir]/data/hot-region.`)
+                continue
+            }
+            h.delete(int(curReservedDays))
         case <-h.hotRegionInfoCtx.Done():
             return
         }
     }
@@ -186,7 +195,8 @@
 // Write hot_region info into db in the background.
 func (h *HotRegionStorage) backgroundFlush() {
-    ticker := time.NewTicker(h.pullInterval)
+    interval := h.getCurInterval()
+    ticker := time.NewTicker(interval)
     defer func() {
         ticker.Stop()
         h.hotRegionLoopWg.Done()
@@ -194,6 +204,12 @@
     for {
         select {
         case <-ticker.C:
+            h.updateInterval()
+            h.updateReservedDays()
+            ticker.Reset(h.getCurInterval())
+            if h.getCurReservedDays() == 0 {
+                continue
+            }
             if h.hotRegionStorageHandler.IsLeader() {
                 if err := h.pullHotRegionInfo(); err != nil {
                     log.Error("get hot_region stat meet error", errs.ZapError(err))
@@ -270,6 +286,42 @@ func (h *HotRegionStorage) packHistoryHotRegions(historyHotRegions []HistoryHotR
     return nil
 }

+func (h *HotRegionStorage) updateInterval() {
+    h.mu.Lock()
+    defer h.mu.Unlock()
+    interval := h.hotRegionStorageHandler.GetHotRegionsWriteInterval()
+    if interval != h.curInterval {
+        log.Info("hot region write interval changed",
+            zap.Duration("previous-interval", h.curInterval),
+            zap.Duration("new-interval", interval))
+        h.curInterval = interval
+    }
+}
+
+func (h *HotRegionStorage) getCurInterval() time.Duration {
+    h.mu.RLock()
+    defer h.mu.RUnlock()
+    return h.curInterval
+}
+
+func (h *HotRegionStorage) updateReservedDays() {
+    h.mu.Lock()
+    defer h.mu.Unlock()
+    reservedDays := h.hotRegionStorageHandler.GetHotRegionsReservedDays()
+    if reservedDays != h.curReservedDays {
+        log.Info("hot region reserved days changed",
+            zap.Uint64("previous-reserved-days", h.curReservedDays),
+            zap.Uint64("new-reserved-days", reservedDays))
+        h.curReservedDays = reservedDays
+    }
+}
+
+func (h *HotRegionStorage) getCurReservedDays() uint64 {
+    h.mu.RLock()
+    defer h.mu.RUnlock()
+    return h.curReservedDays
+}
+
 func (h *HotRegionStorage) flush() error {
     h.mu.Lock()
     defer h.mu.Unlock()
@@ -288,14 +340,14 @@ func (h *HotRegionStorage) flush() error {
     return nil
 }

-func (h *HotRegionStorage) delete() error {
+func (h *HotRegionStorage) delete(reservedDays int) error {
     h.mu.Lock()
     defer h.mu.Unlock()
     db := h.LeveldbKV
     batch := new(leveldb.Batch)
     for _, hotRegionType := range HotRegionTypes {
         startKey := HotRegionStorePath(hotRegionType, 0, 0)
-        endTime := time.Now().AddDate(0, 0, 0-int(h.remianedDays)).UnixNano() / int64(time.Millisecond)
+        endTime := time.Now().AddDate(0, 0, 0-reservedDays).UnixNano() / int64(time.Millisecond)
         endKey := HotRegionStorePath(hotRegionType, endTime, math.MaxInt64)
         iter := db.NewIterator(&util.Range{
             Start: []byte(startKey), Limit: []byte(endKey)}, nil)

diff --git a/server/core/hot_region_storage_test.go b/server/core/hot_region_storage_test.go
index 62859c478815..4775189f0482 100644
--- a/server/core/hot_region_storage_test.go
+++ b/server/core/hot_region_storage_test.go
@@ -32,16 +32,22 @@ type MockPackHotRegionInfo struct {
     isLeader         bool
     historyHotReads  []HistoryHotRegion
     historyHotWrites []HistoryHotRegion
+    reservedDays     uint64
+    pullInterval     time.Duration
 }

 // PackHistoryHotWriteRegions get read hot region info in HistoryHotRegion from.
 func (m *MockPackHotRegionInfo) PackHistoryHotReadRegions() ([]HistoryHotRegion, error) {
-    return m.historyHotReads, nil
+    result := make([]HistoryHotRegion, len(m.historyHotReads))
+    copy(result, m.historyHotReads)
+    return result, nil
 }

 // PackHistoryHotWriteRegions get write hot region info in HistoryHotRegion form.
 func (m *MockPackHotRegionInfo) PackHistoryHotWriteRegions() ([]HistoryHotRegion, error) {
-    return m.historyHotWrites, nil
+    result := make([]HistoryHotRegion, len(m.historyHotWrites))
+    copy(result, m.historyHotWrites)
+    return result, nil
 }

 // IsLeader return isLeader.
@@ -75,13 +81,29 @@ func (m *MockPackHotRegionInfo) GenHistoryHotRegions(num int, updateTime time.Ti
     }
 }

+func (m *MockPackHotRegionInfo) GetHotRegionsReservedDays() uint64 {
+    return m.reservedDays
+}
+
+func (m *MockPackHotRegionInfo) SetHotRegionsReservedDays(reservedDays uint64) {
+    m.reservedDays = reservedDays
+}
+
+func (m *MockPackHotRegionInfo) GetHotRegionsWriteInterval() time.Duration {
+    return m.pullInterval
+}
+
+func (m *MockPackHotRegionInfo) SetHotRegionsWriteInterval(interval time.Duration) {
+    m.pullInterval = interval
+}
+
 // ClearHotRegion delete all region cached.
 func (m *MockPackHotRegionInfo) ClearHotRegion() {
     m.historyHotReads = make([]HistoryHotRegion, 0)
     m.historyHotWrites = make([]HistoryHotRegion, 0)
 }

-var _ = Suite(&testHotRegionStorage{})
+var _ = SerialSuites(&testHotRegionStorage{})

 type testHotRegionStorage struct {
     ctx context.Context
@@ -157,11 +179,11 @@ func (t *testHotRegionStorage) TestHotRegionWrite(c *C) {
 }

 func (t *testHotRegionStorage) TestHotRegionDelete(c *C) {
-    defaultReaminDay := 7
+    defaultRemainDay := 7
     defaultDelteData := 30
     deleteDate := time.Now().AddDate(0, 0, 0)
     packHotRegionInfo := &MockPackHotRegionInfo{}
-    store, clean, err := newTestHotRegionStorage(10*time.Minute, int64(defaultReaminDay), packHotRegionInfo)
+    store, clean, err := newTestHotRegionStorage(10*time.Minute, uint64(defaultRemainDay), packHotRegionInfo)
     c.Assert(err, IsNil)
     defer clean()
     historyHotRegions := make([]HistoryHotRegion, 0)
@@ -177,14 +199,14 @@ func (t *testHotRegionStorage) TestHotRegionDelete(c *C) {
     packHotRegionInfo.historyHotReads = historyHotRegions
     store.pullHotRegionInfo()
     store.flush()
-    store.delete()
+    store.delete(defaultRemainDay)
     iter := store.NewIterator(HotRegionTypes, deleteDate.UnixNano()/int64(time.Millisecond),
         time.Now().UnixNano()/int64(time.Millisecond))
     num := 0
     for next, err := iter.Next(); next != nil && err == nil; next, err = iter.Next() {
         num++
-        c.Assert(reflect.DeepEqual(next, &historyHotRegions[defaultReaminDay-num]), IsTrue)
+        c.Assert(reflect.DeepEqual(next, &historyHotRegions[defaultRemainDay-num]), IsTrue)
     }
 }

@@ -202,10 +224,10 @@ func BenchmarkInsert(b *testing.B) {
     b.StopTimer()
 }

-func BenchmarkInsertAfterMonth(b *testing.B) {
+func BenchmarkInsertAfterManyDays(b *testing.B) {
     defaultInsertDay := 30
     packHotRegionInfo := &MockPackHotRegionInfo{}
-    regionStorage, clear, err := newTestHotRegionStorage(10*time.Hour, int64(defaultInsertDay), packHotRegionInfo)
+    regionStorage, clear, err := newTestHotRegionStorage(10*time.Hour, uint64(defaultInsertDay), packHotRegionInfo)
     defer clear()
     if err != nil {
         b.Fatal(err)
     }
@@ -220,9 +242,9 @@
 func BenchmarkDelete(b *testing.B) {
     defaultInsertDay := 7
-    defaultReaminDay := 7
+    defaultRemainDay := 7
     packHotRegionInfo := &MockPackHotRegionInfo{}
-    regionStorage, clear, err := newTestHotRegionStorage(10*time.Hour, int64(defaultReaminDay), packHotRegionInfo)
+    regionStorage, clear, err := newTestHotRegionStorage(10*time.Hour, uint64(defaultRemainDay), packHotRegionInfo)
     defer clear()
     if err != nil {
         b.Fatal(err)
     }
     deleteTime := time.Now().AddDate(0, 0, -14)
     newTestHotRegions(regionStorage, packHotRegionInfo,
         144*defaultInsertDay, 1000, deleteTime)
     b.ResetTimer()
-    regionStorage.delete()
+    regionStorage.delete(defaultRemainDay)
     b.StopTimer()
 }
@@ -269,8 +291,8 @@ func newTestHotRegions(storage *HotRegionStorage, mock *MockPackHotRegionInfo, c
 }

 func newTestHotRegionStorage(pullInterval time.Duration,
-    remianedDays int64,
-    packHotRegionInfo HotRegionStorageHandler) (
+    reservedDays uint64,
+    packHotRegionInfo *MockPackHotRegionInfo) (
     hotRegionStorage *HotRegionStorage, clear func(), err error) {
     writePath := "./tmp"
@@ -278,9 +300,11 @@ func newTestHotRegionStorage(pullInterval time.Duration,
     if err != nil {
         return nil, nil, err
     }
+    packHotRegionInfo.pullInterval = pullInterval
+    packHotRegionInfo.reservedDays = reservedDays
     // delete data in between today and tomrrow
     hotRegionStorage, err = NewHotRegionsStorage(ctx,
-        writePath, nil, packHotRegionInfo, remianedDays, pullInterval)
+        writePath, nil, packHotRegionInfo)
     if err != nil {
         return nil, nil, err
     }

diff --git a/server/handler.go b/server/handler.go
index 4b50e4b4b5c7..5995accebc7a 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -191,6 +191,16 @@ func (h *Handler) GetHotReadRegions() *statistics.StoreHotPeersInfos {
     return c.GetHotReadRegions()
 }

+// GetHotRegionsWriteInterval gets interval for PD to store Hot Region information.
+func (h *Handler) GetHotRegionsWriteInterval() time.Duration {
+    return h.opt.GetHotRegionsWriteInterval()
+}
+
+// GetHotRegionsReservedDays gets days hot region information is kept.
+func (h *Handler) GetHotRegionsReservedDays() uint64 {
+    return h.opt.GetHotRegionsReservedDays()
+}
+
 // GetStoresLoads gets all hot write stores stats.
 func (h *Handler) GetStoresLoads() map[uint64][]float64 {
     rc := h.s.GetRaftCluster()

diff --git a/server/server.go b/server/server.go
index b4aec089f278..14ac766786aa 100644
--- a/server/server.go
+++ b/server/server.go
@@ -400,9 +400,7 @@ func (s *Server) startServer(ctx context.Context) error {
     // initial hot_region_storage in here.
     hotRegionPath := filepath.Join(s.cfg.DataDir, "hot-region")
     s.hotRegionStorage, err = core.NewHotRegionsStorage(
-        ctx, hotRegionPath, encryptionKeyManager, s.handler,
-        s.cfg.Schedule.HotRegionsReservedDays,
-        s.cfg.Schedule.HotRegionsWriteInterval.Duration)
+        ctx, hotRegionPath, encryptionKeyManager, s.handler)
     if err != nil {
         return err
     }

diff --git a/tests/server/core/hot_region_storage_test.go b/tests/server/core/hot_region_storage_test.go
index 717363180177..cf7ae32168ad 100644
--- a/tests/server/core/hot_region_storage_test.go
+++ b/tests/server/core/hot_region_storage_test.go
@@ -81,7 +81,7 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) {
     pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval))
     pdctl.MustPutRegion(c, cluster, 3, 1, []byte("e"), []byte("f"))
     pdctl.MustPutRegion(c, cluster, 4, 2, []byte("g"), []byte("h"))
-    storeStatss := []*pdpb.StoreStats{
+    storeStats := []*pdpb.StoreStats{
         {
             StoreId:  1,
             Interval: &pdpb.TimeInterval{StartTimestamp: 0, EndTimestamp: statistics.ReadReportInterval},
@@ -103,7 +103,7 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) {
         },
     }
-    for _, storeStats := range storeStatss {
+    for _, storeStats := range storeStats {
         leaderServer.GetRaftCluster().HandleStoreHeartbeat(storeStats)
     }
     // wait hot scheduler starts
@@ -143,3 +143,167 @@ func (s *hotRegionHistorySuite) TestHotRegionStorage(c *C) {
     c.Assert(next, IsNil)
     c.Assert(err, IsNil)
 }
+
+func (s *hotRegionHistorySuite) TestHotRegionStorageReservedDayConfigChange(c *C) {
+    statistics.Denoising = false
+    ctx, cancel := context.WithCancel(context.Background())
+    interval := 100 * time.Millisecond
+    defer cancel()
+    cluster, err := tests.NewTestCluster(ctx, 1,
+        func(cfg *config.Config, serverName string) {
+            cfg.Schedule.HotRegionCacheHitsThreshold = 0
+            cfg.Schedule.HotRegionsWriteInterval.Duration = interval
+            cfg.Schedule.HotRegionsReservedDays = 1
+        },
+    )
+    c.Assert(err, IsNil)
+    err = cluster.RunInitialServers()
+    c.Assert(err, IsNil)
+    cluster.WaitLeader()
+    stores := []*metapb.Store{
+        {
+            Id:            1,
+            State:         metapb.StoreState_Up,
+            LastHeartbeat: time.Now().UnixNano(),
+        },
+        {
+            Id:            2,
+            State:         metapb.StoreState_Up,
+            LastHeartbeat: time.Now().UnixNano(),
+        },
+    }
+
+    leaderServer := cluster.GetServer(cluster.GetLeader())
+    c.Assert(leaderServer.BootstrapCluster(), IsNil)
+    for _, store := range stores {
+        pdctl.MustPutStore(c, leaderServer.GetServer(), store)
+    }
+    defer cluster.Destroy()
+    startTime := time.Now().UnixNano() / int64(time.Millisecond)
+    pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval))
+    // wait hot scheduler starts
+    time.Sleep(5000 * time.Millisecond)
+    endTime := time.Now().UnixNano() / int64(time.Millisecond)
+    storage := leaderServer.GetServer().GetHistoryHotRegionStorage()
+    iter := storage.NewIterator([]string{core.WriteType.String()}, startTime, endTime)
+    next, err := iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(1))
+    c.Assert(next.StoreID, Equals, uint64(1))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+    next, err = iter.Next()
+    c.Assert(err, IsNil)
+    c.Assert(next, IsNil)
+    schedule := leaderServer.GetConfig().Schedule
+    // set reserved days to zero to close hot region storage
+    schedule.HotRegionsReservedDays = 0
+    leaderServer.GetServer().SetScheduleConfig(schedule)
+    time.Sleep(3 * interval)
+    pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval))
+    time.Sleep(10 * interval)
+    endTime = time.Now().UnixNano() / int64(time.Millisecond)
+    storage = leaderServer.GetServer().GetHistoryHotRegionStorage()
+    iter = storage.NewIterator([]string{core.WriteType.String()}, startTime, endTime)
+    next, err = iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(1))
+    c.Assert(next.StoreID, Equals, uint64(1))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+    next, err = iter.Next()
+    c.Assert(err, IsNil)
+    c.Assert(next, IsNil)
+    // set reserved days to one to reopen hot region storage
+    schedule.HotRegionsReservedDays = 1
+    leaderServer.GetServer().SetScheduleConfig(schedule)
+    time.Sleep(3 * interval)
+    endTime = time.Now().UnixNano() / int64(time.Millisecond)
+    storage = leaderServer.GetServer().GetHistoryHotRegionStorage()
+    iter = storage.NewIterator([]string{core.WriteType.String()}, startTime, endTime)
+    next, err = iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(1))
+    c.Assert(next.StoreID, Equals, uint64(1))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+    next, err = iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(2))
+    c.Assert(next.StoreID, Equals, uint64(2))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+}
+
+func (s *hotRegionHistorySuite) TestHotRegionStorageWriteIntervalConfigChange(c *C) {
+    statistics.Denoising = false
+    ctx, cancel := context.WithCancel(context.Background())
+    interval := 100 * time.Millisecond
+    defer cancel()
+    cluster, err := tests.NewTestCluster(ctx, 1,
+        func(cfg *config.Config, serverName string) {
+            cfg.Schedule.HotRegionCacheHitsThreshold = 0
+            cfg.Schedule.HotRegionsWriteInterval.Duration = interval
+            cfg.Schedule.HotRegionsReservedDays = 1
+        },
+    )
+    c.Assert(err, IsNil)
+    err = cluster.RunInitialServers()
+    c.Assert(err, IsNil)
+    cluster.WaitLeader()
+    stores := []*metapb.Store{
+        {
+            Id:            1,
+            State:         metapb.StoreState_Up,
+            LastHeartbeat: time.Now().UnixNano(),
+        },
+        {
+            Id:            2,
+            State:         metapb.StoreState_Up,
+            LastHeartbeat: time.Now().UnixNano(),
+        },
+    }
+
+    leaderServer := cluster.GetServer(cluster.GetLeader())
+    c.Assert(leaderServer.BootstrapCluster(), IsNil)
+    for _, store := range stores {
+        pdctl.MustPutStore(c, leaderServer.GetServer(), store)
+    }
+    defer cluster.Destroy()
+    startTime := time.Now().UnixNano() / int64(time.Millisecond)
+    pdctl.MustPutRegion(c, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(statistics.WriteReportInterval))
+    // wait hot scheduler starts
+    time.Sleep(5000 * time.Millisecond)
+    endTime := time.Now().UnixNano() / int64(time.Millisecond)
+    storage := leaderServer.GetServer().GetHistoryHotRegionStorage()
+    iter := storage.NewIterator([]string{core.WriteType.String()}, startTime, endTime)
+    next, err := iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(1))
+    c.Assert(next.StoreID, Equals, uint64(1))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+    next, err = iter.Next()
+    c.Assert(err, IsNil)
+    c.Assert(next, IsNil)
+    schedule := leaderServer.GetConfig().Schedule
+    // set the write interval to 20 times the original interval
+    schedule.HotRegionsWriteInterval.Duration = 20 * interval
+    leaderServer.GetServer().SetScheduleConfig(schedule)
+    time.Sleep(3 * interval)
+    pdctl.MustPutRegion(c, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(statistics.WriteReportInterval))
+    time.Sleep(10 * interval)
+    endTime = time.Now().UnixNano() / int64(time.Millisecond)
+    // it can't get the new hot region because the wait time is shorter than the hot region write interval
+    storage = leaderServer.GetServer().GetHistoryHotRegionStorage()
+    iter = storage.NewIterator([]string{core.WriteType.String()}, startTime, endTime)
+    next, err = iter.Next()
+    c.Assert(next, NotNil)
+    c.Assert(err, IsNil)
+    c.Assert(next.RegionID, Equals, uint64(1))
+    c.Assert(next.StoreID, Equals, uint64(1))
+    c.Assert(next.HotRegionType, Equals, core.WriteType.String())
+    next, err = iter.Next()
+    c.Assert(err, IsNil)
+    c.Assert(next, IsNil)
+}
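
The change above boils down to one pattern: instead of freezing HotRegionsWriteInterval and HotRegionsReservedDays when HotRegionStorage is constructed, the background goroutines re-read the options from the handler on every tick and reset the ticker, so a SetScheduleConfig call takes effect without restarting PD. Below is a minimal, self-contained Go sketch of that pattern, separate from the patch itself; ConfigProvider and Worker are hypothetical stand-ins for PersistOptions/HotRegionStorageHandler and HotRegionStorage, not PD APIs.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// ConfigProvider stands in for the options object: it always returns the
// current value of a setting that can be changed at runtime.
type ConfigProvider struct {
    mu       sync.RWMutex
    interval time.Duration
}

// SetInterval changes the setting at runtime (e.g. via an admin API call).
func (c *ConfigProvider) SetInterval(d time.Duration) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.interval = d
}

// GetInterval returns the current setting.
func (c *ConfigProvider) GetInterval() time.Duration {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.interval
}

// Worker mirrors backgroundFlush: it caches the last-seen interval and
// resets its ticker whenever the provider reports a new value, so a config
// change is picked up without restarting the process.
type Worker struct {
    provider    *ConfigProvider
    curInterval time.Duration
}

func (w *Worker) Run(ctx context.Context) {
    w.curInterval = w.provider.GetInterval()
    ticker := time.NewTicker(w.curInterval)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            // Re-read the option on every tick, like updateInterval/getCurInterval.
            if next := w.provider.GetInterval(); next != w.curInterval {
                fmt.Printf("interval changed: %v -> %v\n", w.curInterval, next)
                w.curInterval = next
            }
            ticker.Reset(w.curInterval)
            // ... do the periodic work (flush, delete, ...) here ...
        case <-ctx.Done():
            return
        }
    }
}

func main() {
    provider := &ConfigProvider{interval: 100 * time.Millisecond}
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    go (&Worker{provider: provider}).Run(ctx)

    time.Sleep(350 * time.Millisecond)
    provider.SetInterval(50 * time.Millisecond) // runtime change; no reboot needed
    time.Sleep(350 * time.Millisecond)
}

Polling the value on each tick (rather than subscribing to config-change events) keeps the worker simple, at the cost that a change only takes effect at the next tick; that is why the integration tests above sleep for several intervals after calling SetScheduleConfig before asserting on the stored hot regions.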