diff --git a/executor/builder.go b/executor/builder.go
index 94aa7fd37fdf4..d83c4302742fa 100644
--- a/executor/builder.go
+++ b/executor/builder.go
@@ -553,6 +553,7 @@ func (b *executorBuilder) buildShow(v *plannercore.Show) Executor {
 		DBName:      model.NewCIStr(v.DBName),
 		Table:       v.Table,
 		Column:      v.Column,
+		IndexName:   v.IndexName,
 		User:        v.User,
 		Roles:       v.Roles,
 		IfNotExists: v.IfNotExists,
diff --git a/executor/executor_test.go b/executor/executor_test.go
index 14def89c0d914..cc3f17253a4e9 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -3921,6 +3921,122 @@ func (s *testSuite) TestSplitRegion(c *C) {
 	tk.MustExec(`split table t by (0),(1000),(1000000)`)
 }
 
+func (s *testSuite) TestShowTableRegion(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists t_regions1, t_regions")
+	tk.MustExec("create table t_regions1 (a int key, b int, index idx(b))")
+	tk.MustExec("create table t_regions (a int key, b int, index idx(b))")
+
+	// Test show table regions.
+	tk.MustExec(`split table t_regions1 by (0)`)
+	tk.MustExec(`split table t_regions between (-10000) and (10000) regions 4;`)
+	re := tk.MustQuery("show table t_regions regions")
+	rows := re.Rows()
+	// Table t_regions should have 4 regions now.
+	c.Assert(len(rows), Equals, 4)
+	c.Assert(len(rows[0]), Equals, 7)
+	tbl1 := testGetTableByName(c, tk.Se, "test", "t_regions1")
+	tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
+	// Check the region start key.
+	c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", tbl1.Meta().ID))
+	c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
+	c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
+	c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
+
+	// Test show table index regions.
+	tk.MustExec(`split table t_regions index idx between (-1000) and (1000) regions 4;`)
+	re = tk.MustQuery("show table t_regions index idx regions")
+	rows = re.Rows()
+	// The index `idx` of table t_regions should have 4 regions now.
+	c.Assert(len(rows), Equals, 4)
+	// Check the region start key.
+	c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
+	c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+
+	re = tk.MustQuery("show table t_regions regions")
+	rows = re.Rows()
+	// Table t_regions should have 7 regions now: the 4 record regions plus the regions created by the index split.
+	c.Assert(len(rows), Equals, 7)
+	// Check the region start key.
+	c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
+	c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
+	c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
+	c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+
+	// Test an unsigned primary key and wait for region scattering to finish.
+	tk.MustExec("drop table if exists t_regions")
+	tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
+
+	// Test show table regions.
+	tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
+	tk.MustExec(`split table t_regions between (0) and (10000) regions 4;`)
+	re = tk.MustQuery("show table t_regions regions")
+	rows = re.Rows()
+	// Table t_regions should have 4 regions now.
+	c.Assert(len(rows), Equals, 4)
+	tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
+	// Check the region start key.
+	c.Assert(rows[0][1], Matches, "t_.*")
+	c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
+	c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
+	c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
+
+	// Test show table index regions.
+	tk.MustExec(`split table t_regions index idx between (0) and (1000) regions 4;`)
+	re = tk.MustQuery("show table t_regions index idx regions")
+	rows = re.Rows()
+	// The index `idx` of table t_regions should have 4 regions now.
+	c.Assert(len(rows), Equals, 4)
+	// Check the region start key.
+	c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
+	c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+	c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
+
+	// Test show table regions for a partition table when pre-split region on table creation is disabled.
+	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
+	tk.MustExec("drop table if exists partition_t;")
+	tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
+	tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
+	re = tk.MustQuery("show table partition_t regions")
+	rows = re.Rows()
+	// Table partition_t should have only 1 region, since pre-split on creation is disabled.
+	c.Assert(len(rows), Equals, 1)
+	c.Assert(rows[0][1], Matches, "t_.*")
+
+	// Test show table regions for a partition table when pre-split region on table creation is enabled.
+	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
+	tk.MustExec("set @@global.tidb_scatter_region=1;")
+	tk.MustExec("drop table if exists partition_t;")
+	tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
+	re = tk.MustQuery("show table partition_t regions")
+	rows = re.Rows()
+	// Table partition_t should have 3 regions now, one per partition.
+	c.Assert(len(rows), Equals, 3)
+	tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
+	partitionDef := tbl.Meta().GetPartitionInfo().Definitions
+	c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
+	c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
+	c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
+	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
+}
+
+func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
+	dom := domain.GetDomain(ctx)
+	// Make sure the table schema is the new schema.
+	err := dom.Reload()
+	c.Assert(err, IsNil)
+	tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
+	c.Assert(err, IsNil)
+	return tbl
+}
+
 func (s *testSuite) TestIssue10435(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
 	tk.MustExec("use test")
diff --git a/executor/show.go b/executor/show.go
index 0380d1e0e4f9a..9fffaf16bf44e 100644
--- a/executor/show.go
+++ b/executor/show.go
@@ -37,11 +37,13 @@ import (
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/infoschema"
+	"github.com/pingcap/tidb/kv"
 	plannercore "github.com/pingcap/tidb/planner/core"
 	"github.com/pingcap/tidb/plugin"
 	"github.com/pingcap/tidb/privilege"
 	"github.com/pingcap/tidb/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/sessionctx/variable"
+	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/table"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/types/json"
@@ -60,6 +62,7 @@ type ShowExec struct {
 	DBName model.CIStr
 	Table  *ast.TableName  // Used for showing columns.
 	Column *ast.ColumnName // Used for `desc table column`.
+	IndexName model.CIStr  // Used for show table regions.
 	Flag   int             // Some flag parsed from sql, such as FULL.
 	Full   bool
 	User   *auth.UserIdentity // Used for show grants.
@@ -178,6 +181,8 @@ func (e *ShowExec) fetchAll() error {
 	case ast.ShowAnalyzeStatus:
 		e.fetchShowAnalyzeStatus()
 		return nil
+	case ast.ShowRegions:
+		return e.fetchShowTableRegions()
 	}
 	return nil
 }
@@ -1166,3 +1171,108 @@ func (e *ShowExec) appendRow(row []interface{}) {
 		}
 	}
 }
+
+func (e *ShowExec) fetchShowTableRegions() error {
+	store := e.ctx.GetStore()
+	tikvStore, ok := store.(tikv.Storage)
+	if !ok {
+		return nil
+	}
+	splitStore, ok := store.(kv.SplitableStore)
+	if !ok {
+		return nil
+	}
+
+	tb, err := e.getTable()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	// Get the table's regions from PD rather than from the region cache, because the region cache may be outdated.
+	var regions []regionMeta
+	if len(e.IndexName.L) != 0 {
+		indexInfo := tb.Meta().FindIndexByName(e.IndexName.L)
+		if indexInfo == nil {
+			return plannercore.ErrKeyDoesNotExist.GenWithStackByArgs(e.IndexName, tb.Meta().Name)
+		}
+		regions, err = getTableIndexRegions(tb, indexInfo, tikvStore, splitStore)
+	} else {
+		regions, err = getTableRegions(tb, tikvStore, splitStore)
+	}
+
+	if err != nil {
+		return err
+	}
+	e.fillRegionsToChunk(regions)
+	return nil
+}
+
+func getTableRegions(tb table.Table, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
+	if info := tb.Meta().GetPartitionInfo(); info != nil {
+		return getPartitionTableRegions(info, tb.(table.PartitionedTable), tikvStore, splitStore)
+	}
+	return getPhysicalTableRegions(tb.Meta().ID, tb.Meta(), tikvStore, splitStore, nil)
+}
+
+func getTableIndexRegions(tb table.Table, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
+	if info := tb.Meta().GetPartitionInfo(); info != nil {
+		return getPartitionIndexRegions(info, tb.(table.PartitionedTable), indexInfo, tikvStore, splitStore)
+	}
+	return getPhysicalIndexRegions(tb.Meta().ID, indexInfo, tikvStore, splitStore, nil)
+}
+
+func getPartitionTableRegions(info *model.PartitionInfo, tbl table.PartitionedTable, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
+	regions := make([]regionMeta, 0, len(info.Definitions))
+	uniqueRegionMap := make(map[uint64]struct{})
+	for _, def := range info.Definitions {
+		pid := def.ID
+		partition := tbl.GetPartition(pid)
+		partition.GetPhysicalID()
+		partitionRegions, err := getPhysicalTableRegions(partition.GetPhysicalID(), tbl.Meta(), tikvStore, splitStore, uniqueRegionMap)
+		if err != nil {
+			return nil, err
+		}
+		regions = append(regions, partitionRegions...)
+	}
+	return regions, nil
+}
+
+func getPartitionIndexRegions(info *model.PartitionInfo, tbl table.PartitionedTable, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) {
+	var regions []regionMeta
+	uniqueRegionMap := make(map[uint64]struct{})
+	for _, def := range info.Definitions {
+		pid := def.ID
+		partition := tbl.GetPartition(pid)
+		partition.GetPhysicalID()
+		partitionRegions, err := getPhysicalIndexRegions(partition.GetPhysicalID(), indexInfo, tikvStore, splitStore, uniqueRegionMap)
+		if err != nil {
+			return nil, err
+		}
+		regions = append(regions, partitionRegions...)
+	}
+	return regions, nil
+}
+
+func (e *ShowExec) fillRegionsToChunk(regions []regionMeta) {
+	for i := range regions {
+		e.result.AppendUint64(0, regions[i].region.Id)
+		e.result.AppendString(1, regions[i].start)
+		e.result.AppendString(2, regions[i].end)
+		e.result.AppendUint64(3, regions[i].leaderID)
+		e.result.AppendUint64(4, regions[i].storeID)
+
+		peers := ""
+		for j, peer := range regions[i].region.Peers {
+			if j > 0 {
+				peers += ", "
+			}
+			peers += strconv.FormatUint(peer.Id, 10)
+		}
+		e.result.AppendString(5, peers)
+		if regions[i].scattering {
+			e.result.AppendInt64(6, 1)
+		} else {
+			e.result.AppendInt64(6, 0)
+		}
+	}
+}
diff --git a/executor/split.go b/executor/split.go
index 0ac355bae4308..afb2459d8a29a 100644
--- a/executor/split.go
+++ b/executor/split.go
@@ -17,19 +17,23 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"fmt"
 	"math"
 	"time"
 
 	"github.com/cznic/mathutil"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
+	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/table/tables"
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/codec"
 	"github.com/pingcap/tidb/util/logutil"
 	"go.uber.org/zap"
 )
@@ -347,3 +351,165 @@ func (e *SplitTableRegionExec) getSplitTableKeys() ([][]byte, error) {
 	}
 	return keys, nil
 }
+
+// regionMeta contains a region's peer details.
+type regionMeta struct {
+	region     *metapb.Region
+	leaderID   uint64
+	storeID    uint64 // storeID is the store ID of the region's leader peer.
+	start      string
+	end        string
+	scattering bool
+}
+
+func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore tikv.Storage, s kv.SplitableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) {
+	if uniqueRegionMap == nil {
+		uniqueRegionMap = make(map[uint64]struct{})
+	}
+	// For the table record (row) key range.
+	startKey, endKey := tablecodec.GetTableHandleKeyRange(physicalTableID)
+	regionCache := tikvStore.GetRegionCache()
+	recordRegionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackoffer(context.Background(), 20000), startKey, endKey)
+	if err != nil {
+		return nil, err
+	}
+	recordPrefix := tablecodec.GenTableRecordPrefix(physicalTableID)
+	tablePrefix := tablecodec.GenTablePrefix(physicalTableID)
+	recordRegions, err := getRegionMeta(recordRegionMetas, uniqueRegionMap, tablePrefix, recordPrefix, nil, physicalTableID, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	regions := recordRegions
+	// For the key range of every public index.
+	for _, index := range tableInfo.Indices {
+		if index.State != model.StatePublic {
+			continue
+		}
+		startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, index.ID)
+		regionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackoffer(context.Background(), 20000), startKey, endKey)
+		if err != nil {
+			return nil, err
+		}
+		indexPrefix := tablecodec.EncodeTableIndexPrefix(physicalTableID, index.ID)
+		indexRegions, err := getRegionMeta(regionMetas, uniqueRegionMap, tablePrefix, recordPrefix, indexPrefix, physicalTableID, index.ID)
+		if err != nil {
+			return nil, err
+		}
+		regions = append(regions, indexRegions...)
+	}
+	err = checkRegionsStatus(s, regions)
+	if err != nil {
+		return nil, err
+	}
+	return regions, nil
+}
+
+func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore tikv.Storage, s kv.SplitableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) {
+	if uniqueRegionMap == nil {
+		uniqueRegionMap = make(map[uint64]struct{})
+	}
+
+	startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, indexInfo.ID)
+	regionCache := tikvStore.GetRegionCache()
+	regions, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackoffer(context.Background(), 20000), startKey, endKey)
+	if err != nil {
+		return nil, err
+	}
+	recordPrefix := tablecodec.GenTableRecordPrefix(physicalTableID)
+	tablePrefix := tablecodec.GenTablePrefix(physicalTableID)
+	indexPrefix := tablecodec.EncodeTableIndexPrefix(physicalTableID, indexInfo.ID)
+	indexRegions, err := getRegionMeta(regions, uniqueRegionMap, tablePrefix, recordPrefix, indexPrefix, physicalTableID, indexInfo.ID)
+	if err != nil {
+		return nil, err
+	}
+	err = checkRegionsStatus(s, indexRegions)
+	if err != nil {
+		return nil, err
+	}
+	return indexRegions, nil
+}
+
+func checkRegionsStatus(store kv.SplitableStore, regions []regionMeta) error {
+	for i := range regions {
+		scattering, err := store.CheckRegionInScattering(regions[i].region.Id)
+		if err != nil {
+			return err
+		}
+		regions[i].scattering = scattering
+	}
+	return nil
+}
+
+func decodeRegionsKey(regions []regionMeta, tablePrefix, recordPrefix, indexPrefix []byte, physicalTableID, indexID int64) {
+	d := &regionKeyDecoder{
+		physicalTableID: physicalTableID,
+		tablePrefix:     tablePrefix,
+		recordPrefix:    recordPrefix,
+		indexPrefix:     indexPrefix,
+		indexID:         indexID,
+	}
+	for i := range regions {
+		regions[i].start = d.decodeRegionKey(regions[i].region.StartKey)
+		regions[i].end = d.decodeRegionKey(regions[i].region.EndKey)
+	}
+}
+
+type regionKeyDecoder struct {
+	physicalTableID int64
+	tablePrefix     []byte
+	recordPrefix    []byte
+	indexPrefix     []byte
+	indexID         int64
+}
+
+func (d *regionKeyDecoder) decodeRegionKey(key []byte) string {
+	if len(d.indexPrefix) > 0 && bytes.HasPrefix(key, d.indexPrefix) {
+		return fmt.Sprintf("t_%d_i_%d_%x", d.physicalTableID, d.indexID, key[len(d.indexPrefix):])
+	} else if len(d.recordPrefix) > 0 && bytes.HasPrefix(key, d.recordPrefix) {
+		_, handle, err := codec.DecodeInt(key[len(d.recordPrefix):])
+		if err == nil {
+			return fmt.Sprintf("t_%d_r_%d", d.physicalTableID, handle)
+		}
+	}
+	if len(d.tablePrefix) > 0 && bytes.HasPrefix(key, d.tablePrefix) {
+		key = key[len(d.tablePrefix):]
+		// Has index prefix.
+		if !bytes.HasPrefix(key, []byte("_i")) {
+			return fmt.Sprintf("t_%d_%x", d.physicalTableID, key)
+		}
+		key = key[2:]
+		// Try to decode the index ID.
+		if _, indexID, err := codec.DecodeInt(key); err == nil {
+			return fmt.Sprintf("t_%d_i_%d_%x", d.physicalTableID, indexID, key[8:])
+		}
+		return fmt.Sprintf("t_%d_i__%x", d.physicalTableID, key)
+	}
+	// Has table prefix.
+	if bytes.HasPrefix(key, []byte("t")) {
+		key = key[1:]
+		// Try to decode the table ID.
+		if _, tableID, err := codec.DecodeInt(key); err == nil {
+			return fmt.Sprintf("t_%d_%x", tableID, key[8:])
+		}
+		return fmt.Sprintf("t_%x", key)
+	}
+	return fmt.Sprintf("%x", key)
+}
+
+func getRegionMeta(regionMetas []*tikv.Region, uniqueRegionMap map[uint64]struct{}, tablePrefix, recordPrefix, indexPrefix []byte, physicalTableID, indexID int64) ([]regionMeta, error) {
+	regions := make([]regionMeta, 0, len(regionMetas))
+	for _, r := range regionMetas {
+		if _, ok := uniqueRegionMap[r.GetID()]; ok {
+			continue
+		}
+		uniqueRegionMap[r.GetID()] = struct{}{}
+		regions = append(regions, regionMeta{
+			region:   r.GetMeta(),
+			leaderID: r.GetLeaderID(),
+			storeID:  r.GetLeaderStoreID(),
+		})
+	}
+	decodeRegionsKey(regions, tablePrefix, recordPrefix, indexPrefix, physicalTableID, indexID)
+	return regions, nil
+}
diff --git a/go.mod b/go.mod
index 98fd7f483920b..0fbd9a208bb3a 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@ require (
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/go-ole/go-ole v1.2.1 // indirect
 	github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4
-	github.com/gogo/protobuf v1.2.0 // indirect
+	github.com/gogo/protobuf v1.2.0
 	github.com/golang/protobuf v1.2.0
 	github.com/golang/snappy v0.0.1 // indirect
 	github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c
@@ -43,7 +43,7 @@ require (
 	github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e
 	github.com/pingcap/kvproto v0.0.0-20190703131923-d9830856b531
 	github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596
-	github.com/pingcap/parser v0.0.0-20190710031629-52a9d3a79f41
+	github.com/pingcap/parser v0.0.0-20190712081837-c3bdffe5d00e
 	github.com/pingcap/pd v0.0.0-20190711034019-ee98bf9063e9
 	github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible
 	github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330
diff --git a/go.sum b/go.sum
index 069cabc8ccdec..04b7283097824 100644
--- a/go.sum
+++ b/go.sum
@@ -165,8 +165,8 @@ github.com/pingcap/kvproto v0.0.0-20190703131923-d9830856b531/go.mod h1:QMdbTAXC
 github.com/pingcap/log v0.0.0-20190214045112-b37da76f67a7/go.mod h1:xsfkWVaFVV5B8e1K9seWfyJWFrIhbtUTAD8NV1Pq3+w=
 github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596 h1:t2OQTpPJnrPDGlvA+3FwJptMTt6MEPdzK1Wt99oaefQ=
 github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw=
-github.com/pingcap/parser v0.0.0-20190710031629-52a9d3a79f41 h1:hsCjAYfXliEMyRQTiNAYHyYATfURKNSK1J0eaKfOm1w=
-github.com/pingcap/parser v0.0.0-20190710031629-52a9d3a79f41/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
+github.com/pingcap/parser v0.0.0-20190712081837-c3bdffe5d00e h1:bFfuLDvmMO6QvxkCAEsfJihxBkcrk58MYJRVpM/1Ujk=
+github.com/pingcap/parser v0.0.0-20190712081837-c3bdffe5d00e/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
 github.com/pingcap/pd v0.0.0-20190711034019-ee98bf9063e9 h1:sqqiviE8oEYXJh3Aq59HO/AhxjsvcRb9ETh0ivFOHXc=
 github.com/pingcap/pd v0.0.0-20190711034019-ee98bf9063e9/go.mod h1:3DlDlFT7EF64A1bmb/tulZb6wbPSagm5G4p1AlhaEDs=
 github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU=
diff --git a/kv/kv.go b/kv/kv.go
index 4e2f5d889f8eb..a646b19b50a02 100644
--- a/kv/kv.go
+++ b/kv/kv.go
@@ -300,4 +300,5 @@ type Iterator interface {
 type SplitableStore interface {
 	SplitRegion(splitKey Key, scatter bool) (regionID uint64, err error)
 	WaitScatterRegionFinish(regionID uint64) error
+	CheckRegionInScattering(regionID uint64) (bool, error)
 }
diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go
index bb2f1730ce776..d52b436324cbb 100644
--- a/planner/core/common_plans.go
+++ b/planner/core/common_plans.go
@@ -346,7 +346,8 @@ type Show struct {
 	DBName string
 	Table  *ast.TableName  // Used for showing columns.
 	Column *ast.ColumnName // Used for `desc table column`.
-	Flag   int             // Some flag parsed from sql, such as FULL.
+	IndexName model.CIStr
+	Flag      int // Some flag parsed from sql, such as FULL.
 	Full   bool
 	User   *auth.UserIdentity   // Used for show grants.
 	Roles  []*auth.RoleIdentity // Used for show grants.
@@ -513,6 +514,14 @@ type SplitRegion struct {
 	ValueLists [][]types.Datum
 }
 
+// SplitRegionStatus represents a split regions status plan.
+type SplitRegionStatus struct {
+	baseSchemaProducer
+
+	Table     table.Table
+	IndexInfo *model.IndexInfo
+}
+
 // DDL represents a DDL statement plan.
 type DDL struct {
 	baseSchemaProducer
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go
index 81d6af8cbb34e..e9e33ca8f697f 100644
--- a/planner/core/planbuilder.go
+++ b/planner/core/planbuilder.go
@@ -987,6 +987,18 @@ func buildShowDDLJobsFields() *expression.Schema {
 	return schema
 }
 
+func buildTableRegionsSchema() *expression.Schema {
+	schema := expression.NewSchema(make([]*expression.Column, 0, 10)...)
+	schema.Append(buildColumn("", "REGION_ID", mysql.TypeLonglong, 4))
+	schema.Append(buildColumn("", "START_KEY", mysql.TypeVarchar, 64))
+	schema.Append(buildColumn("", "END_KEY", mysql.TypeVarchar, 64))
+	schema.Append(buildColumn("", "LEADER_ID", mysql.TypeLonglong, 4))
+	schema.Append(buildColumn("", "LEADER_STORE_ID", mysql.TypeLonglong, 4))
+	schema.Append(buildColumn("", "PEERS", mysql.TypeVarchar, 64))
+	schema.Append(buildColumn("", "SCATTERING", mysql.TypeTiny, 1))
+	return schema
+}
+
 func buildShowDDLJobQueriesFields() *expression.Schema {
 	schema := expression.NewSchema(make([]*expression.Column, 0, 1)...)
 	schema.Append(buildColumn("", "QUERY", mysql.TypeVarchar, 256))
@@ -1074,6 +1086,7 @@ func (b *PlanBuilder) buildShow(show *ast.ShowStmt) (Plan, error) {
 		DBName: show.DBName,
 		Table:  show.Table,
 		Column: show.Column,
+		IndexName: show.IndexName,
 		Flag:   show.Flag,
 		Full:   show.Full,
 		User:   show.User,
@@ -1090,6 +1103,8 @@ func (b *PlanBuilder) buildShow(show *ast.ShowStmt) (Plan, error) {
 		p.SetSchema(buildShowEventsSchema())
 	case ast.ShowWarnings, ast.ShowErrors:
 		p.SetSchema(buildShowWarningsSchema())
+	case ast.ShowRegions:
+		p.SetSchema(buildTableRegionsSchema())
 	default:
 		isView := false
 		switch showTp {
diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go
index c27c15c977a19..971c72c10becd 100644
--- a/store/tikv/region_cache.go
+++ b/store/tikv/region_cache.go
@@ -22,6 +22,7 @@ import (
 	"time"
 	"unsafe"
 
+	"github.com/gogo/protobuf/proto"
 	"github.com/google/btree"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/metapb"
@@ -458,6 +459,26 @@ func (c *RegionCache) ListRegionIDsInKeyRange(bo *Backoffer, startKey, endKey []
 	return regionIDs, nil
 }
 
+// LoadRegionsInKeyRange loads and returns the regions that cover the key range [startKey, endKey].
+func (c *RegionCache) LoadRegionsInKeyRange(bo *Backoffer, startKey, endKey []byte) (regions []*Region, err error) {
+	for {
+		curRegion, err := c.loadRegion(bo, startKey, false)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		c.mu.Lock()
+		c.insertRegionToCache(curRegion)
+		c.mu.Unlock()
+
+		regions = append(regions, curRegion)
+		if curRegion.Contains(endKey) {
+			break
+		}
+		startKey = curRegion.EndKey()
+	}
+	return regions, nil
+}
+
 // InvalidateCachedRegion removes a cached Region.
 func (c *RegionCache) InvalidateCachedRegion(id RegionVerID) {
 	cachedRegion := c.getCachedRegionWithRLock(id)
@@ -766,6 +787,29 @@ func (r *Region) GetID() uint64 {
 	return r.meta.GetId()
 }
 
+// GetMeta returns a copy of the region meta.
+func (r *Region) GetMeta() *metapb.Region {
+	return proto.Clone(r.meta).(*metapb.Region)
+}
+
+// GetLeaderID returns the ID of the region's leader peer.
+func (r *Region) GetLeaderID() uint64 {
+	store := r.getStore()
+	if int(store.workStoreIdx) >= len(r.meta.Peers) {
+		return 0
+	}
+	return r.meta.Peers[int(r.getStore().workStoreIdx)].Id
+}
+
+// GetLeaderStoreID returns the store ID of the region's leader peer.
+func (r *Region) GetLeaderStoreID() uint64 {
+	store := r.getStore()
+	if int(store.workStoreIdx) >= len(r.meta.Peers) {
+		return 0
+	}
+	return r.meta.Peers[int(r.getStore().workStoreIdx)].StoreId
+}
+
 // WorkStorePeer returns current work store with work peer.
 func (r *Region) WorkStorePeer(rs *RegionStore) (store *Store, peer *metapb.Peer, idx int) {
 	idx = int(rs.workStoreIdx)
diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go
index ed3a28e03e1b7..e04eb891b8ac9 100644
--- a/store/tikv/region_cache_test.go
+++ b/store/tikv/region_cache_test.go
@@ -121,6 +121,8 @@ func (s *testRegionCacheSuite) TestSimple(c *C) {
 	c.Assert(r.GetID(), Equals, s.region1)
 	c.Assert(s.getAddr(c, []byte("a")), Equals, s.storeAddr(s.store1))
 	s.checkCache(c, 1)
+	c.Assert(r.GetMeta(), DeepEquals, r.meta)
+	c.Assert(r.GetLeaderID(), Equals, r.meta.Peers[r.getStore().workStoreIdx].Id)
 	s.cache.mu.regions[r.VerID()].lastAccess = 0
 	r = s.cache.searchCachedRegion([]byte("a"), true)
 	c.Assert(r, IsNil)
diff --git a/store/tikv/split_region.go b/store/tikv/split_region.go
index 9a7b22fec204a..8851b9078bf29 100644
--- a/store/tikv/split_region.go
+++ b/store/tikv/split_region.go
@@ -135,3 +135,24 @@ func (s *tikvStore) WaitScatterRegionFinish(regionID uint64) error {
 		}
 	}
 }
+
+// CheckRegionInScattering checks whether the given region is still being scattered.
+func (s *tikvStore) CheckRegionInScattering(regionID uint64) (bool, error) {
+	bo := NewBackoffer(context.Background(), locateRegionMaxBackoff)
+	for {
+		resp, err := s.pdClient.GetOperator(context.Background(), regionID)
+		if err == nil && resp != nil {
+			if !bytes.Equal(resp.Desc, []byte("scatter-region")) || resp.Status != pdpb.OperatorStatus_RUNNING {
+				return false, nil
+			}
+		}
+		if err != nil {
+			err = bo.Backoff(BoRegionMiss, errors.New(err.Error()))
+		} else {
+			return true, nil
+		}
+		if err != nil {
+			return true, errors.Trace(err)
+		}
+	}
+}
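Usage sketch (not part of the patch): the minimal Go program below shows how a client could read the output of the `SHOW TABLE ... REGIONS` statement introduced by this change, scanning the columns defined in buildTableRegionsSchema (REGION_ID, START_KEY, END_KEY, LEADER_ID, LEADER_STORE_ID, PEERS, SCATTERING). The DSN, database, and table name are illustrative assumptions, not values taken from this diff; it assumes a TiDB server built with this change is reachable at 127.0.0.1:4000 and that the table exists. The same scan works for the index form, e.g. `SHOW TABLE t_regions INDEX idx REGIONS`, since both forms share the schema.

package main

import (
	"database/sql"
	"fmt"
	"log"

	// MySQL-protocol driver already listed in go.mod; TiDB speaks the same wire protocol.
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Assumption: a TiDB instance with this patch is listening on this address.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Statement form added by this change; `t_regions` is assumed to exist.
	rows, err := db.Query("SHOW TABLE t_regions REGIONS")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	var (
		regionID, leaderID, leaderStoreID uint64
		startKey, endKey, peers           string
		scattering                        int
	)
	for rows.Next() {
		// Column order follows buildTableRegionsSchema.
		if err := rows.Scan(&regionID, &startKey, &endKey, &leaderID, &leaderStoreID, &peers, &scattering); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("region %d: [%s, %s) leader=%d store=%d peers=%s scattering=%d\n",
			regionID, startKey, endKey, leaderID, leaderStoreID, peers, scattering)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}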