diff --git a/br/pkg/restore/split.go b/br/pkg/restore/split.go index ada8662522c21..b302dfc2487d3 100644 --- a/br/pkg/restore/split.go +++ b/br/pkg/restore/split.go @@ -455,14 +455,14 @@ type scanRegionBackoffer struct { func newScanRegionBackoffer() utils.Backoffer { return &scanRegionBackoffer{ - attempt: 3, + attempt: 30, } } // NextBackoff returns a duration to wait before retrying again func (b *scanRegionBackoffer) NextBackoff(err error) time.Duration { if berrors.ErrPDBatchScanRegion.Equal(err) { - // 500ms * 3 could be enough for splitting remain regions in the hole. + // 500ms * 30 could be enough for splitting the remaining regions in the hole. b.attempt-- return 500 * time.Millisecond } diff --git a/cmd/explaintest/r/explain_complex.result b/cmd/explaintest/r/explain_complex.result index b3c79948a142e..43f7c0f915f14 100644 --- a/cmd/explaintest/r/explain_complex.result +++ b/cmd/explaintest/r/explain_complex.result @@ -182,7 +182,7 @@ CREATE TABLE `tbl_009` (`a` int, `b` int); explain format = 'brief' select sum(a) from (select * from tbl_001 union all select * from tbl_002 union all select * from tbl_003 union all select * from tbl_004 union all select * from tbl_005 union all select * from tbl_006 union all select * from tbl_007 union all select * from tbl_008 union all select * from tbl_009) x group by b; id estRows task access object operator info HashAgg 72000.00 root group by:Column#32, funcs:sum(Column#31)->Column#30 -└─Projection 90000.00 root cast(Column#28, decimal(32,0) BINARY)->Column#31, Column#29 +└─Projection 90000.00 root cast(Column#28, decimal(10,0) BINARY)->Column#31, Column#29 └─Union 90000.00 root ├─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:tbl_001 keep order:false, stats:pseudo diff --git a/cmd/explaintest/r/explain_complex_stats.result b/cmd/explaintest/r/explain_complex_stats.result index 4365be0c45336..ed7021dbbfba2 100644 --- a/cmd/explaintest/r/explain_complex_stats.result +++ b/cmd/explaintest/r/explain_complex_stats.result @@ -205,7 +205,7 @@ load stats 's/explain_complex_stats_tbl_009.json'; explain format = 'brief' select sum(a) from (select * from tbl_001 union all select * from tbl_002 union all select * from tbl_003 union all select * from tbl_004 union all select * from tbl_005 union all select * from tbl_006 union all select * from tbl_007 union all select * from tbl_008 union all select * from tbl_009) x group by b; id estRows task access object operator info HashAgg 18000.00 root group by:Column#32, funcs:sum(Column#31)->Column#30 -└─Projection 18000.00 root cast(Column#28, decimal(32,0) BINARY)->Column#31, Column#29 +└─Projection 18000.00 root cast(Column#28, decimal(10,0) BINARY)->Column#31, Column#29 └─Union 18000.00 root ├─TableReader 2000.00 root data:TableFullScan │ └─TableFullScan 2000.00 cop[tikv] table:tbl_001 keep order:false diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result index 2eec1954d46df..3eb32472170e5 100644 --- a/cmd/explaintest/r/explain_easy.result +++ b/cmd/explaintest/r/explain_easy.result @@ -91,7 +91,7 @@ Selection 0.33 root gt(test.t1.c2, 1) explain format = 'brief' select sum(t1.c1 in (select c1 from t2)) from t1; id estRows task access object operator info StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 -└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 +└─Projection 10000.00 root cast(Column#10, decimal(3,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other
cond:eq(test.t1.c1, test.t2.c1) ├─IndexReader(Build) 10000.00 root index:IndexFullScan │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo @@ -225,7 +225,7 @@ set @@session.tidb_opt_insubq_to_join_and_agg=0; explain format = 'brief' select sum(t1.c1 in (select c1 from t2)) from t1; id estRows task access object operator info StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 -└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 +└─Projection 10000.00 root cast(Column#10, decimal(3,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.c1, test.t2.c1) ├─IndexReader(Build) 10000.00 root index:IndexFullScan │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo @@ -241,7 +241,7 @@ HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(1, test.t2 explain format = 'brief' select sum(6 in (select c2 from t2)) from t1; id estRows task access object operator info StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 -└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 +└─Projection 10000.00 root cast(Column#10, decimal(3,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(6, test.t2.c2) ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo @@ -809,7 +809,7 @@ Projection 1.00 root Column#7 └─HashAgg(Probe) 1.00 root funcs:sum(Column#12)->Column#7 └─HashJoin 1.00 root CARTESIAN left outer join ├─HashAgg(Build) 1.00 root group by:1, funcs:sum(Column#14)->Column#12 - │ └─Projection 1.00 root cast(Column#6, decimal(42,0) BINARY)->Column#14 + │ └─Projection 1.00 root cast(Column#6, decimal(20,0) BINARY)->Column#14 │ └─MaxOneRow 1.00 root │ └─Projection 1.00 root Column#5 │ └─TableDual 1.00 root rows:1 @@ -831,7 +831,7 @@ Projection 1.00 root Column#5 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo └─StreamAgg(Probe) 1.00 root funcs:sum(Column#7)->Column#5 - └─Projection 1.00 root cast(Column#4, decimal(42,0) BINARY)->Column#7 + └─Projection 1.00 root cast(Column#4, decimal(20,0) BINARY)->Column#7 └─TableDual 1.00 root rows:1 explain format = 'brief' select sum(a), (select sum(a)), count(a) from t group by b order by (select count(a)); id estRows task access object operator info @@ -846,3 +846,121 @@ Projection 8000.00 root Column#4, Column#4, Column#5 └─HashAgg 8000.00 cop[tikv] group by:test.t.b, funcs:sum(test.t.a)->Column#13, funcs:count(test.t.a)->Column#14 └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo drop table if exists t; +create table t(a tinyint, b smallint, c mediumint, d int, e bigint); +insert into mysql.opt_rule_blacklist VALUES("aggregation_push_down"); +admin reload opt_rule_blacklist; + +explain format = 'brief' select sum(t1.a) from t t1 join t t2 on t1.a=t2.a; +id estRows task access object operator info +StreamAgg 1.00 root funcs:sum(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.a, decimal(3,0) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.a)) + 
└─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select sum(t1.b) from t t1 join t t2 on t1.b=t2.b; +id estRows task access object operator info +StreamAgg 1.00 root funcs:sum(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.b, decimal(5,0) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.b, test.t.b)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.b)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.b)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select sum(t1.c) from t t1 join t t2 on t1.c=t2.c; +id estRows task access object operator info +StreamAgg 1.00 root funcs:sum(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.c, decimal(8,0) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.c, test.t.c)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.c)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.c)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select sum(t1.d) from t t1 join t t2 on t1.d=t2.d; +id estRows task access object operator info +StreamAgg 1.00 root funcs:sum(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.d, decimal(10,0) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.d, test.t.d)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.d)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.d)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select sum(t1.e) from t t1 join t t2 on t1.e=t2.e; +id estRows task access object operator info +StreamAgg 1.00 root funcs:sum(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.e, decimal(20,0) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.e, test.t.e)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.e)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.e)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select avg(t1.a) from t t1 join t t2 on t1.a=t2.a; +id estRows task access object operator info +StreamAgg 1.00 root funcs:avg(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.a, decimal(8,4) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.a)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select 
avg(t1.b) from t t1 join t t2 on t1.b=t2.b; +id estRows task access object operator info +StreamAgg 1.00 root funcs:avg(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.b, decimal(10,4) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.b, test.t.b)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.b)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.b)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select avg(t1.c) from t t1 join t t2 on t1.c=t2.c; +id estRows task access object operator info +StreamAgg 1.00 root funcs:avg(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.c, decimal(13,4) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.c, test.t.c)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.c)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.c)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select avg(t1.d) from t t1 join t t2 on t1.d=t2.d; +id estRows task access object operator info +StreamAgg 1.00 root funcs:avg(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.d, decimal(15,4) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.d, test.t.d)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.d)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.d)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select avg(t1.e) from t t1 join t t2 on t1.e=t2.e; +id estRows task access object operator info +StreamAgg 1.00 root funcs:avg(Column#14)->Column#13 +└─Projection 12487.50 root cast(test.t.e, decimal(24,4) BINARY)->Column#14 + └─HashJoin 12487.50 root inner join, equal:[eq(test.t.e, test.t.e)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(test.t.e)) + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(test.t.e)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +drop table if exists t; +delete from mysql.opt_rule_blacklist where name="aggregation_push_down"; +admin reload opt_rule_blacklist; + diff --git a/cmd/explaintest/r/generated_columns.result b/cmd/explaintest/r/generated_columns.result index d7f120eb28f3f..970f00880ac5d 100644 --- a/cmd/explaintest/r/generated_columns.result +++ b/cmd/explaintest/r/generated_columns.result @@ -182,7 +182,7 @@ INSERT INTO t1 (a) VALUES (2), (1), (1), (3), (NULL); EXPLAIN format = 'brief' SELECT sum(a) FROM t1 GROUP BY b; id estRows task access object operator info HashAgg 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5 -└─Projection 10000.00 root cast(test.t1.a, decimal(32,0) BINARY)->Column#6, test.t1.b +└─Projection 10000.00 root cast(test.t1.a, decimal(10,0) BINARY)->Column#6, test.t1.b └─TableReader 10000.00 root 
data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo EXPLAIN format = 'brief' SELECT sum(a) FROM t1 GROUP BY c; @@ -194,13 +194,13 @@ HashAgg 8000.00 root group by:test.t1.c, funcs:sum(Column#6)->Column#5 EXPLAIN format = 'brief' SELECT sum(b) FROM t1 GROUP BY a; id estRows task access object operator info HashAgg 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5 -└─Projection 10000.00 root cast(test.t1.b, decimal(32,0) BINARY)->Column#6, test.t1.a +└─Projection 10000.00 root cast(test.t1.b, decimal(10,0) BINARY)->Column#6, test.t1.a └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo EXPLAIN format = 'brief' SELECT sum(b) FROM t1 GROUP BY c; id estRows task access object operator info HashAgg 8000.00 root group by:Column#9, funcs:sum(Column#8)->Column#5 -└─Projection 10000.00 root cast(test.t1.b, decimal(32,0) BINARY)->Column#8, test.t1.c +└─Projection 10000.00 root cast(test.t1.b, decimal(10,0) BINARY)->Column#8, test.t1.c └─Projection 10000.00 root test.t1.b, test.t1.c └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo @@ -213,7 +213,7 @@ HashAgg 8000.00 root group by:test.t1.a, funcs:sum(Column#6)->Column#5 EXPLAIN format = 'brief' SELECT sum(c) FROM t1 GROUP BY b; id estRows task access object operator info HashAgg 8000.00 root group by:Column#7, funcs:sum(Column#6)->Column#5 -└─Projection 10000.00 root cast(test.t1.c, decimal(32,0) BINARY)->Column#6, test.t1.b +└─Projection 10000.00 root cast(test.t1.c, decimal(10,0) BINARY)->Column#6, test.t1.b └─Projection 10000.00 root test.t1.b, test.t1.c └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo diff --git a/cmd/explaintest/r/index_merge.result b/cmd/explaintest/r/index_merge.result index f790569635b28..f4d6571fd8fe3 100644 --- a/cmd/explaintest/r/index_merge.result +++ b/cmd/explaintest/r/index_merge.result @@ -237,11 +237,11 @@ insert into t1(c1, c2) values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5); explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; id estRows task access object operator info Sort_5 4060.74 root test.t1.c1 -└─IndexMerge_12 2250.55 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 2250.55 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; c1 c2 c3 1 1 2 @@ -252,11 +252,11 @@ c1 c2 c3 explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; id estRows task access object operator info Sort_5 5098.44 root test.t1.c1 
-└─IndexMerge_12 2825.66 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), eq(test.t1.c3, plus(test.t1.c1, test.t1.c2)))) - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 2825.66 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), eq(test.t1.c3, plus(test.t1.c1, test.t1.c2)))) + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; c1 c2 c3 1 1 2 @@ -267,11 +267,11 @@ c1 c2 c3 explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; id estRows task access object operator info Sort_5 5098.44 root test.t1.c1 -└─IndexMerge_12 2825.66 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), istrue_with_null(cast(substring(cast(test.t1.c3, var_string(20)), test.t1.c2), double BINARY)))) - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 2825.66 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), istrue_with_null(cast(substring(cast(test.t1.c3, var_string(20)), test.t1.c2), double BINARY)))) + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; c1 c2 c3 1 1 2 @@ -282,11 +282,11 @@ c1 c2 c3 explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; id estRows task access object operator info Sort_5 4800.37 root test.t1.c1 -└─IndexMerge_12 2660.47 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 2660.47 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), test.t1.c3)) - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 2660.47 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), test.t1.c3)) + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select 
/*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; c1 c2 c3 1 1 2 @@ -302,11 +302,11 @@ select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 explain select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; id estRows task access object operator info Sort_5 4060.74 root test.t1.c1 -└─IndexMerge_12 2250.55 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 2250.55 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; c1 c2 c3 1 1 2 @@ -481,7 +481,7 @@ explain select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < id estRows task access object operator info Sort_6 1473.49 root Column#5 └─HashAgg_11 1473.49 root group by:Column#10, funcs:sum(Column#9)->Column#5 - └─Projection_18 1841.86 root cast(test.t1.c1, decimal(32,0) BINARY)->Column#9, test.t1.c1 + └─Projection_18 1841.86 root cast(test.t1.c1, decimal(10,0) BINARY)->Column#9, test.t1.c1 └─IndexMerge_16 1841.86 root ├─IndexRangeScan_12(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo ├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo @@ -722,11 +722,11 @@ c1 c2 c3 c4 c5 explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1; id estRows task access object operator info Sort_5 4433.77 root test.t1.c1 -└─IndexMerge_12 4433.77 root - ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo - ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo - └─Selection_11(Probe) 4433.77 cop[tikv] eq(substring(cast(test.t1.c3, var_string(20)), 1, 1), "1") - └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Selection_12 4433.77 root eq(substring(cast(test.t1.c3, var_string(20)), 1, 1), "1") + └─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1; c1 c2 c3 c4 c5 1 1 1 1 1 diff --git a/cmd/explaintest/r/select.result b/cmd/explaintest/r/select.result index 959e761aad086..f364a577e1071 100644 --- a/cmd/explaintest/r/select.result +++ b/cmd/explaintest/r/select.result @@ -385,7 +385,7 @@ id estRows task access object operator info Projection 
10000.00 root and(or(or(gt(Column#11, 1), ne(test.t.a, Column#10)), if(ne(Column#12, 0), , 0)), and(ne(Column#13, 0), if(isnull(test.t.a), , 1)))->Column#14 └─HashJoin 10000.00 root CARTESIAN inner join ├─StreamAgg(Build) 1.00 root funcs:max(Column#16)->Column#10, funcs:count(distinct Column#17)->Column#11, funcs:sum(Column#18)->Column#12, funcs:count(1)->Column#13 - │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#18 + │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(20,0) BINARY)->Column#18 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan @@ -395,7 +395,7 @@ id estRows task access object operator info Projection 10000.00 root or(and(and(le(Column#11, 1), eq(test.t.a, Column#10)), if(ne(Column#12, 0), , 1)), or(eq(Column#13, 0), if(isnull(test.t.a), , 0)))->Column#14 └─HashJoin 10000.00 root CARTESIAN inner join ├─StreamAgg(Build) 1.00 root funcs:firstrow(Column#16)->Column#10, funcs:count(distinct Column#17)->Column#11, funcs:sum(Column#18)->Column#12, funcs:count(1)->Column#13 - │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#18 + │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(20,0) BINARY)->Column#18 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result index 319713d946aa2..c9edd22189514 100644 --- a/cmd/explaintest/r/tpch.result +++ b/cmd/explaintest/r/tpch.result @@ -773,7 +773,7 @@ id estRows task access object operator info Sort 1.00 root tpch.lineitem.l_shipmode └─Projection 1.00 root tpch.lineitem.l_shipmode, Column#27, Column#28 └─HashAgg 1.00 root group by:Column#40, funcs:sum(Column#37)->Column#27, funcs:sum(Column#38)->Column#28, funcs:firstrow(Column#39)->tpch.lineitem.l_shipmode - └─Projection 10023369.01 root cast(case(or(eq(tpch.orders.o_orderpriority, 1-URGENT), eq(tpch.orders.o_orderpriority, 2-HIGH)), 1, 0), decimal(22,0) BINARY)->Column#37, cast(case(and(ne(tpch.orders.o_orderpriority, 1-URGENT), ne(tpch.orders.o_orderpriority, 2-HIGH)), 1, 0), decimal(22,0) BINARY)->Column#38, tpch.lineitem.l_shipmode, tpch.lineitem.l_shipmode + └─Projection 10023369.01 root cast(case(or(eq(tpch.orders.o_orderpriority, 1-URGENT), eq(tpch.orders.o_orderpriority, 2-HIGH)), 1, 0), decimal(20,0) BINARY)->Column#37, cast(case(and(ne(tpch.orders.o_orderpriority, 1-URGENT), ne(tpch.orders.o_orderpriority, 2-HIGH)), 1, 0), decimal(20,0) BINARY)->Column#38, tpch.lineitem.l_shipmode, tpch.lineitem.l_shipmode └─Projection 10023369.01 root tpch.orders.o_orderpriority, tpch.lineitem.l_shipmode └─IndexJoin 10023369.01 root inner join, inner:TableReader, outer key:tpch.lineitem.l_orderkey, inner key:tpch.orders.o_orderkey, equal cond:eq(tpch.lineitem.l_orderkey, tpch.orders.o_orderkey) ├─TableReader(Build) 10023369.01 root data:Selection diff --git a/cmd/explaintest/r/window_function.result b/cmd/explaintest/r/window_function.result index 6c92b63dcd0d3..b29d1e5d3fba7 100644 --- a/cmd/explaintest/r/window_function.result +++ b/cmd/explaintest/r/window_function.result @@ -6,47 +6,47 @@ set @@session.tidb_window_concurrency = 1; explain format = 'brief' select sum(a) over() from t; id estRows task access 
object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over() +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over() └─IndexReader 10000.00 root index:IndexFullScan └─IndexFullScan 10000.00 cop[tikv] table:t, index:idx(a) keep order:false, stats:pseudo explain format = 'brief' select sum(a) over(partition by a) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a) └─IndexReader 10000.00 root index:IndexFullScan └─IndexFullScan 10000.00 cop[tikv] table:t, index:idx(a) keep order:true, stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by b) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by b rows unbounded preceding) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between unbounded preceding and current row) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between unbounded preceding and current row) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by b rows between 1 preceding and 1 following) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between 1 preceding and 1 following) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between 1 preceding and 1 following) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by b range between 1 preceding and 1 following) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between 1 preceding and 1 following) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between 1 preceding and 1 following) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, 
stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by c range between interval '2:30' minute_second preceding and interval '2:30' minute_second following) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following) └─Sort 10000.00 root test.t.a, test.t.c └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -54,20 +54,20 @@ set @@session.tidb_window_concurrency = 4; explain format = 'brief' select sum(a) over() from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over() +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over() └─IndexReader 10000.00 root index:IndexFullScan └─IndexFullScan 10000.00 cop[tikv] table:t, index:idx(a) keep order:false, stats:pseudo explain format = 'brief' select sum(a) over(partition by a) from t; id estRows task access object operator info Projection 10000.00 root Column#6 -└─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a) +└─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a) └─IndexReader 10000.00 root index:IndexFullScan └─IndexFullScan 10000.00 cop[tikv] table:t, index:idx(a) keep order:true, stats:pseudo explain format = 'brief' select sum(a) over(partition by a order by b) from t; id estRows task access object operator info Projection 10000.00 root Column#6 └─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader] - └─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row) + └─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -75,7 +75,7 @@ explain format = 'brief' select sum(a) over(partition by a order by b rows unbou id estRows task access object operator info Projection 10000.00 root Column#6 └─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader] - └─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between unbounded preceding and current row) + └─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between unbounded preceding and current row) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -83,7 +83,7 @@ explain format = 'brief' select sum(a) over(partition by a order by b rows betwe id estRows task access object operator info Projection 10000.00 root Column#6 
└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader] - └─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between 1 preceding and 1 following) + └─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b rows between 1 preceding and 1 following) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -91,7 +91,7 @@ explain format = 'brief' select sum(a) over(partition by a order by b range betw id estRows task access object operator info Projection 10000.00 root Column#6 └─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader] - └─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between 1 preceding and 1 following) + └─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.b range between 1 preceding and 1 following) └─Sort 10000.00 root test.t.a, test.t.b └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -99,7 +99,7 @@ explain format = 'brief' select sum(a) over(partition by a order by c range betw id estRows task access object operator info Projection 10000.00 root Column#6 └─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader] - └─Window 10000.00 root sum(cast(test.t.a, decimal(32,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following) + └─Window 10000.00 root sum(cast(test.t.a, decimal(10,0) BINARY))->Column#6 over(partition by test.t.a order by test.t.c range between interval "2:30" "MINUTE_SECOND" preceding and interval "2:30" "MINUTE_SECOND" following) └─Sort 10000.00 root test.t.a, test.t.c └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo @@ -110,7 +110,7 @@ analyze table t1; explain format = 'brief' select sum(a) over(partition by b) from t1; id estRows task access object operator info Projection 2.00 root Column#4 -└─Window 2.00 root sum(cast(test.t1.a, decimal(32,0) BINARY))->Column#4 over(partition by test.t1.b) +└─Window 2.00 root sum(cast(test.t1.a, decimal(10,0) BINARY))->Column#4 over(partition by test.t1.b) └─Sort 2.00 root test.t1.b └─TableReader 2.00 root data:TableFullScan └─TableFullScan 2.00 cop[tikv] table:t1 keep order:false @@ -120,7 +120,7 @@ explain format = 'brief' select sum(a) over(partition by b) from t1; id estRows task access object operator info Projection 3.00 root Column#4 └─Shuffle 3.00 root execution info: concurrency:2, data sources:[TableReader] - └─Window 3.00 root sum(cast(test.t1.a, decimal(32,0) BINARY))->Column#4 over(partition by test.t1.b) + └─Window 3.00 root sum(cast(test.t1.a, decimal(10,0) BINARY))->Column#4 over(partition by test.t1.b) └─Sort 3.00 root test.t1.b └─TableReader 3.00 root data:TableFullScan └─TableFullScan 3.00 cop[tikv] table:t1 keep order:false diff --git a/cmd/explaintest/t/explain_easy.test b/cmd/explaintest/t/explain_easy.test index 4d3b698bd7d72..161b716f5a666 100644 --- a/cmd/explaintest/t/explain_easy.test +++ b/cmd/explaintest/t/explain_easy.test @@ -218,3 +218,22 @@ explain format = 'brief' select count(a) 
from t group by b order by (select coun explain format = 'brief' select (select sum(count(a))) from t; explain format = 'brief' select sum(a), (select sum(a)), count(a) from t group by b order by (select count(a)); drop table if exists t; + +# lower precision for cast to decimal for integer type variables in sum function +create table t(a tinyint, b smallint, c mediumint, d int, e bigint); +insert into mysql.opt_rule_blacklist VALUES("aggregation_push_down"); +admin reload opt_rule_blacklist; +explain format = 'brief' select sum(t1.a) from t t1 join t t2 on t1.a=t2.a; +explain format = 'brief' select sum(t1.b) from t t1 join t t2 on t1.b=t2.b; +explain format = 'brief' select sum(t1.c) from t t1 join t t2 on t1.c=t2.c; +explain format = 'brief' select sum(t1.d) from t t1 join t t2 on t1.d=t2.d; +explain format = 'brief' select sum(t1.e) from t t1 join t t2 on t1.e=t2.e; +# note that avg will be converted to count and sum, and .decimal field will be non-zero +explain format = 'brief' select avg(t1.a) from t t1 join t t2 on t1.a=t2.a; +explain format = 'brief' select avg(t1.b) from t t1 join t t2 on t1.b=t2.b; +explain format = 'brief' select avg(t1.c) from t t1 join t t2 on t1.c=t2.c; +explain format = 'brief' select avg(t1.d) from t t1 join t t2 on t1.d=t2.d; +explain format = 'brief' select avg(t1.e) from t t1 join t t2 on t1.e=t2.e; +drop table if exists t; +delete from mysql.opt_rule_blacklist where name="aggregation_push_down"; +admin reload opt_rule_blacklist; diff --git a/ddl/db_cache_serial_test.go b/ddl/db_cache_serial_test.go deleted file mode 100644 index e024cef522682..0000000000000 --- a/ddl/db_cache_serial_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ddl_test - -import ( - "testing" - "time" - - "github.com/pingcap/tidb/ddl" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/errno" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/store/mockstore" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestAlterTableCache(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - session.SetSchemaLease(600 * time.Millisecond) - session.DisableStats4Test() - dom, err := session.BootstrapSession(store) - require.NoError(t, err) - - dom.SetStatsUpdating(true) - - clean := func() { - dom.Close() - err := store.Close() - require.NoError(t, err) - } - defer clean() - tk := testkit.NewTestKit(t, store) - tk2 := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk2.MustExec("use test") - /* Test of cache table */ - tk.MustExec("create table t1 ( n int auto_increment primary key)") - tk.MustGetErrCode("alter table t1 ca", errno.ErrParse) - tk.MustGetErrCode("alter table t2 cache", errno.ErrNoSuchTable) - tk.MustExec("alter table t1 cache") - checkTableCacheStatus(t, tk.Session(), "test", "t1", model.TableCacheStatusEnable) - tk.MustExec("drop table if exists t1") - /*Test can't skip schema checker*/ - tk.MustExec("drop table if exists t1,t2") - tk.MustExec("CREATE TABLE t1 (a int)") - tk.MustExec("CREATE TABLE t2 (a int)") - tk.MustExec("begin") - tk.MustExec("insert into t1 set a=1;") - tk2.MustExec("alter table t1 cache;") - _, err = tk.Exec("commit") - require.True(t, terror.ErrorEqual(domain.ErrInfoSchemaChanged, err)) - /* Test can skip schema checker */ - tk.MustExec("begin") - tk.MustExec("drop table if exists t1") - tk.MustExec("CREATE TABLE t1 (a int)") - tk.MustExec("insert into t1 set a=2;") - tk2.MustExec("alter table t2 cache") - tk.MustExec("commit") - // Test if a table is not exists - tk.MustExec("drop table if exists t") - tk.MustGetErrCode("alter table t cache", errno.ErrNoSuchTable) - tk.MustExec("create table t (a int)") - tk.MustExec("alter table t cache") - // Multiple alter cache is okay - tk.MustExec("alter table t cache") - tk.MustExec("alter table t cache") - // Test a temporary table - tk.MustExec("drop table if exists t") - tk.MustExec("create temporary table t (id int primary key auto_increment, u int unique, v int)") - tk.MustExec("drop table if exists tmp1") - // local temporary table alter is not supported - tk.MustGetErrCode("alter table t cache", errno.ErrUnsupportedDDLOperation) - // test global temporary table - tk.MustExec("create global temporary table tmp1 " + - "(id int not null primary key, code int not null, value int default null, unique key code(code))" + - "on commit delete rows") - tk.MustGetErrMsg("alter table tmp1 cache", ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("alter temporary table cache").Error()) - -} diff --git a/ddl/db_cache_test.go b/ddl/db_cache_test.go index 120c0c687e049..506b8d7089fe6 100644 --- a/ddl/db_cache_test.go +++ b/ddl/db_cache_test.go @@ -16,12 +16,16 @@ package ddl_test import ( "testing" + "time" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/testkit" 
"github.com/stretchr/testify/require" @@ -133,3 +137,69 @@ func TestIndexOnCacheTable(t *testing.T) { tk.MustExec("alter table cache_index_1 cache") tk.MustGetErrCode("alter table cache_index_1 drop index i1, drop index i2;", errno.ErrOptOnCacheTable) } + +func TestAlterTableCache(t *testing.T) { + store, err := mockstore.NewMockStore() + require.NoError(t, err) + session.SetSchemaLease(600 * time.Millisecond) + session.DisableStats4Test() + dom, err := session.BootstrapSession(store) + require.NoError(t, err) + + dom.SetStatsUpdating(true) + + clean := func() { + dom.Close() + err := store.Close() + require.NoError(t, err) + } + defer clean() + tk := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk2.MustExec("use test") + /* Test of cache table */ + tk.MustExec("create table t1 ( n int auto_increment primary key)") + tk.MustGetErrCode("alter table t1 ca", errno.ErrParse) + tk.MustGetErrCode("alter table t2 cache", errno.ErrNoSuchTable) + tk.MustExec("alter table t1 cache") + checkTableCacheStatus(t, tk.Session(), "test", "t1", model.TableCacheStatusEnable) + tk.MustExec("drop table if exists t1") + /*Test can't skip schema checker*/ + tk.MustExec("drop table if exists t1,t2") + tk.MustExec("CREATE TABLE t1 (a int)") + tk.MustExec("CREATE TABLE t2 (a int)") + tk.MustExec("begin") + tk.MustExec("insert into t1 set a=1;") + tk2.MustExec("alter table t1 cache;") + _, err = tk.Exec("commit") + require.True(t, terror.ErrorEqual(domain.ErrInfoSchemaChanged, err)) + /* Test can skip schema checker */ + tk.MustExec("begin") + tk.MustExec("drop table if exists t1") + tk.MustExec("CREATE TABLE t1 (a int)") + tk.MustExec("insert into t1 set a=2;") + tk2.MustExec("alter table t2 cache") + tk.MustExec("commit") + // Test if a table is not exists + tk.MustExec("drop table if exists t") + tk.MustGetErrCode("alter table t cache", errno.ErrNoSuchTable) + tk.MustExec("create table t (a int)") + tk.MustExec("alter table t cache") + // Multiple alter cache is okay + tk.MustExec("alter table t cache") + tk.MustExec("alter table t cache") + // Test a temporary table + tk.MustExec("drop table if exists t") + tk.MustExec("create temporary table t (id int primary key auto_increment, u int unique, v int)") + tk.MustExec("drop table if exists tmp1") + // local temporary table alter is not supported + tk.MustGetErrCode("alter table t cache", errno.ErrUnsupportedDDLOperation) + // test global temporary table + tk.MustExec("create global temporary table tmp1 " + + "(id int not null primary key, code int not null, value int default null, unique key code(code))" + + "on commit delete rows") + tk.MustGetErrMsg("alter table tmp1 cache", ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("alter temporary table cache").Error()) +} diff --git a/ddl/ddl_algorithm_serial_test.go b/ddl/ddl_algorithm_test.go similarity index 100% rename from ddl/ddl_algorithm_serial_test.go rename to ddl/ddl_algorithm_test.go diff --git a/ddl/failtest/fail_db_serial_test.go b/ddl/failtest/fail_db_test.go similarity index 100% rename from ddl/failtest/fail_db_serial_test.go rename to ddl/failtest/fail_db_test.go diff --git a/ddl/partition_test.go b/ddl/partition_test.go index f2cdd1c4e3597..0b993a7de0a8c 100644 --- a/ddl/partition_test.go +++ b/ddl/partition_test.go @@ -16,55 +16,41 @@ package ddl import ( "context" + "testing" - . 
"github.com/pingcap/check" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" + "github.com/stretchr/testify/require" ) -var _ = SerialSuites(&testPartitionSuite{}) - -type testPartitionSuite struct { - store kv.Storage -} - -func (s *testPartitionSuite) SetUpSuite(c *C) { - s.store = testCreateStore(c, "test_store") -} - -func (s *testPartitionSuite) TearDownSuite(c *C) { - err := s.store.Close() - c.Assert(err, IsNil) -} - -func (s *testPartitionSuite) TestDropAndTruncatePartition(c *C) { +func TestDropAndTruncatePartition(t *testing.T) { + store := testCreateStoreT(t, "test_store") d, err := testNewDDLAndStart( context.Background(), - WithStore(s.store), + WithStore(store), WithLease(testLease), ) - c.Assert(err, IsNil) + require.NoError(t, err) defer func() { err := d.Stop() - c.Assert(err, IsNil) + require.NoError(t, err) }() dbInfo, err := testSchemaInfo(d, "test_partition") - c.Assert(err, IsNil) - testCreateSchema(c, testNewContext(d), d, dbInfo) + require.NoError(t, err) + testCreateSchemaT(t, testNewContext(d), d, dbInfo) // generate 5 partition in tableInfo. - tblInfo, partIDs := buildTableInfoWithPartition(c, d) + tblInfo, partIDs := buildTableInfoWithPartition(t, d) ctx := testNewContext(d) - testCreateTable(c, ctx, d, dbInfo, tblInfo) + testCreateTableT(t, ctx, d, dbInfo, tblInfo) - testDropPartition(c, ctx, d, dbInfo, tblInfo, []string{"p0", "p1"}) + testDropPartition(t, ctx, d, dbInfo, tblInfo, []string{"p0", "p1"}) - testTruncatePartition(c, ctx, d, dbInfo, tblInfo, []int64{partIDs[3], partIDs[4]}) + testTruncatePartition(t, ctx, d, dbInfo, tblInfo, []int64{partIDs[3], partIDs[4]}) } -func buildTableInfoWithPartition(c *C, d *ddl) (*model.TableInfo, []int64) { +func buildTableInfoWithPartition(t *testing.T, d *ddl) (*model.TableInfo, []int64) { tbl := &model.TableInfo{ Name: model.NewCIStr("t"), } @@ -76,14 +62,14 @@ func buildTableInfoWithPartition(c *C, d *ddl) (*model.TableInfo, []int64) { ID: allocateColumnID(tbl), } genIDs, err := d.genGlobalIDs(1) - c.Assert(err, IsNil) + require.NoError(t, err) tbl.ID = genIDs[0] tbl.Columns = []*model.ColumnInfo{col} tbl.Charset = "utf8" tbl.Collate = "utf8_bin" partIDs, err := d.genGlobalIDs(5) - c.Assert(err, IsNil) + require.NoError(t, err) partInfo := &model.PartitionInfo{ Type: model.PartitionTypeRange, Expr: tbl.Columns[0].Name.L, @@ -130,12 +116,12 @@ func buildDropPartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, partN } } -func testDropPartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job { +func testDropPartition(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job { job := buildDropPartitionJob(dbInfo, tblInfo, partNames) err := d.doDDLJob(ctx, job) - c.Assert(err, IsNil) - v := getSchemaVer(c, ctx) - checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + require.NoError(t, err) + v := getSchemaVerT(t, ctx) + checkHistoryJobArgsT(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } @@ -149,11 +135,11 @@ func buildTruncatePartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, p } } -func testTruncatePartition(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job { +func testTruncatePartition(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo 
*model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job { job := buildTruncatePartitionJob(dbInfo, tblInfo, pids) err := d.doDDLJob(ctx, job) - c.Assert(err, IsNil) - v := getSchemaVer(c, ctx) - checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + require.NoError(t, err) + v := getSchemaVerT(t, ctx) + checkHistoryJobArgsT(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } diff --git a/ddl/util/syncer_serial_test.go b/ddl/util/syncer_test.go similarity index 100% rename from ddl/util/syncer_serial_test.go rename to ddl/util/syncer_test.go diff --git a/executor/executor_test.go b/executor/executor_test.go index 02feb44abaa65..d4b11b0758d8b 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -9686,3 +9686,29 @@ func (s *testSerialSuite) TestUnreasonablyClose(c *C) { } c.Assert(opsAlreadyCoveredMask, Equals, opsNeedsCoveredMask, Commentf("these operators are not covered %s", commentBuf.String())) } + +func (s *testSerialSuite) TestIssue30971(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1 (id int);") + tk.MustExec("create table t2 (id int, c int);") + + testCases := []struct { + sql string + fields int + }{ + // Fix a bug where the column length field returned to the client was incorrect when using the MySQL prepare protocol. + {"select * from t1 union select 1 from t1", 1}, + {"select c from t2 union select * from t1", 1}, + {"select * from t1", 1}, + {"select * from t2 where c in (select * from t1)", 2}, + {"insert into t1 values (?)", 0}, + {"update t1 set id = ?", 0}, + } + for _, test := range testCases { + _, _, fields, err := tk.Se.PrepareStmt(test.sql) + c.Assert(err, IsNil) + c.Assert(fields, HasLen, test.fields) + } +} diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 06d35d24fce1d..b5eee15e32600 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -543,6 +543,8 @@ func (e *memtableRetriever) setDataFromTables(ctx context.Context, sctx sessionc if !table.IsView() { if table.GetPartitionInfo() != nil { createOptions = "partitioned" + } else if table.TableCacheStatusType == model.TableCacheStatusEnable { + createOptions = "cached=on" } var autoIncID interface{} hasAutoIncID, _ := infoschema.HasAutoIncrementColumn(table) diff --git a/executor/prepared.go b/executor/prepared.go index 4f63bce491ab9..0676a77215f2e 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -220,7 +220,7 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { if err != nil { return err } - if _, ok := stmt.(*ast.SelectStmt); ok { + if p.Schema().Len() > 0 { e.Fields = colNames2ResultFields(p.Schema(), p.OutputNames(), vars.CurrentDB) } if e.ID == 0 { @@ -316,9 +316,8 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { prepared := preparedObj.PreparedAst delete(vars.PreparedStmtNameToID, e.Name) if plannercore.PreparedPlanCacheEnabled() { - bindSQL := planner.GetBindSQL4PlanCache(e.ctx, prepared.Stmt) e.ctx.PreparedPlanCache().Delete(plannercore.NewPlanCacheKey( - vars, id, prepared.SchemaVersion, bindSQL, + vars, id, prepared.SchemaVersion, )) } vars.RemovePreparedStmt(id) diff --git a/executor/show.go b/executor/show.go index ea4bdf4f1aee8..066d819cc9b80 100644 --- a/executor/show.go +++ b/executor/show.go @@ -1077,6 +1077,12 @@ func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.T fmt.Fprintf(buf, "
/*T![placement] PLACEMENT POLICY=%s */", stringutil.Escape(tableInfo.PlacementPolicyRef.Name.String(), sqlMode)) } + if tableInfo.TableCacheStatusType == model.TableCacheStatusEnable { + // This is not meant to be understood by other components, so it's not written as /*T![cached] */ + // For all external components, a cached table is just a normal table. + fmt.Fprintf(buf, " /* CACHED ON */") + } + // add direct placement info here appendDirectPlacementInfo(tableInfo.DirectPlacementOpts, buf) // add partition info here. diff --git a/executor/show_test.go b/executor/show_test.go index 34db4d54ae962..229d4399dae4d 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -1674,3 +1674,24 @@ func (s *testSuite5) TestShowTemporaryTable(c *C) { ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=2" tk.MustQuery("show create table t7").Check(testkit.Rows("t7 " + expect)) } + +func (s *testSuite5) TestShowCachedTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int)") + tk.MustExec("alter table t1 cache") + tk.MustQuery("show create table t1").Check( + testkit.Rows("t1 CREATE TABLE `t1` (\n" + + " `id` int(11) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /* CACHED ON */")) + tk.MustQuery("select create_options from information_schema.tables where table_schema = 'test' and table_name = 't1'").Check( + testkit.Rows("cached=on")) + + tk.MustExec("alter table t1 nocache") + tk.MustQuery("show create table t1").Check( + testkit.Rows("t1 CREATE TABLE `t1` (\n" + + " `id` int(11) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + tk.MustQuery("select create_options from information_schema.tables where table_schema = 'test' and table_name = 't1'").Check( + testkit.Rows("")) +} diff --git a/executor/testdata/agg_suite_out.json b/executor/testdata/agg_suite_out.json index abfb0f9d102ef..1937b7f4cc358 100644 --- a/executor/testdata/agg_suite_out.json +++ b/executor/testdata/agg_suite_out.json @@ -54,7 +54,7 @@ " ├─Apply(Build) 1.00 root CARTESIAN left outer join", " │ ├─Apply(Build) 1.00 root CARTESIAN left outer join", " │ │ ├─HashAgg(Build) 1.00 root funcs:sum(Column#28)->Column#9, funcs:firstrow(Column#29)->test.test.a", - " │ │ │ └─Projection 10000.00 root cast(test.test.a, decimal(32,0) BINARY)->Column#28, test.test.a", + " │ │ │ └─Projection 10000.00 root cast(test.test.a, decimal(10,0) BINARY)->Column#28, test.test.a", " │ │ │ └─TableReader 10000.00 root data:TableFullScan", " │ │ │ └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo", " │ │ └─Projection(Probe) 1.00 root ->Column#12", diff --git a/expression/aggregation/base_func.go b/expression/aggregation/base_func.go index 1c639eeb4f14d..a185910c82b97 100644 --- a/expression/aggregation/base_func.go +++ b/expression/aggregation/base_func.go @@ -178,7 +178,7 @@ func (a *baseFuncDesc) typeInfer4ApproxPercentile(ctx sessionctx.Context) error return nil } -// typeInfer4Sum should returns a "decimal", otherwise it returns a "double". +// typeInfer4Sum should return a "decimal", otherwise it returns a "double". // Because child returns integer or decimal type.
func (a *baseFuncDesc) typeInfer4Sum(ctx sessionctx.Context) { switch a.Args[0].GetType().Tp { @@ -421,6 +421,7 @@ func (a *baseFuncDesc) WrapCastForAggArgs(ctx sessionctx.Context) { if a.Args[i].GetType().Tp == mysql.TypeNull { continue } + tpOld := a.Args[i].GetType().Tp a.Args[i] = castFunc(ctx, a.Args[i]) if a.Name != ast.AggFuncAvg && a.Name != ast.AggFuncSum { continue } @@ -443,5 +444,37 @@ originTp := a.Args[i].GetType().Tp *(a.Args[i].GetType()) = *(a.RetTp) a.Args[i].GetType().Tp = originTp + + // refine each MySQL integer type to the needed decimal precision for sum + if a.Name == ast.AggFuncSum { + adjustDecimalLenForSumInteger(a.Args[i].GetType(), tpOld) + } + } +} + +func adjustDecimalLenForSumInteger(ft *types.FieldType, tpOld byte) { + if types.IsTypeInteger(tpOld) && ft.Tp == mysql.TypeNewDecimal { + if flen, err := minimalDecimalLenForHoldingInteger(tpOld); err == nil { + ft.Flen = mathutil.Min(ft.Flen, flen+ft.Decimal) + } + } +} + +func minimalDecimalLenForHoldingInteger(tp byte) (int, error) { + switch tp { + case mysql.TypeTiny: + return 3, nil + case mysql.TypeShort: + return 5, nil + case mysql.TypeInt24: + return 8, nil + case mysql.TypeLong: + return 10, nil + case mysql.TypeLonglong: + return 20, nil + case mysql.TypeYear: + return 4, nil + default: + return -1, errors.Errorf("Invalid type: %v", tp) } } diff --git a/expression/builtin.go b/expression/builtin.go index 1b67239613498..71fde7647a598 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -114,7 +114,7 @@ func newBaseBuiltinFunc(ctx sessionctx.Context, funcName string, args []Expressi // newBaseBuiltinFuncWithTp creates a built-in function signature with specified types of arguments and the return type of the function. // argTps indicates the types of the args, retType indicates the return type of the built-in function. -// Every built-in function needs determined argTps and retType when we create it. +// Every built-in function needs its argTps and retType to be determined when we create it. 
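The digit widths hard-coded in minimalDecimalLenForHoldingInteger above are the decimal digit counts of each MySQL integer type's unsigned maximum (plus 9999 for YEAR), which also covers the signed ranges. A minimal, standalone sketch, not part of this patch, that reproduces the table:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Unsigned maximum of each MySQL integer type; YEAR is capped at 9999.
	widths := []struct {
		name string
		max  uint64
	}{
		{"TINYINT", 255},                 // -> 3 digits
		{"SMALLINT", 65535},              // -> 5 digits
		{"MEDIUMINT", 16777215},          // -> 8 digits
		{"INT", 4294967295},              // -> 10 digits
		{"BIGINT", 18446744073709551615}, // -> 20 digits
		{"YEAR", 9999},                   // -> 4 digits
	}
	for _, w := range widths {
		fmt.Printf("%-9s needs %d decimal digits\n", w.name, len(strconv.FormatUint(w.max, 10)))
	}
}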
func newBaseBuiltinFuncWithTp(ctx sessionctx.Context, funcName string, args []Expression, retType types.EvalType, argTps ...types.EvalType) (bf baseBuiltinFunc, err error) { if len(args) != len(argTps) { panic("unexpected length of args and argTps") diff --git a/go.mod b/go.mod index f11eab1cf0456..6a60bdbeb67df 100644 --- a/go.mod +++ b/go.mod @@ -65,7 +65,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.0-rc.0.20211221041211-e9de5625c45c + github.com/tikv/client-go/v2 v2.0.0-rc.0.20211223062159-300275dee63e github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible diff --git a/go.sum b/go.sum index 2a4c23d9d70bc..087c367fd7482 100644 --- a/go.sum +++ b/go.sum @@ -712,8 +712,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/client-go/v2 v2.0.0-rc.0.20211221041211-e9de5625c45c h1:1P6iN1csRSZNHXuaylArmG3/bA5MpYVzc9ZkdHK/L2Y= -github.com/tikv/client-go/v2 v2.0.0-rc.0.20211221041211-e9de5625c45c/go.mod h1:wRuh+W35daKTiYBld0oBlT6PSkzEVr+pB/vChzJZk+8= +github.com/tikv/client-go/v2 v2.0.0-rc.0.20211223062159-300275dee63e h1:UildvukO7gTs4/bW+h6jNnpv6syWmh2VMQxD5sMm9II= +github.com/tikv/client-go/v2 v2.0.0-rc.0.20211223062159-300275dee63e/go.mod h1:wRuh+W35daKTiYBld0oBlT6PSkzEVr+pB/vChzJZk+8= github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae/go.mod h1:varH0IE0jJ9E9WN2Ei/N6pajMlPkcXdDEf7f5mmsUVQ= github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee h1:rAAdvQ8Hh36syHr92g0VmZEpkH+40RGQBpFL2121xMs= github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee/go.mod h1:lRbwxBAhnTQR5vqbTzeI/Bj62bD2OvYYuFezo2vrmeI= diff --git a/infoschema/cluster_tables_serial_test.go b/infoschema/cluster_tables_test.go similarity index 100% rename from infoschema/cluster_tables_serial_test.go rename to infoschema/cluster_tables_test.go diff --git a/infoschema/perfschema/tables_serial_test.go b/infoschema/perfschema/tables_serial_test.go deleted file mode 100644 index f8126a57b2b7c..0000000000000 --- a/infoschema/perfschema/tables_serial_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package perfschema_test - -import ( - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "runtime/pprof" - "strings" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestTiKVProfileCPU(t *testing.T) { - store, clean := newMockStore(t) - defer clean() - - router := http.NewServeMux() - mockServer := httptest.NewServer(router) - mockAddr := strings.TrimPrefix(mockServer.URL, "http://") - defer mockServer.Close() - - // mock tikv profile - copyHandler := func(filename string) http.HandlerFunc { - return func(w http.ResponseWriter, _ *http.Request) { - file, err := os.Open(filepath.Join(currentSourceDir(), filename)) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - defer func() { terror.Log(file.Close()) }() - _, err = io.Copy(w, file) - terror.Log(err) - } - } - router.HandleFunc("/debug/pprof/profile", copyHandler("testdata/tikv.cpu.profile")) - - // failpoint setting - servers := []string{ - strings.Join([]string{"tikv", mockAddr, mockAddr}, ","), - strings.Join([]string{"pd", mockAddr, mockAddr}, ","), - } - fpExpr := strings.Join(servers, ";") - fpName := "github.com/pingcap/tidb/infoschema/perfschema/mockRemoteNodeStatusAddress" - require.NoError(t, failpoint.Enable(fpName, fmt.Sprintf(`return("%s")`, fpExpr))) - defer func() { require.NoError(t, failpoint.Disable(fpName)) }() - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use performance_schema") - result := tk.MustQuery("select function, percent_abs, percent_rel from tikv_profile_cpu where depth < 3") - - warnings := tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - result.Check(testkit.Rows( - "root 100% 100%", - "├─tikv::server::load_statistics::linux::ThreadLoadStatistics::record::h59facb8d680e7794 75.00% 75.00%", - "│ └─procinfo::pid::stat::stat_task::h69e1aa2c331aebb6 75.00% 100%", - "├─nom::nom::digit::h905aaaeff7d8ec8e 16.07% 16.07%", - "│ ├─ as core::iter::traits::iterator::Iterator>::next::h16936f9061bb75e4 6.25% 38.89%", - "│ ├─Unknown 3.57% 22.22%", - "│ ├─<&u8 as nom::traits::AsChar>::is_dec_digit::he9eacc3fad26ab81 2.68% 16.67%", - "│ ├─<&[u8] as nom::traits::InputIter>::iter_indices::h6192338433683bff 1.79% 11.11%", - "│ └─<&[T] as nom::traits::Slice>>::slice::h38d31f11f84aa302 1.79% 11.11%", - "├─::realloc::h5199c50710ab6f9d 1.79% 1.79%", - "│ └─rallocx 1.79% 100%", - "├─::dealloc::hea83459aa98dd2dc 1.79% 1.79%", - "│ └─sdallocx 1.79% 100%", - "├─::alloc::hc7962e02169a5c56 0.89% 0.89%", - "│ └─mallocx 0.89% 100%", - "├─engine::rocks::util::engine_metrics::flush_engine_iostall_properties::h64a7661c95aa1db7 0.89% 0.89%", - "│ └─rocksdb::rocksdb::DB::get_map_property_cf::h9722f9040411af44 0.89% 100%", - "├─core::ptr::real_drop_in_place::h8def0d99e7136f33 0.89% 0.89%", - "│ └─ as core::ops::drop::Drop>::drop::h9b59b303bffde02c 0.89% 100%", - "├─tikv_util::metrics::threads_linux::ThreadInfoStatistics::record::ha8cc290b3f46af88 0.89% 0.89%", - "│ └─procinfo::pid::stat::stat_task::h69e1aa2c331aebb6 0.89% 100%", - "├─crossbeam_utils::backoff::Backoff::snooze::h5c121ef4ce616a3c 0.89% 0.89%", - "│ └─core::iter::range::>::next::hdb23ceb766e7a91f 0.89% 100%", - "└─::next::he129c78b3deb639d 0.89% 0.89%", - " └─Unknown 0.89% 100%")) - - // We can use current processe profile to mock profile of PD because the PD has the - // same way of retrieving profile 
with TiDB. And the purpose of this test case is used - // to make sure all profile HTTP API have been accessed. - accessed := map[string]struct{}{} - handlerFactory := func(name string, debug ...int) func(w http.ResponseWriter, _ *http.Request) { - debugLevel := 0 - if len(debug) > 0 { - debugLevel = debug[0] - } - return func(w http.ResponseWriter, _ *http.Request) { - profile := pprof.Lookup(name) - if profile == nil { - http.Error(w, fmt.Sprintf("profile %s not found", name), http.StatusBadRequest) - return - } - if err := profile.WriteTo(w, debugLevel); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - accessed[name] = struct{}{} - } - } - - // mock PD profile - router.HandleFunc("/pd/api/v1/debug/pprof/profile", copyHandler("../../util/profile/testdata/test.pprof")) - router.HandleFunc("/pd/api/v1/debug/pprof/heap", handlerFactory("heap")) - router.HandleFunc("/pd/api/v1/debug/pprof/mutex", handlerFactory("mutex")) - router.HandleFunc("/pd/api/v1/debug/pprof/allocs", handlerFactory("allocs")) - router.HandleFunc("/pd/api/v1/debug/pprof/block", handlerFactory("block")) - router.HandleFunc("/pd/api/v1/debug/pprof/goroutine", handlerFactory("goroutine", 2)) - - tk.MustQuery("select * from pd_profile_cpu where depth < 3") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - tk.MustQuery("select * from pd_profile_memory where depth < 3") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - tk.MustQuery("select * from pd_profile_mutex where depth < 3") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - tk.MustQuery("select * from pd_profile_allocs where depth < 3") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - tk.MustQuery("select * from pd_profile_block where depth < 3") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - tk.MustQuery("select * from pd_profile_goroutines") - warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) - - require.Lenf(t, accessed, 5, "expect all HTTP API had been accessed, but found: %v", accessed) -} diff --git a/infoschema/perfschema/tables_test.go b/infoschema/perfschema/tables_test.go index be739a5b6af48..057ba404efec7 100644 --- a/infoschema/perfschema/tables_test.go +++ b/infoschema/perfschema/tables_test.go @@ -15,12 +15,21 @@ package perfschema_test import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" "path/filepath" "runtime" + "runtime/pprof" + "strings" "testing" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema/perfschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/testkit" @@ -44,6 +53,133 @@ func TestPerfSchemaTables(t *testing.T) { tk.MustQuery("select * from events_stages_history_long").Check(testkit.Rows()) } +func TestTiKVProfileCPU(t *testing.T) { + store, clean := newMockStore(t) + defer clean() + + router := http.NewServeMux() + mockServer := httptest.NewServer(router) + mockAddr := 
strings.TrimPrefix(mockServer.URL, "http://") + defer mockServer.Close() + + // mock tikv profile + copyHandler := func(filename string) http.HandlerFunc { + return func(w http.ResponseWriter, _ *http.Request) { + file, err := os.Open(filepath.Join(currentSourceDir(), filename)) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + defer func() { terror.Log(file.Close()) }() + _, err = io.Copy(w, file) + terror.Log(err) + } + } + router.HandleFunc("/debug/pprof/profile", copyHandler("testdata/tikv.cpu.profile")) + + // failpoint setting + servers := []string{ + strings.Join([]string{"tikv", mockAddr, mockAddr}, ","), + strings.Join([]string{"pd", mockAddr, mockAddr}, ","), + } + fpExpr := strings.Join(servers, ";") + fpName := "github.com/pingcap/tidb/infoschema/perfschema/mockRemoteNodeStatusAddress" + require.NoError(t, failpoint.Enable(fpName, fmt.Sprintf(`return("%s")`, fpExpr))) + defer func() { require.NoError(t, failpoint.Disable(fpName)) }() + + tk := testkit.NewTestKit(t, store) + + tk.MustExec("use performance_schema") + result := tk.MustQuery("select function, percent_abs, percent_rel from tikv_profile_cpu where depth < 3") + + warnings := tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + result.Check(testkit.Rows( + "root 100% 100%", + "├─tikv::server::load_statistics::linux::ThreadLoadStatistics::record::h59facb8d680e7794 75.00% 75.00%", + "│ └─procinfo::pid::stat::stat_task::h69e1aa2c331aebb6 75.00% 100%", + "├─nom::nom::digit::h905aaaeff7d8ec8e 16.07% 16.07%", + "│ ├─ as core::iter::traits::iterator::Iterator>::next::h16936f9061bb75e4 6.25% 38.89%", + "│ ├─Unknown 3.57% 22.22%", + "│ ├─<&u8 as nom::traits::AsChar>::is_dec_digit::he9eacc3fad26ab81 2.68% 16.67%", + "│ ├─<&[u8] as nom::traits::InputIter>::iter_indices::h6192338433683bff 1.79% 11.11%", + "│ └─<&[T] as nom::traits::Slice>>::slice::h38d31f11f84aa302 1.79% 11.11%", + "├─::realloc::h5199c50710ab6f9d 1.79% 1.79%", + "│ └─rallocx 1.79% 100%", + "├─::dealloc::hea83459aa98dd2dc 1.79% 1.79%", + "│ └─sdallocx 1.79% 100%", + "├─::alloc::hc7962e02169a5c56 0.89% 0.89%", + "│ └─mallocx 0.89% 100%", + "├─engine::rocks::util::engine_metrics::flush_engine_iostall_properties::h64a7661c95aa1db7 0.89% 0.89%", + "│ └─rocksdb::rocksdb::DB::get_map_property_cf::h9722f9040411af44 0.89% 100%", + "├─core::ptr::real_drop_in_place::h8def0d99e7136f33 0.89% 0.89%", + "│ └─ as core::ops::drop::Drop>::drop::h9b59b303bffde02c 0.89% 100%", + "├─tikv_util::metrics::threads_linux::ThreadInfoStatistics::record::ha8cc290b3f46af88 0.89% 0.89%", + "│ └─procinfo::pid::stat::stat_task::h69e1aa2c331aebb6 0.89% 100%", + "├─crossbeam_utils::backoff::Backoff::snooze::h5c121ef4ce616a3c 0.89% 0.89%", + "│ └─core::iter::range::>::next::hdb23ceb766e7a91f 0.89% 100%", + "└─::next::he129c78b3deb639d 0.89% 0.89%", + " └─Unknown 0.89% 100%")) + + // We can use the current process's profile to mock the profile of PD because PD + // retrieves profiles the same way TiDB does. The purpose of this test case is + // to make sure all profile HTTP APIs have been accessed. 
+ accessed := map[string]struct{}{} + handlerFactory := func(name string, debug ...int) func(w http.ResponseWriter, _ *http.Request) { + debugLevel := 0 + if len(debug) > 0 { + debugLevel = debug[0] + } + return func(w http.ResponseWriter, _ *http.Request) { + profile := pprof.Lookup(name) + if profile == nil { + http.Error(w, fmt.Sprintf("profile %s not found", name), http.StatusBadRequest) + return + } + if err := profile.WriteTo(w, debugLevel); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + accessed[name] = struct{}{} + } + } + + // mock PD profile + router.HandleFunc("/pd/api/v1/debug/pprof/profile", copyHandler("../../util/profile/testdata/test.pprof")) + router.HandleFunc("/pd/api/v1/debug/pprof/heap", handlerFactory("heap")) + router.HandleFunc("/pd/api/v1/debug/pprof/mutex", handlerFactory("mutex")) + router.HandleFunc("/pd/api/v1/debug/pprof/allocs", handlerFactory("allocs")) + router.HandleFunc("/pd/api/v1/debug/pprof/block", handlerFactory("block")) + router.HandleFunc("/pd/api/v1/debug/pprof/goroutine", handlerFactory("goroutine", 2)) + + tk.MustQuery("select * from pd_profile_cpu where depth < 3") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + tk.MustQuery("select * from pd_profile_memory where depth < 3") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + tk.MustQuery("select * from pd_profile_mutex where depth < 3") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + tk.MustQuery("select * from pd_profile_allocs where depth < 3") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + tk.MustQuery("select * from pd_profile_block where depth < 3") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + tk.MustQuery("select * from pd_profile_goroutines") + warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() + require.Lenf(t, warnings, 0, "expect no warnings, but found: %+v", warnings) + + require.Lenf(t, accessed, 5, "expect all HTTP APIs to have been accessed, but found: %v", accessed) +} + func newMockStore(t *testing.T) (store kv.Storage, clean func()) { var err error store, err = mockstore.NewMockStore() diff --git a/infoschema/tables_serial_test.go b/infoschema/tables_test.go similarity index 100% rename from infoschema/tables_serial_test.go rename to infoschema/tables_test.go diff --git a/parser/mysql/type.go b/parser/mysql/type.go index be030bd9c81d3..c54d0f8984b63 100644 --- a/parser/mysql/type.go +++ b/parser/mysql/type.go @@ -16,15 +16,15 @@ package mysql // MySQL type information. 
const ( TypeUnspecified byte = 0 - TypeTiny byte = 1 - TypeShort byte = 2 - TypeLong byte = 3 + TypeTiny byte = 1 // TINYINT + TypeShort byte = 2 // SMALLINT + TypeLong byte = 3 // INT TypeFloat byte = 4 TypeDouble byte = 5 TypeNull byte = 6 TypeTimestamp byte = 7 - TypeLonglong byte = 8 - TypeInt24 byte = 9 + TypeLonglong byte = 8 // BIGINT + TypeInt24 byte = 9 // MEDIUMINT TypeDate byte = 10 /* TypeDuration original name was TypeTime, renamed to TypeDuration to resolve the conflict with Go type Time.*/ TypeDuration byte = 11 diff --git a/planner/cascades/testdata/integration_suite_out.json b/planner/cascades/testdata/integration_suite_out.json index 3b2719c02d293..21b601bfc9198 100644 --- a/planner/cascades/testdata/integration_suite_out.json +++ b/planner/cascades/testdata/integration_suite_out.json @@ -197,7 +197,7 @@ " └─Sort_29 6400.00 root test.t.b", " └─Selection_28 6400.00 root gt(Column#4, 1)", " └─HashAgg_19 8000.00 root group by:test.t.b, funcs:avg(Column#11)->Column#3, funcs:sum(Column#12)->Column#4, funcs:firstrow(test.t.b)->test.t.b", - " └─Projection_20 10000.00 root cast(test.t.a, decimal(15,4) BINARY)->Column#11, cast(test.t.a, decimal(32,0) BINARY)->Column#12, test.t.b, test.t.b", + " └─Projection_20 10000.00 root cast(test.t.a, decimal(15,4) BINARY)->Column#11, cast(test.t.a, decimal(10,0) BINARY)->Column#12, test.t.b, test.t.b", " └─TableReader_21 10000.00 root data:TableFullScan_22", " └─TableFullScan_22 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], @@ -636,7 +636,7 @@ "Plan": [ "Projection_8 8000.00 root Column#5, test.t.c, Column#5, Column#6, Column#7, Column#8, Column#9", "└─HashAgg_9 8000.00 root group by:test.t.c, funcs:avg(Column#10)->Column#5, funcs:count(distinct test.t.a, test.t.b)->Column#6, funcs:count(distinct test.t.a)->Column#7, funcs:count(distinct test.t.c)->Column#8, funcs:sum(Column#11)->Column#9, funcs:firstrow(test.t.c)->test.t.c", - " └─Projection_10 10000.00 root cast(test.t.b, decimal(15,4) BINARY)->Column#10, test.t.a, test.t.b, test.t.a, test.t.c, cast(test.t.b, decimal(32,0) BINARY)->Column#11, test.t.c, test.t.c", + " └─Projection_10 10000.00 root cast(test.t.b, decimal(15,4) BINARY)->Column#10, test.t.a, test.t.b, test.t.a, test.t.c, cast(test.t.b, decimal(10,0) BINARY)->Column#11, test.t.c, test.t.c", " └─TableReader_11 10000.00 root data:TableFullScan_12", " └─TableFullScan_12 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], diff --git a/planner/core/cache.go b/planner/core/cache.go index 2ef974340c063..7e741e203d3dc 100644 --- a/planner/core/cache.go +++ b/planner/core/cache.go @@ -79,7 +79,6 @@ type planCacheKey struct { timezoneOffset int isolationReadEngines map[kv.StoreType]struct{} selectLimit uint64 - bindSQL string hash []byte } @@ -110,7 +109,6 @@ func (key *planCacheKey) Hash() []byte { key.hash = append(key.hash, kv.TiFlash.Name()...) } key.hash = codec.EncodeInt(key.hash, int64(key.selectLimit)) - key.hash = append(key.hash, hack.Slice(key.bindSQL)...) } return key.hash } @@ -132,7 +130,7 @@ func SetPstmtIDSchemaVersion(key kvcache.Key, pstmtID uint32, schemaVersion int6 } // NewPlanCacheKey creates a new planCacheKey object. 
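The change below drops bindSQL from the plan cache key; the binding instead travels with the cached value (see the PlanCacheValue and common_plans.go hunks that follow), and a mismatch at lookup time evicts the stale entry. A minimal, generic sketch of that pattern, using hypothetical names rather than TiDB's actual types:

package main

import "fmt"

type cachedPlan struct {
	plan    string // stands in for the physical plan
	bindSQL string // binding in effect when the plan was cached
}

// planCache keys no longer include the binding SQL.
type planCache map[string]*cachedPlan

func (c planCache) get(key, currentBindSQL string) (string, bool) {
	v, ok := c[key]
	if !ok {
		return "", false
	}
	if v.bindSQL != currentBindSQL {
		// A binding was created or dropped since the plan was cached,
		// so the plan may no longer honor it: evict and force a rebuild.
		delete(c, key)
		return "", false
	}
	return v.plan, true
}

func main() {
	c := planCache{"stmt1@v1": {plan: "IndexScan", bindSQL: ""}}
	if _, hit := c.get("stmt1@v1", "SELECT /*+ use_index(t, idx) */ ..."); !hit {
		fmt.Println("stale plan evicted; it will be rebuilt under the new binding")
	}
}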
-func NewPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64, bindSQL string) kvcache.Key { +func NewPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64) kvcache.Key { timezoneOffset := 0 if sessionVars.TimeZone != nil { _, timezoneOffset = time.Now().In(sessionVars.TimeZone).Zone() @@ -146,7 +144,6 @@ func NewPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVe timezoneOffset: timezoneOffset, isolationReadEngines: make(map[kv.StoreType]struct{}), selectLimit: sessionVars.SelectLimit, - bindSQL: bindSQL, } for k, v := range sessionVars.IsolationReadEngines { key.isolationReadEngines[k] = v @@ -185,10 +182,11 @@ type PlanCacheValue struct { OutPutNames []*types.FieldName TblInfo2UnionScan map[*model.TableInfo]bool UserVarTypes FieldSlice + BindSQL string } // NewPlanCacheValue creates a SQLCacheValue. -func NewPlanCacheValue(plan Plan, names []*types.FieldName, srcMap map[*model.TableInfo]bool, userVarTps []*types.FieldType) *PlanCacheValue { +func NewPlanCacheValue(plan Plan, names []*types.FieldName, srcMap map[*model.TableInfo]bool, userVarTps []*types.FieldType, bindSQL string) *PlanCacheValue { dstMap := make(map[*model.TableInfo]bool) for k, v := range srcMap { dstMap[k] = v @@ -202,6 +200,7 @@ func NewPlanCacheValue(plan Plan, names []*types.FieldName, srcMap map[*model.Ta OutPutNames: names, TblInfo2UnionScan: dstMap, UserVarTypes: userVarTypes, + BindSQL: bindSQL, } } diff --git a/planner/core/cache_test.go b/planner/core/cache_test.go index c75a4b3963713..3527fa9d7ecb2 100644 --- a/planner/core/cache_test.go +++ b/planner/core/cache_test.go @@ -28,6 +28,6 @@ func TestCacheKey(t *testing.T) { ctx.GetSessionVars().SQLMode = mysql.ModeNone ctx.GetSessionVars().TimeZone = time.UTC ctx.GetSessionVars().ConnectionID = 0 - key := NewPlanCacheKey(ctx.GetSessionVars(), 1, 1, "") + key := NewPlanCacheKey(ctx.GetSessionVars(), 1, 1) require.Equal(t, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x74, 0x69, 0x64, 0x62, 0x74, 0x69, 0x6b, 0x76, 0x74, 0x69, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, key.Hash()) } diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 54f30d7d998ea..9dc276fec650b 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -404,7 +404,7 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, var bindSQL string if prepared.UseCache { bindSQL = GetBindSQL4PlanCache(sctx, prepared.Stmt) - cacheKey = NewPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion, bindSQL) + cacheKey = NewPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) } tps := make([]*types.FieldType, len(e.UsingVars)) for i, param := range e.UsingVars { @@ -447,6 +447,13 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, } cachedVals := cacheValue.([]*PlanCacheValue) for _, cachedVal := range cachedVals { + if cachedVal.BindSQL != bindSQL { + // When BindSQL does not match, a new binding has been added since + // the plan was cached, which invalidates the cached plan, + // so the cached plan can be cleared directly + sctx.PreparedPlanCache().Delete(cacheKey) + break + } if !cachedVal.UserVarTypes.Equal(tps) { continue } @@ -510,13 +517,10 @@ REBUILD: // 
rebuild key to exclude kv.TiFlash when stmt is not read only if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { delete(sessVars.IsolationReadEngines, kv.TiFlash) - cacheKey = NewPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) + cacheKey = NewPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion) sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} - } else { - // We need to reconstruct the plan cache key based on the bindSQL. - cacheKey = NewPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) } - cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps) + cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps, sessVars.StmtCtx.BindSQL) preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) if cacheVals, exists := sctx.PreparedPlanCache().Get(cacheKey); exists { diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 166d3adc298b3..5157ffb738b24 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" tidbutil "github.com/pingcap/tidb/util" @@ -973,7 +974,7 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c if prop.ExpectedCnt < ds.stats.RowCount { totalRowCount *= prop.ExpectedCnt / ds.stats.RowCount } - ts, partialCost, err := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount) + ts, partialCost, remainingFilters, err := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount) if err != nil { return nil, err } @@ -981,6 +982,9 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c cop.tablePlan = ts cop.idxMergePartPlans = scans cop.cst = totalCost + if remainingFilters != nil { + cop.rootTaskConds = remainingFilters + } task = cop.convertToRootTask(ds.ctx) ds.addSelection4PlanCache(task.(*rootTask), ds.tableStats.ScaleByExpectCnt(totalRowCount), prop) return task, nil @@ -1092,8 +1096,10 @@ func setIndexMergeTableScanHandleCols(ds *DataSource, ts *PhysicalTableScan) (er return } +// buildIndexMergeTableScan() returns a Selection that will be pushed to TiKV. +// Filters that cannot be pushed to TiKV are also returned, and an extra Selection above the IndexMergeReader will be constructed later. 
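A simplified sketch of the filter split performed by buildIndexMergeTableScan and extractFiltersForIndexMerge below: whatever TiKV can evaluate stays in the pushed-down Selection, and the rest becomes rootTaskConds, which later turns into a Selection above the IndexMergeReader. The string conditions and the tikvCanEval predicate are stand-ins, and the extra check against other storage engines is omitted:

package main

import "fmt"

// classify partitions filters by whether TiKV can evaluate them.
func classify(filters []string, tikvCanEval func(string) bool) (pushed, remaining []string) {
	for _, f := range filters {
		if tikvCanEval(f) {
			pushed = append(pushed, f)
		} else {
			remaining = append(remaining, f)
		}
	}
	return
}

func main() {
	filters := []string{`c2 = '10'`, `char_length(lpad(c1, 10, 'a')) = 10`}
	tikvCanEval := func(f string) bool { return f == `c2 = '10'` } // lpad is not pushable
	pushed, remaining := classify(filters, tikvCanEval)
	fmt.Println("cop Selection (inside TiKV):", pushed)
	fmt.Println("root Selection (in TiDB):", remaining)
}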
func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, tableFilters []expression.Expression, - totalRowCount float64) (PhysicalPlan, float64, error) { + totalRowCount float64) (PhysicalPlan, float64, []expression.Expression, error) { var partialCost float64 sessVars := ds.ctx.GetSessionVars() ts := PhysicalTableScan{ @@ -1108,7 +1114,7 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, ts.SetSchema(ds.schema.Clone()) err := setIndexMergeTableScanHandleCols(ds, ts) if err != nil { - return nil, 0, err + return nil, 0, nil, err } if ts.Table.PKIsHandle { if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil { @@ -1124,17 +1130,44 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, ts.stats.StatsVersion = statistics.PseudoVersion } if len(tableFilters) > 0 { - partialCost += totalRowCount * sessVars.CopCPUFactor - selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, tableFilters, nil) - if err != nil { - logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) - selectivity = SelectionFactor + pushedFilters, remainingFilters := extractFiltersForIndexMerge(sessVars.StmtCtx, ds.ctx.GetClient(), tableFilters) + pushedFilters1, remainingFilters1 := SplitSelCondsWithVirtualColumn(pushedFilters) + pushedFilters = pushedFilters1 + remainingFilters = append(remainingFilters, remainingFilters1...) + if len(pushedFilters) != 0 { + partialCost += totalRowCount * sessVars.CopCPUFactor + selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, pushedFilters, nil) + if err != nil { + logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) + selectivity = SelectionFactor + } + sel := PhysicalSelection{Conditions: pushedFilters}.Init(ts.ctx, ts.stats.ScaleByExpectCnt(selectivity*totalRowCount), ts.blockOffset) + sel.SetChildren(ts) + return sel, partialCost, remainingFilters, nil + } + return ts, partialCost, remainingFilters, nil + } + return ts, partialCost, nil, nil +} + +// extractFiltersForIndexMerge returns: +// `pushed`: exprs that can be pushed to TiKV. +// `remaining`: exprs that cannot be pushed to TiKV but can be pushed to other storage engines. +// Why do we need this func? +// IndexMerge only works on TiKV, so we need to find all exprs that cannot be pushed to TiKV, and add a new Selection above the IndexMergeReader. +// But the new Selection should exclude the exprs that cannot be pushed to ANY storage engine, +// because those exprs have already been put in another Selection (see rule_predicate_push_down). 
+func extractFiltersForIndexMerge(sc *stmtctx.StatementContext, client kv.Client, filters []expression.Expression) (pushed []expression.Expression, remaining []expression.Expression) { + for _, expr := range filters { + if expression.CanExprsPushDown(sc, []expression.Expression{expr}, client, kv.TiKV) { + pushed = append(pushed, expr) + continue + } + if expression.CanExprsPushDown(sc, []expression.Expression{expr}, client, kv.UnSpecified) { + remaining = append(remaining, expr) } - sel := PhysicalSelection{Conditions: tableFilters}.Init(ts.ctx, ts.stats.ScaleByExpectCnt(selectivity*totalRowCount), ts.blockOffset) - sel.SetChildren(ts) - return sel, partialCost, nil } - return ts, partialCost, nil + return } func indexCoveringCol(col *expression.Column, indexCols []*expression.Column, idxColLens []int) bool { @@ -1409,15 +1442,15 @@ func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSou } // SplitSelCondsWithVirtualColumn filter the select conditions which contain virtual column -func SplitSelCondsWithVirtualColumn(conds []expression.Expression) ([]expression.Expression, []expression.Expression) { - var filterConds []expression.Expression - for i := len(conds) - 1; i >= 0; i-- { +func SplitSelCondsWithVirtualColumn(conds []expression.Expression) (withoutVirt []expression.Expression, withVirt []expression.Expression) { + for i := range conds { if expression.ContainVirtualColumn(conds[i : i+1]) { - filterConds = append(filterConds, conds[i]) - conds = append(conds[:i], conds[i+1:]...) + withVirt = append(withVirt, conds[i]) + } else { + withoutVirt = append(withoutVirt, conds[i]) } } - return conds, filterConds + return withoutVirt, withVirt } func matchIndicesProp(idxCols []*expression.Column, colLens []int, propItems []property.SortItem) bool { diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 0dfc4532a89bc..38048d4d30009 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -4992,6 +4992,82 @@ func (s *testIntegrationSuite) TestIssue30094(c *C) { )) } +func (s *testIntegrationSuite) TestIssue30200(c *C) { + tk := testkit.NewTestKit(c, s.store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 varchar(100), c2 varchar(100), key(c1), key(c2), c3 varchar(100));") + tk.MustExec("insert into t1 values('ab', '10', '10');") + + // lpad has not been pushed to TiKV or TiFlash. + tk.MustQuery("explain format=brief select /*+ use_index_merge(t1) */ * from t1 where c1 = 'ab' or c2 = '10' and char_length(lpad(c1, 10, 'a')) = 10;").Check(testkit.Rows( + "Selection 15.99 root or(eq(test.t1.c1, \"ab\"), and(eq(test.t1.c2, \"10\"), eq(char_length(lpad(test.t1.c1, 10, \"a\")), 10)))", + "└─IndexMerge 19.99 root ", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c1(c1) range:[\"ab\",\"ab\"], keep order:false, stats:pseudo", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c2(c2) range:[\"10\",\"10\"], keep order:false, stats:pseudo", + " └─TableRowIDScan(Probe) 19.99 cop[tikv] table:t1 keep order:false, stats:pseudo")) + tk.MustQuery("select /*+ use_index_merge(t1) */ 1 from t1 where c1 = 'de' or c2 = '10' and char_length(lpad(c1, 10, 'a')) = 10;").Check(testkit.Rows("1")) + + // `left` has not been pushed to TiKV, but it has been pushed to TiFlash. 
+ tk.MustQuery("explain format=brief select /*+ use_index_merge(t1) */ * from t1 where c1 = 'ab' or c2 = '10' and char_length(left(c1, 10)) = 10;").Check(testkit.Rows( + "Selection 0.04 root or(eq(test.t1.c1, \"ab\"), and(eq(test.t1.c2, \"10\"), eq(char_length(left(test.t1.c1, 10)), 10)))", + "└─IndexMerge 19.99 root ", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c1(c1) range:[\"ab\",\"ab\"], keep order:false, stats:pseudo", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:c2(c2) range:[\"10\",\"10\"], keep order:false, stats:pseudo", + " └─TableRowIDScan(Probe) 19.99 cop[tikv] table:t1 keep order:false, stats:pseudo")) + tk.MustQuery("select /*+ use_index_merge(t1) */ 1 from t1 where c1 = 'ab' or c2 = '10' and char_length(left(c1, 10)) = 10;").Check(testkit.Rows("1")) + + // If no hint, we cannot use index merge if filter cannot be pushed to any storage. + oriIndexMergeSwitcher := tk.MustQuery("select @@tidb_enable_index_merge;").Rows()[0][0].(string) + tk.MustExec("set tidb_enable_index_merge = on;") + defer func() { + tk.MustExec(fmt.Sprintf("set tidb_enable_index_merge = %s;", oriIndexMergeSwitcher)) + }() + tk.MustQuery("explain format=brief select * from t1 where c1 = 'ab' or c2 = '10' and char_length(lpad(c1, 10, 'a')) = 10;").Check(testkit.Rows( + "Selection 8000.00 root or(eq(test.t1.c1, \"ab\"), and(eq(test.t1.c2, \"10\"), eq(char_length(lpad(test.t1.c1, 10, \"a\")), 10)))", + "└─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo")) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 varchar(100), c2 varchar(100), c3 varchar(100), c4 varchar(100), key idx_0(c1), key idx_1(c2, c3));") + tk.MustExec("insert into t1 values('ab', '10', '10', '10');") + // c3 is part of idx_1, so it will be put in partial_path's IndexFilters instead of TableFilters. + // But it still cannot be pushed to TiKV. + tk.MustQuery("explain select /*+ use_index_merge(t1) */ 1 from t1 where c1 = 'de' or c2 = '10' and char_length(lpad(c3, 10, 'a')) = 10;").Check(testkit.Rows( + "Projection_4 15.99 root 1->Column#6", + "└─Selection_5 15.99 root or(eq(test.t1.c1, \"de\"), and(eq(test.t1.c2, \"10\"), eq(char_length(lpad(test.t1.c3, 10, \"a\")), 10)))", + " └─IndexMerge_9 19.99 root ", + " ├─IndexRangeScan_6(Build) 10.00 cop[tikv] table:t1, index:idx_0(c1) range:[\"de\",\"de\"], keep order:false, stats:pseudo", + " ├─IndexRangeScan_7(Build) 10.00 cop[tikv] table:t1, index:idx_1(c2, c3) range:[\"10\",\"10\"], keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 19.99 cop[tikv] table:t1 keep order:false, stats:pseudo")) + tk.MustQuery("select /*+ use_index_merge(t1) */ 1 from t1 where c1 = 'de' or c2 = '10' and char_length(lpad(c3, 10, 'a')) = 10;").Check(testkit.Rows("1")) + + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1 (c1 int , pk int, primary key( pk ) , unique key( c1));") + tk.MustExec("insert into t1 values(-3896405, -1), (-2, 1), (-1, -2);") + // to_base64(left(pk, 5)) is in partial_path's TableFilters. But it cannot be pushed to TiKV. So it should be executed in TiDB. 
+ tk.MustQuery("explain select /*+ use_index_merge( t1 ) */ * from t1 where t1.c1 in (-3896405) or t1.pk in (1, 53330) and to_base64(left(pk, 5));").Check(testkit.Rows( + "Selection_5 2.40 root or(eq(test.t1.c1, -3896405), and(in(test.t1.pk, 1, 53330), istrue_with_null(cast(to_base64(left(cast(test.t1.pk, var_string(20)), 5)), double BINARY))))", + "└─IndexMerge_9 3.00 root ", + " ├─IndexRangeScan_6(Build) 1.00 cop[tikv] table:t1, index:c1(c1) range:[-3896405,-3896405], keep order:false, stats:pseudo", + " ├─TableRangeScan_7(Build) 2.00 cop[tikv] table:t1 range:[1,1], [53330,53330], keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 3.00 cop[tikv] table:t1 keep order:false, stats:pseudo")) + tk.MustQuery("select /*+ use_index_merge( t1 ) */ * from t1 where t1.c1 in (-3896405) or t1.pk in (1, 53330) and to_base64(left(pk, 5));").Check(testkit.Rows("-3896405 -1")) + + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int, c2 int, c3 int as (c1 + c2), key(c1), key(c2), key(c3));") + tk.MustExec("insert into t1(c1, c2) values(1, 1);") + tk.MustQuery("explain format=brief select /*+ use_index_merge(t1) */ * from t1 where c1 < -10 or c2 < 10 and reverse(c3) = '2';").Check(testkit.Rows( + "Selection 2825.66 root or(lt(test.t1.c1, -10), and(lt(test.t1.c2, 10), eq(reverse(cast(test.t1.c3, var_string(20))), \"2\")))", + "└─IndexMerge 5542.21 root ", + " ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,-10), keep order:false, stats:pseudo", + " ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo", + " └─TableRowIDScan(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo")) + tk.MustQuery("select /*+ use_index_merge(t1) */ * from t1 where c1 < -10 or c2 < 10 and reverse(c3) = '2';").Check(testkit.Rows("1 1 2")) +} + func (s *testIntegrationSuite) TestIssue29705(c *C) { tk := testkit.NewTestKit(c, s.store) origin := tk.MustQuery("SELECT @@session.tidb_partition_prune_mode") @@ -5034,3 +5110,26 @@ func (s *testIntegrationSuite) TestIssue30804(c *C) { c.Assert(core.ErrWindowNoSuchWindow.Equal(err), IsTrue) tk.MustExec("select avg(0) over w1 from t1 where b > (select sum(t2.a) over w2 from t2 window w2 as (partition by t2.b)) window w1 as (partition by t1.b)") } + +func (s *testIntegrationSuite) TestIndexMergeWarning(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(c1 int, c2 int)") + tk.MustExec("select /*+ use_index_merge(t1) */ * from t1 where c1 < 1 or c2 < 1") + warningMsg := "Warning 1105 IndexMerge is inapplicable or disabled. No available filter or available index." + tk.MustQuery("show warnings").Check(testkit.Rows(warningMsg)) + + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(c1 int, c2 int, key(c1), key(c2))") + tk.MustExec("select /*+ use_index_merge(t1), no_index_merge() */ * from t1 where c1 < 1 or c2 < 1") + warningMsg = "Warning 1105 IndexMerge is inapplicable or disabled. Got no_index_merge hint or tidb_enable_index_merge is off." + tk.MustQuery("show warnings").Check(testkit.Rows(warningMsg)) + + tk.MustExec("drop table if exists t1") + tk.MustExec("create temporary table t1(c1 int, c2 int, key(c1), key(c2))") + tk.MustExec("select /*+ use_index_merge(t1) */ * from t1 where c1 < 1 or c2 < 1") + warningMsg = "Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table." 
+ tk.MustQuery("show warnings").Check(testkit.Rows(warningMsg)) +} diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index 5fe0426b5c15b..1b0f6c4543985 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -549,7 +549,7 @@ type DataSource struct { // pushedDownConds are the conditions that will be pushed down to coprocessor. pushedDownConds []expression.Expression // allConds contains all the filters on this table. For now it's maintained - // in predicate push down and used only in partition pruning. + // in predicate push down and used in partition pruning/index merge. allConds []expression.Expression statisticTable *statistics.Table diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index 1b3d13c02cc03..bc145e4a14ad7 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -2016,3 +2016,26 @@ func (s *testPlanSuite) TestIssue28316(c *C) { tk.MustQuery("explain format='brief' " + ts).Check(testkit.Rows(output[i].Plan...)) } } + +func (s *testPlanSuite) TestIssue30965(c *C) { + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t30965") + tk.MustExec("CREATE TABLE `t30965` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL, `c` int(11) DEFAULT NULL, `d` int(11) GENERATED ALWAYS AS (`a` + 1) VIRTUAL, KEY `ib` (`b`));") + tk.MustExec("insert into t30965 (a,b,c) value(3,4,5);") + tk.MustQuery("select count(*) from t30965 where d = 2 and b = 4 and a = 3 and c = 5;").Check(testkit.Rows("0")) + tk.MustQuery("explain format = 'brief' select count(*) from t30965 where d = 2 and b = 4 and a = 3 and c = 5;").Check( + testkit.Rows( + "StreamAgg 1.00 root funcs:count(1)->Column#6", + "└─Selection 0.00 root eq(test.t30965.d, 2)", + " └─IndexLookUp 0.00 root ", + " ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t30965, index:ib(b) range:[4,4], keep order:false, stats:pseudo", + " └─Selection(Probe) 0.00 cop[tikv] eq(test.t30965.a, 3), eq(test.t30965.c, 5)", + " └─TableRowIDScan 10.00 cop[tikv] table:t30965 keep order:false, stats:pseudo")) +} diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 6e2f9aaed4f53..7eb2a3c041c52 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -109,7 +109,7 @@ func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode { return node } -// Preprocess resolves table names of the node, and checks some statements validation. +// Preprocess resolves table names of the node, and checks some statements' validation. // preprocessReturn used to extract the infoschema for the tableName and the timestamp from the asof clause. 
func Preprocess(ctx sessionctx.Context, node ast.Node, preprocessOpt ...PreprocessOpt) error { v := preprocessor{ctx: ctx, tableAliasInJoin: make([]map[string]interface{}, 0), withName: make(map[string]interface{})} diff --git a/planner/core/stats.go b/planner/core/stats.go index d3f23427b2f40..4b7ac3e33d00a 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -409,9 +410,20 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema * } // Consider the IndexMergePath. Now, we just generate `IndexMergePath` in DNF case. - isPossibleIdxMerge := len(ds.pushedDownConds) > 0 && len(ds.possibleAccessPaths) > 1 - sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !ds.ctx.GetSessionVars().StmtCtx.NoIndexMergeHint - // If there is an index path, we current do not consider `IndexMergePath`. + // Use allConds instead of pushedDownConds, + // because we want to use IndexMerge even if some expr cannot be pushed to TiKV. + // We will create a new Selection for exprs that cannot be pushed in convertToIndexMergeScan. + var indexMergeConds []expression.Expression + for _, expr := range ds.allConds { + indexMergeConds = append(indexMergeConds, expression.PushDownNot(ds.ctx, expr)) + } + + stmtCtx := ds.ctx.GetSessionVars().StmtCtx + isPossibleIdxMerge := len(indexMergeConds) > 0 && len(ds.possibleAccessPaths) > 1 + sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || len(ds.indexMergeHints) > 0) && !stmtCtx.NoIndexMergeHint + // We currently do not consider `IndexMergePath`: + // 1. If there is an index path. + // 2. TODO: If there exist exprs that cannot be pushed down, to avoid a wrong estRow for the Selection added by rule_predicate_push_down. needConsiderIndexMerge := true if len(ds.indexMergeHints) == 0 { for i := 1; i < len(ds.possibleAccessPaths); i++ { @@ -420,24 +432,45 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema * break } } + if needConsiderIndexMerge { + // PushDownExprs() will append extra warnings, which is annoying. So we reset warnings here. + warnings := stmtCtx.GetWarnings() + _, remaining := expression.PushDownExprs(stmtCtx, indexMergeConds, ds.ctx.GetClient(), kv.UnSpecified) + stmtCtx.SetWarnings(warnings) + if len(remaining) != 0 { + needConsiderIndexMerge = false + } + } } - readFromTableCache := ds.ctx.GetSessionVars().StmtCtx.ReadFromTableCache + readFromTableCache := stmtCtx.ReadFromTableCache if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge && ds.tableInfo.TempTableType != model.TempTableLocal && !readFromTableCache { - err := ds.generateAndPruneIndexMergePath(ds.indexMergeHints != nil) + err := ds.generateAndPruneIndexMergePath(indexMergeConds, ds.indexMergeHints != nil) if err != nil { return nil, err } } else if len(ds.indexMergeHints) > 0 { ds.indexMergeHints = nil - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable or disabled")) + var msg string + if !isPossibleIdxMerge { + msg = "No available filter or available index." + } else if !sessionAndStmtPermission { + msg = "Got no_index_merge hint or tidb_enable_index_merge is off." 
+ } else if ds.tableInfo.TempTableType == model.TempTableLocal { + msg = "Cannot use IndexMerge on temporary table." + } else if readFromTableCache { + msg = "Cannot use IndexMerge on TableCache." + } + msg = fmt.Sprintf("IndexMerge is inapplicable or disabled. %s", msg) + stmtCtx.AppendWarning(errors.Errorf(msg)) + logutil.BgLogger().Debug(msg) } return ds.stats, nil } -func (ds *DataSource) generateAndPruneIndexMergePath(needPrune bool) error { +func (ds *DataSource) generateAndPruneIndexMergePath(indexMergeConds []expression.Expression, needPrune bool) error { regularPathCount := len(ds.possibleAccessPaths) - err := ds.generateIndexMergeOrPaths() + err := ds.generateIndexMergeOrPaths(indexMergeConds) if err != nil { return err } @@ -448,12 +481,22 @@ func (ds *DataSource) generateAndPruneIndexMergePath(needPrune bool) error { // With hints and without generated IndexMerge paths if regularPathCount == len(ds.possibleAccessPaths) { ds.indexMergeHints = nil - ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable or disabled")) + ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable.")) return nil } // Do not need to consider the regular paths in find_best_task(). + // So we can use index merge's row count as DataSource's row count. if needPrune { ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] + maxRowCount := ds.possibleAccessPaths[0].CountAfterAccess + for _, path := range ds.possibleAccessPaths { + if maxRowCount < path.CountAfterAccess { + maxRowCount = path.CountAfterAccess + } + } + if ds.stats.RowCount > maxRowCount { + ds.stats = ds.tableStats.ScaleByExpectCnt(maxRowCount) + } } return nil } @@ -510,9 +553,9 @@ func (is *LogicalIndexScan) DeriveStats(childStats []*property.StatsInfo, selfSc } // getIndexMergeOrPath generates all possible IndexMergeOrPaths. -func (ds *DataSource) generateIndexMergeOrPaths() error { +func (ds *DataSource) generateIndexMergeOrPaths(filters []expression.Expression) error { usedIndexCount := len(ds.possibleAccessPaths) - for i, cond := range ds.pushedDownConds { + for i, cond := range filters { sf, ok := cond.(*expression.ScalarFunction) if !ok || sf.FuncName.L != ast.LogicOr { continue } @@ -548,7 +591,7 @@ continue } if len(partialPaths) > 1 { - possiblePath := ds.buildIndexMergeOrPath(partialPaths, i) + possiblePath := ds.buildIndexMergeOrPath(filters, partialPaths, i) if possiblePath == nil { return nil } @@ -686,16 +729,29 @@ func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*util.Access } // buildIndexMergeOrPath generates one possible IndexMergePath. -func (ds *DataSource) buildIndexMergeOrPath(partialPaths []*util.AccessPath, current int) *util.AccessPath { +func (ds *DataSource) buildIndexMergeOrPath(filters []expression.Expression, partialPaths []*util.AccessPath, current int) *util.AccessPath { indexMergePath := &util.AccessPath{PartialIndexPaths: partialPaths} - indexMergePath.TableFilters = append(indexMergePath.TableFilters, ds.pushedDownConds[:current]...) - indexMergePath.TableFilters = append(indexMergePath.TableFilters, ds.pushedDownConds[current+1:]...) + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[:current]...) + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current+1:]...) 
+ var addCurrentFilter bool for _, path := range partialPaths { // If any partial path contains table filters, we need to keep the whole DNF filter in the Selection. if len(path.TableFilters) > 0 { - indexMergePath.TableFilters = append(indexMergePath.TableFilters, ds.pushedDownConds[current]) - break + addCurrentFilter = true } + // If any partial path's index filter cannot be pushed to TiKV, we should keep the whole DNF filter. + if len(path.IndexFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.IndexFilters, ds.ctx.GetClient(), kv.TiKV) { + addCurrentFilter = true + // Clear IndexFilters; the whole filter will be put in indexMergePath.TableFilters. + path.IndexFilters = nil + } + if len(path.TableFilters) != 0 && !expression.CanExprsPushDown(ds.ctx.GetSessionVars().StmtCtx, path.TableFilters, ds.ctx.GetClient(), kv.TiKV) { + addCurrentFilter = true + path.TableFilters = nil + } + } + if addCurrentFilter { + indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current]) } return indexMergePath } diff --git a/planner/core/task.go b/planner/core/task.go index 187140c613aa5..4b9c5692ca29d 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1005,6 +1005,7 @@ func (t *copTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { setTableScanToTableRowIDScan(p.tablePlan) newTask.p = p p.cost = newTask.cost() + t.handleRootTaskConds(ctx, newTask) if t.needExtraProj { schema := t.originSchema proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.stats, t.idxMergePartPlans[0].SelectBlockOffset(), nil) @@ -1066,6 +1067,11 @@ } } + t.handleRootTaskConds(ctx, newTask) + return newTask +} + +func (t *copTask) handleRootTaskConds(ctx sessionctx.Context, newTask *rootTask) { if len(t.rootTaskConds) > 0 { selectivity, _, err := t.tblColHists.Selectivity(ctx, t.rootTaskConds, nil) if err != nil { @@ -1077,8 +1083,6 @@ newTask.p = sel sel.cost = newTask.cost() } - - return newTask } // setTableScanToTableRowIDScan is to update the isChildOfIndexLookUp attribute of PhysicalTableScan child diff --git a/planner/core/testdata/integration_serial_suite_out.json b/planner/core/testdata/integration_serial_suite_out.json index fa07e7323f022..8e88ca5157d3f 100644 --- a/planner/core/testdata/integration_serial_suite_out.json +++ b/planner/core/testdata/integration_serial_suite_out.json @@ -1463,7 +1463,7 @@ "StreamAgg 1.00 root funcs:avg(Column#7, Column#8)->Column#4", "└─TableReader 1.00 root data:StreamAgg", " └─StreamAgg 1.00 batchCop[tiflash] funcs:count(Column#9)->Column#7, funcs:sum(Column#10)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] test.t.a, cast(test.t.a, decimal(37,4) BINARY)->Column#10", + " └─Projection 10000.00 batchCop[tiflash] test.t.a, cast(test.t.a, decimal(14,4) BINARY)->Column#10", " └─TableFullScan 
10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null @@ -1485,7 +1485,7 @@ "StreamAgg 1.00 root funcs:sum(Column#6)->Column#4", "└─TableReader 1.00 root data:StreamAgg", " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(Column#7)->Column#6", - " └─Projection 10000.00 batchCop[tiflash] cast(test.t.a, decimal(32,0) BINARY)->Column#7", + " └─Projection 10000.00 batchCop[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#7", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null @@ -1496,7 +1496,7 @@ "StreamAgg 1.00 root funcs:sum(Column#6)->Column#4", "└─TableReader 1.00 root data:StreamAgg", " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(Column#7)->Column#6", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.a, 1), decimal(41,0) BINARY)->Column#7", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.a, 1), decimal(20,0) BINARY)->Column#7", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null @@ -1507,7 +1507,7 @@ "StreamAgg 1.00 root funcs:sum(Column#6)->Column#4", "└─TableReader 1.00 root data:StreamAgg", " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(Column#7)->Column#6", - " └─Projection 10000.00 batchCop[tiflash] cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#7", + " └─Projection 10000.00 batchCop[tiflash] cast(isnull(test.t.a), decimal(20,0) BINARY)->Column#7", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ], "Warn": null @@ -1837,7 +1837,7 @@ "HashAgg 1.00 root funcs:sum(Column#8)->Column#6", "└─TableReader 1.00 root data:HashAgg", " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#10)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(41,0) BINARY)->Column#10", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(20,0) BINARY)->Column#10", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, @@ -1866,7 +1866,7 @@ "StreamAgg 1.00 root funcs:sum(Column#8)->Column#6", "└─TableReader 1.00 root data:StreamAgg", " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(Column#10)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(41,0) BINARY)->Column#10", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(20,0) BINARY)->Column#10", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, @@ -1992,7 +1992,7 @@ "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#11)->Column#9", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(41,0) BINARY)->Column#11", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(20,0) BINARY)->Column#11", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, @@ -2024,7 +2024,7 @@ "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#11)->Column#10", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(41,0) BINARY)->Column#11", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(20,0) BINARY)->Column#11", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, @@ -2442,7 +2442,7 @@ "└─TableReader 1.00 
root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#9)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(41,0) BINARY)->Column#9", + " └─Projection 10000.00 batchCop[tiflash] cast(plus(test.t.id, 1), decimal(20,0) BINARY)->Column#9", " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, @@ -2595,7 +2595,7 @@ "└─ExchangeSender 7992.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 7992.00 batchCop[tiflash] Column#7", " └─HashAgg 7992.00 batchCop[tiflash] group by:Column#11, funcs:sum(Column#10)->Column#7", - " └─Projection 12487.50 batchCop[tiflash] cast(test.t.id, decimal(32,0) BINARY)->Column#10, test.t.id", + " └─Projection 12487.50 batchCop[tiflash] cast(test.t.id, decimal(10,0) BINARY)->Column#10, test.t.id", " └─HashJoin 12487.50 batchCop[tiflash] inner join, equal:[eq(test.t.id, test.t.id)]", " ├─ExchangeReceiver(Build) 9990.00 batchCop[tiflash] ", " │ └─ExchangeSender 9990.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]", @@ -2622,7 +2622,7 @@ " │ └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo", " └─Projection(Probe) 7992.00 batchCop[tiflash] Column#11, test.t.id", " └─HashAgg 7992.00 batchCop[tiflash] group by:Column#39, funcs:sum(Column#37)->Column#11, funcs:firstrow(Column#38)->test.t.id", - " └─Projection 9990.00 batchCop[tiflash] cast(test.t.id, decimal(32,0) BINARY)->Column#37, test.t.id, test.t.id", + " └─Projection 9990.00 batchCop[tiflash] cast(test.t.id, decimal(10,0) BINARY)->Column#37, test.t.id, test.t.id", " └─HashJoin 9990.00 batchCop[tiflash] inner join, equal:[eq(test.t.id, test.t.id)]", " ├─Projection(Build) 7992.00 batchCop[tiflash] test.t.id, Column#13", " │ └─HashAgg 7992.00 batchCop[tiflash] group by:test.t.id, funcs:firstrow(test.t.id)->test.t.id, funcs:sum(Column#17)->Column#13", @@ -2704,7 +2704,7 @@ "└─ExchangeSender 6400.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 6400.00 batchCop[tiflash] Column#4", " └─HashAgg 6400.00 batchCop[tiflash] group by:Column#22, funcs:sum(Column#21)->Column#4", - " └─Projection 6400.00 batchCop[tiflash] cast(test.t.id, decimal(32,0) BINARY)->Column#21, test.t.value", + " └─Projection 6400.00 batchCop[tiflash] cast(test.t.id, decimal(10,0) BINARY)->Column#21, test.t.value", " └─Projection 6400.00 batchCop[tiflash] test.t.id, test.t.value", " └─HashAgg 6400.00 batchCop[tiflash] group by:test.t.id, test.t.value, funcs:firstrow(test.t.id)->test.t.id, funcs:firstrow(test.t.value)->test.t.value", " └─ExchangeReceiver 6400.00 batchCop[tiflash] ", @@ -2868,7 +2868,7 @@ " └─ExchangeReceiver 7992.00 batchCop[tiflash] ", " └─ExchangeSender 7992.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]", " └─HashAgg 7992.00 batchCop[tiflash] group by:Column#11, funcs:sum(Column#10)->Column#8", - " └─Projection 12487.50 batchCop[tiflash] cast(test.t.id, decimal(32,0) BINARY)->Column#10, test.t.id", + " └─Projection 12487.50 batchCop[tiflash] cast(test.t.id, decimal(10,0) BINARY)->Column#10, test.t.id", " └─HashJoin 12487.50 batchCop[tiflash] inner join, equal:[eq(test.t.id, test.t.id)]", " ├─ExchangeReceiver(Build) 9990.00 batchCop[tiflash] ", " │ └─ExchangeSender 9990.00 batchCop[tiflash] ExchangeType: Broadcast", @@ -2922,7 +2922,7 @@ " └─ExchangeReceiver 7992.00 batchCop[tiflash] ", " └─ExchangeSender 7992.00 
batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]", " └─HashAgg 7992.00 batchCop[tiflash] group by:Column#33, funcs:sum(Column#32)->Column#17", - " └─Projection 9990.00 batchCop[tiflash] cast(test.t.id, decimal(32,0) BINARY)->Column#32, test.t.id", + " └─Projection 9990.00 batchCop[tiflash] cast(test.t.id, decimal(10,0) BINARY)->Column#32, test.t.id", " └─HashJoin 9990.00 batchCop[tiflash] inner join, equal:[eq(test.t.id, test.t.id)]", " ├─ExchangeReceiver(Build) 7992.00 batchCop[tiflash] ", " │ └─ExchangeSender 7992.00 batchCop[tiflash] ExchangeType: Broadcast", @@ -3157,7 +3157,7 @@ " │ └─TableReader 1.00 root data:ExchangeSender", " │ └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " │ └─HashAgg 1.00 batchCop[tiflash] funcs:min(Column#42)->Column#36, funcs:sum(Column#43)->Column#37, funcs:count(1)->Column#38", - " │ └─Projection 10000.00 batchCop[tiflash] test.ts.col_varchar_64, cast(isnull(test.ts.col_varchar_64), decimal(22,0) BINARY)->Column#43", + " │ └─Projection 10000.00 batchCop[tiflash] test.ts.col_varchar_64, cast(isnull(test.ts.col_varchar_64), decimal(20,0) BINARY)->Column#43", " │ └─TableFullScan 10000.00 batchCop[tiflash] table:SUBQUERY4_t1 keep order:false, stats:pseudo", " └─TableReader(Probe) 12487.50 root data:ExchangeSender", " └─ExchangeSender 12487.50 cop[tiflash] ExchangeType: PassThrough", @@ -3379,7 +3379,7 @@ "└─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 8000.00 batchCop[tiflash] Column#5, Column#6, Column#7, div(Column#8, cast(case(eq(Column#11, 0), 1, Column#11), decimal(20,0) BINARY))->Column#8", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#20, funcs:group_concat(Column#13, Column#14, Column#15 order by Column#16 separator \",\")->Column#5, funcs:count(1)->Column#6, funcs:min(Column#17)->Column#7, funcs:count(Column#18)->Column#11, funcs:sum(Column#19)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#15, test.ts.col_0, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#19, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#15, test.ts.col_0, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#19, test.ts.col_2", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3399,7 +3399,7 @@ " └─ExchangeReceiver 8000.00 batchCop[tiflash] ", " └─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#32, Column#33, Column#34, Column#35, funcs:count(1)->Column#25, funcs:max(Column#29)->Column#26, funcs:count(Column#30)->Column#27, funcs:sum(Column#31)->Column#28", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#31, test.ts.col_2, test.ts.col_0, test.ts.col_1, test.ts.id", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#31, test.ts.col_2, test.ts.col_0, test.ts.col_1, test.ts.id", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3413,7 +3413,7 @@ 
"└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 1.00 batchCop[tiflash] Column#5, Column#6, Column#7, div(Column#8, cast(case(eq(Column#10, 0), 1, Column#10), decimal(20,0) BINARY))->Column#8", " └─HashAgg 1.00 batchCop[tiflash] funcs:group_concat(Column#11, Column#12, Column#13 order by Column#14 separator \",\")->Column#5, funcs:count(Column#15)->Column#6, funcs:min(Column#16)->Column#7, funcs:count(Column#17)->Column#10, funcs:sum(Column#18)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#13, test.ts.col_0, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#18", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#13, test.ts.col_0, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#18", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3433,7 +3433,7 @@ " └─ExchangeReceiver 1.00 batchCop[tiflash] ", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] group by:Column#27, Column#28, Column#29, funcs:count(Column#23)->Column#19, funcs:max(Column#24)->Column#20, funcs:count(Column#25)->Column#21, funcs:sum(Column#26)->Column#22", - " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#26, test.ts.col_0, test.ts.col_1, test.ts.id", + " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#26, test.ts.col_0, test.ts.col_1, test.ts.id", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3450,7 +3450,7 @@ " └─ExchangeReceiver 8000.00 batchCop[tiflash] ", " └─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#40, funcs:group_concat(Column#33, Column#34, Column#35 separator \",\")->Column#28, funcs:count(Column#36)->Column#29, funcs:min(Column#37)->Column#30, funcs:count(Column#38)->Column#31, funcs:sum(Column#39)->Column#32", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#35, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#39, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#35, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#39, test.ts.col_2", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3468,7 +3468,7 @@ " └─ExchangeReceiver 8000.00 batchCop[tiflash] ", " └─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#33, Column#34, Column#35, Column#36, funcs:count(Column#29)->Column#25, funcs:max(Column#30)->Column#26, funcs:count(Column#31)->Column#27, funcs:sum(Column#32)->Column#28", - " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#32, 
test.ts.col_2, test.ts.col_0, test.ts.col_1, test.ts.id", + " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#32, test.ts.col_2, test.ts.col_0, test.ts.col_1, test.ts.id", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3482,7 +3482,7 @@ "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] funcs:group_concat(Column#24, Column#25, Column#26 separator \",\")->Column#14, funcs:count(Column#27)->Column#15, funcs:min(Column#28)->Column#16, funcs:count(Column#29)->Column#17, funcs:sum(Column#30)->Column#18", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#26, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#30", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#26, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#30", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3500,7 +3500,7 @@ " └─ExchangeReceiver 1.00 batchCop[tiflash] ", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] group by:Column#27, Column#28, Column#29, funcs:count(Column#23)->Column#19, funcs:max(Column#24)->Column#20, funcs:count(Column#25)->Column#21, funcs:sum(Column#26)->Column#22", - " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#26, test.ts.col_0, test.ts.col_1, test.ts.id", + " └─Projection 10000.00 batchCop[tiflash] test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#26, test.ts.col_0, test.ts.col_1, test.ts.id", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3514,7 +3514,7 @@ "└─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 8000.00 batchCop[tiflash] Column#5, Column#6, Column#7, div(Column#8, cast(case(eq(Column#17, 0), 1, Column#17), decimal(20,0) BINARY))->Column#8", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#29, funcs:group_concat(Column#21, Column#22, Column#23 separator \",\")->Column#5, funcs:count(Column#24)->Column#6, funcs:group_concat(Column#25 order by Column#26 separator \",\")->Column#7, funcs:count(Column#27)->Column#17, funcs:sum(Column#28)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#23, test.ts.id, test.ts.col_0, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#28, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#23, test.ts.id, test.ts.col_0, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#28, test.ts.col_2", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3530,7 +3530,7 @@ "└─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 8000.00 batchCop[tiflash] Column#5, Column#6, Column#7, 
div(Column#8, cast(case(eq(Column#14, 0), 1, Column#14), decimal(20,0) BINARY))->Column#8", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#25, funcs:group_concat(distinct Column#17 separator \",\")->Column#5, funcs:count(Column#18)->Column#6, funcs:group_concat(Column#19, Column#20 order by Column#21, Column#22 separator \",\")->Column#7, funcs:count(Column#23)->Column#14, funcs:sum(Column#24)->Column#8", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#20, test.ts.col_1, test.ts.id, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#24, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#20, test.ts.col_1, test.ts.id, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#24, test.ts.col_2", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_2, collate: N/A]", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3546,7 +3546,7 @@ "└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 1.00 batchCop[tiflash] Column#5, Column#6, Column#7, Column#8, div(Column#9, cast(case(eq(Column#15, 0), 1, Column#15), decimal(20,0) BINARY))->Column#9", " └─HashAgg 1.00 batchCop[tiflash] funcs:group_concat(Column#18, Column#19 separator \",\")->Column#5, funcs:count(Column#20)->Column#6, funcs:group_concat(Column#21, Column#22 order by Column#23, Column#24 separator \",\")->Column#7, funcs:min(Column#25)->Column#8, funcs:count(Column#26)->Column#15, funcs:sum(Column#27)->Column#9", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, cast(test.ts.id, var_string(20))->Column#19, test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#22, test.ts.col_1, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#27", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, cast(test.ts.id, var_string(20))->Column#19, test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#22, test.ts.col_1, test.ts.id, test.ts.col_0, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#27", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3562,7 +3562,7 @@ "└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─Projection 1.00 batchCop[tiflash] Column#5, Column#6, Column#7, Column#8, div(Column#9, cast(case(eq(Column#13, 0), 1, Column#13), decimal(20,0) BINARY))->Column#9", " └─HashAgg 1.00 batchCop[tiflash] funcs:group_concat(distinct Column#15, Column#16, Column#17 separator \",\")->Column#5, funcs:count(Column#18)->Column#6, funcs:group_concat(Column#19, Column#20 order by Column#21, Column#22 separator \",\")->Column#7, funcs:max(Column#23)->Column#8, funcs:count(Column#24)->Column#13, funcs:sum(Column#25)->Column#9", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#17, test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#20, test.ts.col_1, test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#25", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_0, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#17, 
test.ts.id, test.ts.col_1, cast(test.ts.id, var_string(20))->Column#20, test.ts.col_1, test.ts.id, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#25", " └─ExchangeReceiver 10000.00 batchCop[tiflash] ", " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: PassThrough", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" @@ -3582,7 +3582,7 @@ " └─ExchangeReceiver 1.00 batchCop[tiflash] ", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", " └─HashAgg 1.00 batchCop[tiflash] group by:Column#29, Column#30, Column#31, Column#32, funcs:group_concat(Column#24, Column#25 separator \",\")->Column#20, funcs:max(Column#26)->Column#21, funcs:count(Column#27)->Column#22, funcs:sum(Column#28)->Column#23", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, cast(test.ts.id, var_string(20))->Column#25, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#28, test.ts.col_0, test.ts.col_1, test.ts.id, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, cast(test.ts.id, var_string(20))->Column#25, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#28, test.ts.col_0, test.ts.col_1, test.ts.id, test.ts.col_2", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ @@ -3600,7 +3600,7 @@ " └─ExchangeReceiver 8000.00 batchCop[tiflash] ", " └─ExchangeSender 8000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.ts.col_0, collate: N/A]", " └─HashAgg 8000.00 batchCop[tiflash] group by:Column#35, Column#36, Column#37, Column#38, funcs:group_concat(Column#30, Column#31 separator \",\")->Column#26, funcs:max(Column#32)->Column#27, funcs:count(Column#33)->Column#28, funcs:sum(Column#34)->Column#29", - " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, cast(test.ts.id, var_string(20))->Column#31, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(37,4) BINARY)->Column#34, test.ts.col_0, test.ts.col_1, test.ts.id, test.ts.col_2", + " └─Projection 10000.00 batchCop[tiflash] test.ts.col_1, cast(test.ts.id, var_string(20))->Column#31, test.ts.col_1, test.ts.id, cast(test.ts.id, decimal(14,4) BINARY)->Column#34, test.ts.col_0, test.ts.col_1, test.ts.id, test.ts.col_2", " └─TableFullScan 10000.00 batchCop[tiflash] table:ts keep order:false, stats:pseudo" ], "Warning": [ diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json index 6c946cdac4d2d..c3bbda1ed3f2c 100644 --- a/planner/core/testdata/integration_suite_out.json +++ b/planner/core/testdata/integration_suite_out.json @@ -1332,7 +1332,7 @@ "└─IndexRangeScan 20.00 cop[tikv] table:tt, index:a(a) range:[10,10], [20,20], keep order:false, stats:pseudo" ], "Warnings": [ - "Warning 1105 IndexMerge is inapplicable or disabled" + "Warning 1105 IndexMerge is inapplicable." ] }, { @@ -1342,7 +1342,7 @@ "└─IndexRangeScan 6666.67 cop[tikv] table:tt, index:a(a) range:[-inf,10), [15,15], (20,+inf], keep order:false, stats:pseudo" ], "Warnings": [ - "Warning 1105 IndexMerge is inapplicable or disabled" + "Warning 1105 IndexMerge is inapplicable." 
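(Editorial sketch — the warning text just above comes from the IndexMerge path-building change near the top of this patch. The new logic keeps the whole DNF condition in the IndexMerge path's TableFilters whenever any partial path carries table filters, or carries filters that cannot be pushed to TiKV. A minimal, self-contained sketch of that control flow follows; the types and the canPushDown predicate are stand-ins for the planner's AccessPath machinery and expression.CanExprsPushDown, not the real implementation:

package main

import "fmt"

type expr string

type accessPath struct {
	IndexFilters []expr
	TableFilters []expr
}

type indexMergePath struct {
	PartialPaths []*accessPath
	TableFilters []expr
}

// canPushDown stands in for expression.CanExprsPushDown; here we simply
// treat one hard-coded expression as non-pushable.
func canPushDown(exprs []expr) bool {
	for _, e := range exprs {
		if string(e) == "substring(a, 1, 2) = 'ab'" {
			return false
		}
	}
	return true
}

// buildIndexMerge keeps the whole DNF condition in the IndexMerge path's
// TableFilters when any partial path has table filters, or has filters
// that cannot be pushed down, mirroring the patched planner logic.
func buildIndexMerge(partials []*accessPath, dnf expr) *indexMergePath {
	p := &indexMergePath{PartialPaths: partials}
	var addCurrentFilter bool
	for _, path := range partials {
		if len(path.TableFilters) > 0 {
			addCurrentFilter = true
		}
		if len(path.IndexFilters) > 0 && !canPushDown(path.IndexFilters) {
			addCurrentFilter = true
			path.IndexFilters = nil // the whole DNF goes to TableFilters instead
		}
		if len(path.TableFilters) > 0 && !canPushDown(path.TableFilters) {
			addCurrentFilter = true
			path.TableFilters = nil
		}
	}
	if addCurrentFilter {
		p.TableFilters = append(p.TableFilters, dnf)
	}
	return p
}

func main() {
	partials := []*accessPath{
		{IndexFilters: []expr{"substring(a, 1, 2) = 'ab'"}},
		{},
	}
	p := buildIndexMerge(partials, "a < 1 or (a > 2 and substring(a, 1, 2) = 'ab')")
	fmt.Println(p.TableFilters) // the full DNF is kept, to be applied above the reader
}

The companion task.go hunk in this patch serves the same fix: it factors the rootTaskConds handling out into handleRootTaskConds and calls it on the IndexMerge branch of convertToRootTaskImpl as well, so conditions held back this way are still applied in a root-level Selection instead of being silently dropped.)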
] } ] diff --git a/planner/core/testdata/ordered_result_mode_suite_out.json b/planner/core/testdata/ordered_result_mode_suite_out.json index 80d8b06a86fd6..8016c33bed6f4 100644 --- a/planner/core/testdata/ordered_result_mode_suite_out.json +++ b/planner/core/testdata/ordered_result_mode_suite_out.json @@ -401,7 +401,7 @@ "Plan": [ "Projection_8 10000.00 root Column#6", "└─Sort_9 10000.00 root test.t1.b, test.t1.a, Column#6", - " └─Window_11 10000.00 root sum(cast(test.t1.b, decimal(32,0) BINARY))->Column#6 over(partition by test.t1.a)", + " └─Window_11 10000.00 root sum(cast(test.t1.b, decimal(10,0) BINARY))->Column#6 over(partition by test.t1.a)", " └─TableReader_13 10000.00 root data:TableFullScan_12", " └─TableFullScan_12 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" ] diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 75db6cb736e99..f70d1eb7e5550 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -1705,7 +1705,7 @@ "SQL": "select /*+ HASH_AGG(), AGG_TO_COP() */ sum(distinct b) from pt;", "Plan": [ "HashAgg 1.00 root funcs:sum(distinct Column#9)->Column#4", - "└─Projection 16000.00 root cast(test.pt.b, decimal(32,0) BINARY)->Column#9", + "└─Projection 16000.00 root cast(test.pt.b, decimal(10,0) BINARY)->Column#9", " └─PartitionUnion 16000.00 root ", " ├─HashAgg 8000.00 root group by:test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b", " │ └─TableReader 8000.00 root data:HashAgg", @@ -1788,7 +1788,7 @@ "Plan": [ "Projection 8000.00 root Column#5, test.t.c, Column#5, Column#6, Column#7, Column#8, Column#9", "└─HashAgg 8000.00 root group by:Column#17, funcs:avg(Column#10)->Column#5, funcs:count(distinct Column#11, Column#12)->Column#6, funcs:count(distinct Column#13)->Column#7, funcs:count(distinct Column#14)->Column#8, funcs:sum(Column#15)->Column#9, funcs:firstrow(Column#16)->test.t.c", - " └─Projection 10000.00 root cast(test.t.b, decimal(15,4) BINARY)->Column#10, test.t.a, test.t.b, test.t.a, test.t.c, cast(test.t.b, decimal(32,0) BINARY)->Column#15, test.t.c, test.t.c", + " └─Projection 10000.00 root cast(test.t.b, decimal(15,4) BINARY)->Column#10, test.t.a, test.t.b, test.t.a, test.t.c, cast(test.t.b, decimal(10,0) BINARY)->Column#15, test.t.c, test.t.c", " └─TableReader 10000.00 root data:TableFullScan", " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" ], @@ -1850,7 +1850,7 @@ "SQL": "select /*+ HASH_AGG(), AGG_TO_COP() */ sum(distinct b) from pt;", "Plan": [ "HashAgg 1.00 root funcs:sum(distinct Column#9)->Column#4", - "└─Projection 16000.00 root cast(test.pt.b, decimal(32,0) BINARY)->Column#9", + "└─Projection 16000.00 root cast(test.pt.b, decimal(10,0) BINARY)->Column#9", " └─PartitionUnion 16000.00 root ", " ├─HashAgg 8000.00 root group by:test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b", " │ └─TableReader 8000.00 root data:HashAgg", @@ -1892,7 +1892,7 @@ "SQL": "select /*+ HASH_AGG(), AGG_TO_COP() */ sum(distinct b) from pt;", "Plan": [ "HashAgg 1.00 root funcs:sum(distinct Column#9)->Column#4", - "└─Projection 16000.00 root cast(test.pt.b, decimal(32,0) BINARY)->Column#9", + "└─Projection 16000.00 root cast(test.pt.b, decimal(10,0) BINARY)->Column#9", " └─PartitionUnion 16000.00 root ", " ├─HashAgg 8000.00 root group by:test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b, funcs:firstrow(test.pt.b)->test.pt.b", " │ └─TableReader 8000.00 root data:HashAgg", diff 
--git a/planner/core/testdata/plan_suite_unexported_out.json b/planner/core/testdata/plan_suite_unexported_out.json index 6401df6aa9264..391797fa59644 100644 --- a/planner/core/testdata/plan_suite_unexported_out.json +++ b/planner/core/testdata/plan_suite_unexported_out.json @@ -93,7 +93,7 @@ "Join{DataScan(t1)->DataScan(t2)}(test.t.a,test.t.a)->Projection", "Dual->Projection", "DataScan(t)->Projection->Projection->Window(min(test.t.a)->Column#14)->Sel([lt(test.t.a, 10) eq(test.t.b, Column#14)])->Projection->Projection", - "DataScan(t)->Projection->Projection->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14)->Sel([gt(Column#14, cast(test.t.b, decimal(20,0) BINARY))])->Projection->Projection" + "DataScan(t)->Projection->Projection->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14)->Sel([gt(Column#14, cast(test.t.b, decimal(20,0) BINARY))])->Projection->Projection" ] }, { @@ -189,12 +189,12 @@ "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "[planner:1054]Unknown column 'z' in 'field list'", - "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#14 over())->Sort->Projection", "IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over())->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "TableReader(Table(t)->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Sort->Projection", - "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#38 over())->MaxOneRow}->Sel([Column#38])->Projection", + "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#38 over())->MaxOneRow}->Sel([Column#38])->Projection", "[planner:3594]You cannot use the alias 'w' of an expression containing a window function in this context.'", "[planner:1247]Reference 'sum_a' not supported (reference to window function)", "[planner:3579]Window name 'w2' is not defined.", @@ -203,11 +203,11 @@ "[planner:3581]A window which depends on another cannot define partitioning.", "[planner:3581]A window which depends on another cannot define partitioning.", "[planner:3582]Window 'w' has a frame definition, so cannot be referenced by another window.", - "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", + "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", 
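(Editorial sketch — the long run of expectation churn in these testdata files follows a single planner change: the decimal type that integer arguments are cast to before sum/avg is now derived from the argument's own width rather than a loose upper bound such as decimal(65,0) or decimal(32,0). In the plans above, sum over an int column now casts to decimal(10,0); plus(test.t.id, 1) and isnull(...), both bigint-typed, cast to decimal(20,0); and avg over an int becomes decimal(14,4). A sketch of the inferred rule follows; the width table and the four extra fractional digits for avg are assumptions read off these expected plans, not TiDB's actual code:

package main

import "fmt"

// maxDigits maps an integer type to the maximum number of decimal digits
// it can hold (bigint needs 20 to also cover unsigned values).
var maxDigits = map[string]int{
	"tinyint":  3,
	"smallint": 5,
	"int":      10,
	"bigint":   20,
}

// sumCastType returns the decimal(p,0) an integer column is cast to
// before SUM, e.g. int -> decimal(10,0) instead of the old decimal(65,0).
func sumCastType(intType string) string {
	return fmt.Sprintf("decimal(%d,0)", maxDigits[intType])
}

// avgCastType adds four fractional digits, matching the decimal(14,4)
// seen for avg over an int column in the expectations above.
func avgCastType(intType string) string {
	return fmt.Sprintf("decimal(%d,4)", maxDigits[intType]+4)
}

func main() {
	fmt.Println(sumCastType("int"))    // decimal(10,0)
	fmt.Println(sumCastType("bigint")) // decimal(20,0)
	fmt.Println(avgCastType("int"))    // decimal(14,4)
}

Tightening the cast type this way narrows the intermediate decimal the aggregate accumulates into, which is why every sum/avg projection in the expected plans changes while the plan shapes themselves stay identical.)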
"[planner:3583]Window '' cannot inherit 'w' since both contain an ORDER BY clause.", "[planner:3591]Window 'w1' is defined twice.", "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "[planner:1235]This version of TiDB doesn't yet support 'GROUPS'", "[planner:3584]Window '': frame start cannot be UNBOUNDED FOLLOWING.", "[planner:3585]Window '': frame end cannot be UNBOUNDED PRECEDING.", @@ -219,7 +219,7 @@ "[planner:3590]Window '' has a non-constant frame bound.", "[planner:3586]Window '': frame start or end is negative, NULL or of non-integral type", "[planner:3588]Window '' with RANGE frame has ORDER BY expression of datetime type. Only INTERVAL bound value allowed.", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(order by test.t.a range between 1.0 preceding and 1 following))->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(order by test.t.a range between 1.0 preceding and 1 following))->Projection", "IndexReader(Index(t.f)[[NULL,+inf]])->Window(row_number()->Column#14 over())->Projection", "TableReader(Table(t))->HashAgg->Window(max(Column#13)->Column#15 over(rows between 1 preceding and 1 following))->Projection", "[planner:1210]Incorrect arguments to nth_value", @@ -229,11 +229,11 @@ "IndexReader(Index(t.f)[[NULL,+inf]])->Window(ntile()->Column#14 over())->Projection", "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Projection", "TableReader(Table(t))->Window(nth_value(test.t.i_date, 1)->Column#14 over())->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(65,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(10,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", "[planner:3593]You cannot use the window function 'sum' in this context.'", "[planner:3593]You cannot use the window function 'sum' in this context.'", "[planner:3593]You cannot use the window function 'row_number' in this context.'", - "TableReader(Table(t))->Sort->Window(sum(cast(test.t.c, decimal(65,0) BINARY))->Column#17 over(partition by test.t.a order by test.t.c range between unbounded preceding and current row))->Sort->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#18 over(order by test.t.a, test.t.b, test.t.c range between unbounded preceding and current row))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#19 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row))->Window(sum(cast(test.t.d, decimal(65,0) BINARY))->Column#20 over())->Projection", + "TableReader(Table(t))->Sort->Window(sum(cast(test.t.c, decimal(10,0) BINARY))->Column#17 over(partition by test.t.a order by test.t.c range between unbounded preceding and current row))->Sort->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#18 over(order by test.t.a, test.t.b, test.t.c 
range between unbounded preceding and current row))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#19 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row))->Window(sum(cast(test.t.d, decimal(10,0) BINARY))->Column#20 over())->Projection", "[planner:3587]Window 'w1' with RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression, of numeric or temporal type", "TableReader(Table(t))->Sort->Window(dense_rank()->Column#14 over(partition by test.t.b order by test.t.a desc, test.t.b desc))->Projection", "[planner:3587]Window 'w1' with RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression, of numeric or temporal type", @@ -262,12 +262,12 @@ "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "[planner:1054]Unknown column 'z' in 'field list'", - "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#14 over())->Sort->Projection", "IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over())->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "TableReader(Table(t)->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Sort->Projection", - "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#38 over())->MaxOneRow}->Sel([Column#38])->Projection", + "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#38 over())->MaxOneRow}->Sel([Column#38])->Projection", "[planner:3594]You cannot use the alias 'w' of an expression containing a window function in this context.'", "[planner:1247]Reference 'sum_a' not supported (reference to window function)", "[planner:3579]Window name 'w2' is not defined.", @@ -276,11 +276,11 @@ "[planner:3581]A window which depends on another cannot define partitioning.", "[planner:3581]A window which depends on another cannot define partitioning.", "[planner:3582]Window 'w' has a frame definition, so cannot be referenced by another window.", - "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", + "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", "[planner:3583]Window '' cannot inherit 'w' since both contain an ORDER BY clause.", "[planner:3591]Window 'w1' is defined twice.", "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) 
BINARY))->Column#14 over(partition by test.t.a))->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "[planner:1235]This version of TiDB doesn't yet support 'GROUPS'", "[planner:3584]Window '': frame start cannot be UNBOUNDED FOLLOWING.", "[planner:3585]Window '': frame end cannot be UNBOUNDED PRECEDING.", @@ -292,7 +292,7 @@ "[planner:3590]Window '' has a non-constant frame bound.", "[planner:3586]Window '': frame start or end is negative, NULL or of non-integral type", "[planner:3588]Window '' with RANGE frame has ORDER BY expression of datetime type. Only INTERVAL bound value allowed.", - "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(order by test.t.a range between 1.0 preceding and 1 following))->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#14 over(order by test.t.a range between 1.0 preceding and 1 following))->Projection", "IndexReader(Index(t.f)[[NULL,+inf]])->Window(row_number()->Column#14 over())->Projection", "TableReader(Table(t))->HashAgg->Window(max(Column#13)->Column#15 over(rows between 1 preceding and 1 following))->Projection", "[planner:1210]Incorrect arguments to nth_value", @@ -302,11 +302,11 @@ "IndexReader(Index(t.f)[[NULL,+inf]])->Window(ntile()->Column#14 over())->Projection", "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection", "TableReader(Table(t))->Window(nth_value(test.t.i_date, 1)->Column#14 over())->Projection", - "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(65,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", + "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(10,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", "[planner:3593]You cannot use the window function 'sum' in this context.'", "[planner:3593]You cannot use the window function 'sum' in this context.'", "[planner:3593]You cannot use the window function 'row_number' in this context.'", - "TableReader(Table(t))->Sort->Window(sum(cast(test.t.c, decimal(65,0) BINARY))->Column#17 over(partition by test.t.a order by test.t.c range between unbounded preceding and current row))->Sort->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#18 over(order by test.t.a, test.t.b, test.t.c range between unbounded preceding and current row))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#19 over(partition by test.t.a order by test.t.b range between unbounded preceding and current row))->Window(sum(cast(test.t.d, decimal(65,0) BINARY))->Column#20 over())->Projection", + "TableReader(Table(t))->Sort->Window(sum(cast(test.t.c, decimal(10,0) BINARY))->Column#17 over(partition by test.t.a order by test.t.c range between unbounded preceding and current row))->Sort->Window(sum(cast(test.t.b, decimal(10,0) BINARY))->Column#18 over(order by test.t.a, test.t.b, test.t.c range between unbounded preceding and current row))->Window(sum(cast(test.t.a, decimal(10,0) BINARY))->Column#19 
over(partition by test.t.a order by test.t.b range between unbounded preceding and current row))->Window(sum(cast(test.t.d, decimal(10,0) BINARY))->Column#20 over())->Projection", "[planner:3587]Window 'w1' with RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression, of numeric or temporal type", "TableReader(Table(t))->Sort->Window(dense_rank()->Column#14 over(partition by test.t.b order by test.t.a desc, test.t.b desc))->Partition(execution info: concurrency:4, data sources:[TableReader_9])->Projection", "[planner:3587]Window 'w1' with RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression, of numeric or temporal type", diff --git a/planner/core/testdata/stats_suite_out.json b/planner/core/testdata/stats_suite_out.json index f7e40ddcc0e61..369fbe4fb0adf 100644 --- a/planner/core/testdata/stats_suite_out.json +++ b/planner/core/testdata/stats_suite_out.json @@ -275,7 +275,7 @@ "SQL": "select count(tmp.a_sum) from (select t1.a as a, t1.b as b, sum(a) over() as a_sum from t1) tmp group by tmp.a, tmp.b", "Plan": [ "HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#5)->Column#6", - "└─Window 4.00 root sum(cast(test.t1.a, decimal(32,0) BINARY))->Column#5 over()", + "└─Window 4.00 root sum(cast(test.t1.a, decimal(10,0) BINARY))->Column#5 over()", " └─TableReader 4.00 root data:TableFullScan", " └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false" ] diff --git a/server/driver_tidb.go b/server/driver_tidb.go index dfe88d5364114..0840cceb86c0a 100644 --- a/server/driver_tidb.go +++ b/server/driver_tidb.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/planner" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -165,10 +164,8 @@ func (ts *TiDBStatement) Close() error { if !ok { return errors.Errorf("invalid CachedPrepareStmt type") } - preparedAst := preparedObj.PreparedAst - bindSQL := planner.GetBindSQL4PlanCache(ts.ctx, preparedAst.Stmt) ts.ctx.PreparedPlanCache().Delete(core.NewPlanCacheKey( - ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion, bindSQL)) + ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion)) } ts.ctx.GetSessionVars().RemovePreparedStmt(ts.id) } diff --git a/session/bootstrap_serial_test.go b/session/bootstrap_test.go similarity index 100% rename from session/bootstrap_serial_test.go rename to session/bootstrap_test.go diff --git a/session/clustered_index_serial_test.go b/session/clustered_index_serial_test.go deleted file mode 100644 index 056c525944ac0..0000000000000 --- a/session/clustered_index_serial_test.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
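(Editorial sketch — both the serial test file deleted below and the consolidated clustered_index_test.go call a createTestKit helper that this patch never defines. For readers following along, a plausible minimal version is sketched here; the real definition lives elsewhere in the session_test package, and this one is an assumption based only on the testkit calls visible in the tests:

package session_test

import (
	"testing"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/testkit"
)

// createTestKit returns a TestKit bound to the given store with the
// default "test" database selected, so each test can create tables
// without qualifying names.
func createTestKit(t *testing.T, store kv.Storage) *testkit.TestKit {
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	return tk
}

Tests that need a specific clustered-index behavior then configure it explicitly after the helper runs, as the bodies below do via set @@tidb_enable_clustered_index or by assigning the session's EnableClusteredIndex mode.)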
- -package session_test - -import ( - "fmt" - "math/rand" - "strings" - "testing" - - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/errno" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util/collate" - "github.com/pingcap/tidb/util/israce" -) - -func TestCreateClusteredTable(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := createTestKit(t, store) - tk.MustExec("set @@tidb_enable_clustered_index = 'int_only';") - tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") - tk.MustExec("create table t1(id int primary key, v int)") - tk.MustExec("create table t2(id varchar(10) primary key, v int)") - tk.MustExec("create table t3(id int primary key clustered, v int)") - tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") - tk.MustExec("create table t5(id int primary key nonclustered, v int)") - tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") - tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") - tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") - tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) - - tk.MustExec("set @@tidb_enable_clustered_index = 'off';") - tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") - tk.MustExec("create table t1(id int primary key, v int)") - tk.MustExec("create table t2(id varchar(10) primary key, v int)") - tk.MustExec("create table t3(id int primary key clustered, v int)") - tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") - tk.MustExec("create table t5(id int primary key nonclustered, v int)") - tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") - tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") - tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") - tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES 
NO")) - - tk.MustExec("set @@tidb_enable_clustered_index = 'on';") - tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") - tk.MustExec("create table t1(id int primary key, v int)") - tk.MustExec("create table t2(id varchar(10) primary key, v int)") - tk.MustExec("create table t3(id int primary key clustered, v int)") - tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") - tk.MustExec("create table t5(id int primary key nonclustered, v int)") - tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") - tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") - tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") - tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) - - tk.MustExec("set @@tidb_enable_clustered_index = 'int_only';") - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") - tk.MustExec("create table t1(id int primary key, v int)") - tk.MustExec("create table t2(id varchar(10) primary key, v int)") - tk.MustExec("create table t3(id int primary key clustered, v int)") - tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") - tk.MustExec("create table t5(id int primary key nonclustered, v int)") - tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") - tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") - tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") - tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) - tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) - tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) -} - -// Test for union scan in prefixed clustered index table. -// See https://github.com/pingcap/tidb/issues/22069. 
-func TestClusteredUnionScanOnPrefixingPrimaryKey(t *testing.T) { - originCollate := collate.NewCollationEnabled() - collate.SetNewCollationEnabledForTest(false) - defer collate.SetNewCollationEnabledForTest(originCollate) - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := createTestKit(t, store) - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (col_1 varchar(255), col_2 tinyint, primary key idx_1 (col_1(1)));") - tk.MustExec("insert into t values ('aaaaa', -38);") - tk.MustExec("insert into t values ('bbbbb', -48);") - - tk.MustExec("begin PESSIMISTIC;") - tk.MustExec("update t set col_2 = 47 where col_1 in ('aaaaa') order by col_1,col_2;") - tk.MustQuery("select * from t;").Check(testkit.Rows("aaaaa 47", "bbbbb -48")) - tk.MustGetErrCode("insert into t values ('bb', 0);", errno.ErrDupEntry) - tk.MustGetErrCode("insert into t values ('aa', 0);", errno.ErrDupEntry) - tk.MustExec("commit;") - tk.MustQuery("select * from t;").Check(testkit.Rows("aaaaa 47", "bbbbb -48")) - tk.MustExec("admin check table t;") -} - -// https://github.com/pingcap/tidb/issues/22453 -func TestClusteredIndexSplitAndAddIndex2(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := createTestKit(t, store) - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (a int, b enum('Alice'), c int, primary key (c, b));") - tk.MustExec("insert into t values (-1,'Alice',100);") - tk.MustExec("insert into t values (-1,'Alice',7000);") - tk.MustQuery("split table t between (0,'Alice') and (10000,'Alice') regions 2;").Check(testkit.Rows("1 1")) - tk.MustExec("set @@global.tidb_ddl_error_count_limit = 3;") - tk.MustExec("alter table t add index idx (c);") - tk.MustExec("admin check table t;") -} - -func TestClusteredIndexSyntax(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - const showPKType = `select tidb_pk_type from information_schema.tables where table_schema = 'test' and table_name = 't';` - const nonClustered, clustered = `NONCLUSTERED`, `CLUSTERED` - assertPkType := func(sql string, pkType string) { - tk.MustExec("drop table if exists t;") - tk.MustExec(sql) - tk.MustQuery(showPKType).Check(testkit.Rows(pkType)) - } - - // Test single integer column as the primary key. - clusteredDefault := clustered - assertPkType("create table t (a int primary key, b int);", clusteredDefault) - assertPkType("create table t (a int, b int, primary key(a) clustered);", clustered) - assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] clustered */);", clustered) - assertPkType("create table t (a int, b int, primary key(a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] nonclustered */);", nonClustered) - - // Test for clustered index. 
- tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - assertPkType("create table t (a int, b varchar(255), primary key(b, a));", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - assertPkType("create table t (a int, b varchar(255), primary key(b, a));", clusteredDefault) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] nonclustered */);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] clustered */);", clustered) - - tk.MustGetErrCode("create table t (a varchar(255) unique key clustered);", errno.ErrParse) - tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) reference t1(a) clustered);", errno.ErrParse) - tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) clustered reference t1(a));", errno.ErrParse) - tk.MustGetErrCode("create table t (a varchar(255) clustered);", errno.ErrParse) - - errMsg := "[ddl:8200]CLUSTERED/NONCLUSTERED keyword is only supported for primary key" - tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) clustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) nonclustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) clustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) nonclustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), key(a) clustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), key(a) nonclustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), index(a) clustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), index(a) nonclustered);", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) clustered)", errMsg) - tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) nonclustered)", errMsg) -} - -func TestPrefixClusteredIndexAddIndexAndRecover(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk1 := testkit.NewTestKit(t, store) - tk1.MustExec("use test;") - tk1.MustExec("drop table if exists t;") - defer func() { - tk1.MustExec("drop table if exists t;") - }() - - tk1.MustExec("create table t(a char(3), b char(3), primary key(a(1)) clustered)") - tk1.MustExec("insert into t values ('aaa', 'bbb')") - tk1.MustExec("alter table t add index idx(b)") - tk1.MustQuery("select * from t use index(idx)").Check(testkit.Rows("aaa bbb")) - tk1.MustExec("admin check table t") - tk1.MustExec("admin recover index t idx") - tk1.MustQuery("select * from t use index(idx)").Check(testkit.Rows("aaa bbb")) - tk1.MustExec("admin check table t") -} - -func TestPartitionTable(t *testing.T) { - if israce.RaceEnabled { - t.Skip("exhaustive types test, skip race test") - } - - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create database test_view") - tk.MustExec("use test_view") 
- tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`create table thash (a int, b int, c varchar(32), primary key(a, b) clustered) partition by hash(a) partitions 4`) - tk.MustExec(`create table trange (a int, b int, c varchar(32), primary key(a, b) clustered) partition by range columns(a) ( - partition p0 values less than (3000), - partition p1 values less than (6000), - partition p2 values less than (9000), - partition p3 values less than (10000))`) - tk.MustExec(`create table tnormal (a int, b int, c varchar(32), primary key(a, b))`) - - vals := make([]string, 0, 4000) - existedPK := make(map[string]struct{}, 4000) - for i := 0; i < 4000; { - a := rand.Intn(10000) - b := rand.Intn(10000) - pk := fmt.Sprintf("%v, %v", a, b) - if _, ok := existedPK[pk]; ok { - continue - } - existedPK[pk] = struct{}{} - i++ - vals = append(vals, fmt.Sprintf(`(%v, %v, '%v')`, a, b, rand.Intn(10000))) - } - - tk.MustExec("insert into thash values " + strings.Join(vals, ", ")) - tk.MustExec("insert into trange values " + strings.Join(vals, ", ")) - tk.MustExec("insert into tnormal values " + strings.Join(vals, ", ")) - - for i := 0; i < 200; i++ { - cond := fmt.Sprintf("where a in (%v, %v, %v) and b < %v", rand.Intn(10000), rand.Intn(10000), rand.Intn(10000), rand.Intn(10000)) - result := tk.MustQuery("select * from tnormal " + cond).Sort().Rows() - tk.MustQuery("select * from thash use index(primary) " + cond).Sort().Check(result) - tk.MustQuery("select * from trange use index(primary) " + cond).Sort().Check(result) - } -} - -// https://github.com/pingcap/tidb/issues/23106 -func TestClusteredIndexDecodeRestoredDataV5(t *testing.T) { - defer collate.SetNewCollationEnabledForTest(false) - collate.SetNewCollationEnabledForTest(true) - - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (id1 int, id2 varchar(10), a1 int, primary key(id1, id2) clustered) collate utf8mb4_general_ci;") - tk.MustExec("insert into t values (1, 'asd', 1), (1, 'dsa', 1);") - tk.MustGetErrCode("alter table t add unique index t_idx(id1, a1);", errno.ErrDupEntry) - - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (id1 int, id2 varchar(10), a1 int, primary key(id1, id2) clustered, unique key t_idx(id1, a1)) collate utf8mb4_general_ci;") - tk.MustExec("begin;") - tk.MustExec("insert into t values (1, 'asd', 1);") - tk.MustQuery("select * from t use index (t_idx);").Check(testkit.Rows("1 asd 1")) - tk.MustExec("commit;") - tk.MustExec("admin check table t;") - tk.MustExec("drop table t;") -} - -// https://github.com/pingcap/tidb/issues/23178 -func TestPrefixedClusteredIndexUniqueKeyWithNewCollation(t *testing.T) { - defer collate.SetNewCollationEnabledForTest(false) - collate.SetNewCollationEnabledForTest(true) - - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("create table t (a text collate utf8mb4_general_ci not null, b int(11) not null, " + - "primary key (a(10), b) clustered, key idx(a(2)) ) default charset=utf8mb4 collate=utf8mb4_bin;") - tk.MustExec("insert into t values ('aaa', 2);") - // Key-value content: sk = sortKey, p = prefixed - // row record: sk(aaa), 2 -> aaa - // index 
record: sk(p(aa)), {sk(aaa), 2} -> restore data(aaa) - tk.MustExec("admin check table t;") - tk.MustExec("drop table t;") -} - -func TestClusteredIndexNewCollationWithOldRowFormat(t *testing.T) { - // This case maybe not useful, because newCollation isn't convenience to run on TiKV(it's required serialSuit) - // but unistore doesn't support old row format. - defer collate.SetNewCollationEnabledForTest(false) - collate.SetNewCollationEnabledForTest(true) - - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.Session().GetSessionVars().RowEncoder.Enable = false - tk.MustExec("drop table if exists t2") - tk.MustExec("create table t2(col_1 varchar(132) CHARACTER SET utf8 COLLATE utf8_unicode_ci, primary key(col_1) clustered)") - tk.MustExec("insert into t2 select 'aBc'") - tk.MustQuery("select col_1 from t2 where col_1 = 'aBc'").Check(testkit.Rows("aBc")) -} diff --git a/session/clustered_index_test.go b/session/clustered_index_test.go index b993bd3405a6d..32de5a4348632 100644 --- a/session/clustered_index_test.go +++ b/session/clustered_index_test.go @@ -15,14 +15,20 @@ package session_test import ( + "fmt" + "math/rand" + "strings" "testing" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/testdata" + "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/israce" "github.com/stretchr/testify/require" ) @@ -421,3 +427,315 @@ func TestClusteredIndexSelectWhereInNull(t *testing.T) { tk.MustExec("create table t (a datetime, b bigint, primary key (a));") tk.MustQuery("select * from t where a in (null);").Check(testkit.Rows( /* empty result */ )) } + +func TestCreateClusteredTable(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := createTestKit(t, store) + tk.MustExec("set @@tidb_enable_clustered_index = 'int_only';") + tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") + tk.MustExec("create table t1(id int primary key, v int)") + tk.MustExec("create table t2(id varchar(10) primary key, v int)") + tk.MustExec("create table t3(id int primary key clustered, v int)") + tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") + tk.MustExec("create table t5(id int primary key nonclustered, v int)") + tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") + tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") + tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") + tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) + 
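// The trailing YES/NO in each SHOW INDEX row above is TiDB's Clustered attribute: under 'int_only' an integer primary key defaults to clustered, a non-integer one to nonclustered, and an explicit CLUSTERED/NONCLUSTERED always takes precedence. +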
tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) + + tk.MustExec("set @@tidb_enable_clustered_index = 'off';") + tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") + tk.MustExec("create table t1(id int primary key, v int)") + tk.MustExec("create table t2(id varchar(10) primary key, v int)") + tk.MustExec("create table t3(id int primary key clustered, v int)") + tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") + tk.MustExec("create table t5(id int primary key nonclustered, v int)") + tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") + tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") + tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") + tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) + + tk.MustExec("set @@tidb_enable_clustered_index = 'on';") + tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") + tk.MustExec("create table t1(id int primary key, v int)") + tk.MustExec("create table t2(id varchar(10) primary key, v int)") + tk.MustExec("create table t3(id int primary key clustered, v int)") + tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") + tk.MustExec("create table t5(id int primary key nonclustered, v int)") + tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") + tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") + tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") + tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) + + tk.MustExec("set @@tidb_enable_clustered_index = 'int_only';") + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.AlterPrimaryKey = true + }) + tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7, t8") + tk.MustExec("create table t1(id int primary key, v int)") + tk.MustExec("create table t2(id varchar(10) primary key, 
v int)") + tk.MustExec("create table t3(id int primary key clustered, v int)") + tk.MustExec("create table t4(id varchar(10) primary key clustered, v int)") + tk.MustExec("create table t5(id int primary key nonclustered, v int)") + tk.MustExec("create table t6(id varchar(10) primary key nonclustered, v int)") + tk.MustExec("create table t7(id varchar(10), v int, primary key (id) /*T![clustered_index] CLUSTERED */)") + tk.MustExec("create table t8(id varchar(10), v int, primary key (id) /*T![clustered_index] NONCLUSTERED */)") + tk.MustQuery("show index from t1").Check(testkit.Rows("t1 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t2").Check(testkit.Rows("t2 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t3").Check(testkit.Rows("t3 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t4").Check(testkit.Rows("t4 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t5").Check(testkit.Rows("t5 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t6").Check(testkit.Rows("t6 0 PRIMARY 1 id A 0 BTREE YES NO")) + tk.MustQuery("show index from t7").Check(testkit.Rows("t7 0 PRIMARY 1 id A 0 BTREE YES YES")) + tk.MustQuery("show index from t8").Check(testkit.Rows("t8 0 PRIMARY 1 id A 0 BTREE YES NO")) +} + +// Test for union scan in prefixed clustered index table. +// See https://github.com/pingcap/tidb/issues/22069. +func TestClusteredUnionScanOnPrefixingPrimaryKey(t *testing.T) { + originCollate := collate.NewCollationEnabled() + collate.SetNewCollationEnabledForTest(false) + defer collate.SetNewCollationEnabledForTest(originCollate) + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := createTestKit(t, store) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (col_1 varchar(255), col_2 tinyint, primary key idx_1 (col_1(1)));") + tk.MustExec("insert into t values ('aaaaa', -38);") + tk.MustExec("insert into t values ('bbbbb', -48);") + + tk.MustExec("begin PESSIMISTIC;") + tk.MustExec("update t set col_2 = 47 where col_1 in ('aaaaa') order by col_1,col_2;") + tk.MustQuery("select * from t;").Check(testkit.Rows("aaaaa 47", "bbbbb -48")) + tk.MustGetErrCode("insert into t values ('bb', 0);", errno.ErrDupEntry) + tk.MustGetErrCode("insert into t values ('aa', 0);", errno.ErrDupEntry) + tk.MustExec("commit;") + tk.MustQuery("select * from t;").Check(testkit.Rows("aaaaa 47", "bbbbb -48")) + tk.MustExec("admin check table t;") +} + +// https://github.com/pingcap/tidb/issues/22453 +func TestClusteredIndexSplitAndAddIndex2(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := createTestKit(t, store) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b enum('Alice'), c int, primary key (c, b));") + tk.MustExec("insert into t values (-1,'Alice',100);") + tk.MustExec("insert into t values (-1,'Alice',7000);") + tk.MustQuery("split table t between (0,'Alice') and (10000,'Alice') regions 2;").Check(testkit.Rows("1 1")) + tk.MustExec("set @@global.tidb_ddl_error_count_limit = 3;") + tk.MustExec("alter table t add index idx (c);") + tk.MustExec("admin check table t;") +} + +func TestClusteredIndexSyntax(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + const showPKType = `select tidb_pk_type from information_schema.tables where table_schema = 'test' and table_name = 't';` + const nonClustered, clustered = `NONCLUSTERED`, `CLUSTERED` + 
assertPkType := func(sql string, pkType string) { + tk.MustExec("drop table if exists t;") + tk.MustExec(sql) + tk.MustQuery(showPKType).Check(testkit.Rows(pkType)) + } + + // Test single integer column as the primary key. + clusteredDefault := clustered + assertPkType("create table t (a int primary key, b int);", clusteredDefault) + assertPkType("create table t (a int, b int, primary key(a) clustered);", clustered) + assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] clustered */);", clustered) + assertPkType("create table t (a int, b int, primary key(a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] nonclustered */);", nonClustered) + + // Test for clustered index. + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + assertPkType("create table t (a int, b varchar(255), primary key(b, a));", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + assertPkType("create table t (a int, b varchar(255), primary key(b, a));", clusteredDefault) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] nonclustered */);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] clustered */);", clustered) + + tk.MustGetErrCode("create table t (a varchar(255) unique key clustered);", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) reference t1(a) clustered);", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) clustered reference t1(a));", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255) clustered);", errno.ErrParse) + + errMsg := "[ddl:8200]CLUSTERED/NONCLUSTERED keyword is only supported for primary key" + tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), key(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), key(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), index(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), index(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) clustered)", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) nonclustered)", errMsg) +} + +func TestPrefixClusteredIndexAddIndexAndRecover(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk1 := testkit.NewTestKit(t, store) + tk1.MustExec("use test;") + tk1.MustExec("drop table if exists t;") + defer func() { + tk1.MustExec("drop table if exists t;") + }() 
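+ // The clustered primary key below covers only a one-character prefix of column a, so adding and recovering the secondary index must still round-trip the full row values.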
+ + tk1.MustExec("create table t(a char(3), b char(3), primary key(a(1)) clustered)") + tk1.MustExec("insert into t values ('aaa', 'bbb')") + tk1.MustExec("alter table t add index idx(b)") + tk1.MustQuery("select * from t use index(idx)").Check(testkit.Rows("aaa bbb")) + tk1.MustExec("admin check table t") + tk1.MustExec("admin recover index t idx") + tk1.MustQuery("select * from t use index(idx)").Check(testkit.Rows("aaa bbb")) + tk1.MustExec("admin check table t") +} + +func TestPartitionTable(t *testing.T) { + if israce.RaceEnabled { + t.Skip("exhaustive types test, skip race test") + } + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("create database test_view") + tk.MustExec("use test_view") + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + + tk.MustExec(`create table thash (a int, b int, c varchar(32), primary key(a, b) clustered) partition by hash(a) partitions 4`) + tk.MustExec(`create table trange (a int, b int, c varchar(32), primary key(a, b) clustered) partition by range columns(a) ( + partition p0 values less than (3000), + partition p1 values less than (6000), + partition p2 values less than (9000), + partition p3 values less than (10000))`) + tk.MustExec(`create table tnormal (a int, b int, c varchar(32), primary key(a, b))`) + + vals := make([]string, 0, 4000) + existedPK := make(map[string]struct{}, 4000) + for i := 0; i < 4000; { + a := rand.Intn(10000) + b := rand.Intn(10000) + pk := fmt.Sprintf("%v, %v", a, b) + if _, ok := existedPK[pk]; ok { + continue + } + existedPK[pk] = struct{}{} + i++ + vals = append(vals, fmt.Sprintf(`(%v, %v, '%v')`, a, b, rand.Intn(10000))) + } + + tk.MustExec("insert into thash values " + strings.Join(vals, ", ")) + tk.MustExec("insert into trange values " + strings.Join(vals, ", ")) + tk.MustExec("insert into tnormal values " + strings.Join(vals, ", ")) + + for i := 0; i < 200; i++ { + cond := fmt.Sprintf("where a in (%v, %v, %v) and b < %v", rand.Intn(10000), rand.Intn(10000), rand.Intn(10000), rand.Intn(10000)) + result := tk.MustQuery("select * from tnormal " + cond).Sort().Rows() + tk.MustQuery("select * from thash use index(primary) " + cond).Sort().Check(result) + tk.MustQuery("select * from trange use index(primary) " + cond).Sort().Check(result) + } +} + +// https://github.com/pingcap/tidb/issues/23106 +func TestClusteredIndexDecodeRestoredDataV5(t *testing.T) { + defer collate.SetNewCollationEnabledForTest(false) + collate.SetNewCollationEnabledForTest(true) + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id1 int, id2 varchar(10), a1 int, primary key(id1, id2) clustered) collate utf8mb4_general_ci;") + tk.MustExec("insert into t values (1, 'asd', 1), (1, 'dsa', 1);") + tk.MustGetErrCode("alter table t add unique index t_idx(id1, a1);", errno.ErrDupEntry) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id1 int, id2 varchar(10), a1 int, primary key(id1, id2) clustered, unique key t_idx(id1, a1)) collate utf8mb4_general_ci;") + tk.MustExec("begin;") + tk.MustExec("insert into t values (1, 'asd', 1);") + tk.MustQuery("select * from t use index (t_idx);").Check(testkit.Rows("1 asd 1")) + tk.MustExec("commit;") + tk.MustExec("admin check table t;") + tk.MustExec("drop table t;") +} + +// 
https://github.com/pingcap/tidb/issues/23178 +func TestPrefixedClusteredIndexUniqueKeyWithNewCollation(t *testing.T) { + defer collate.SetNewCollationEnabledForTest(false) + collate.SetNewCollationEnabledForTest(true) + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("create table t (a text collate utf8mb4_general_ci not null, b int(11) not null, " + + "primary key (a(10), b) clustered, key idx(a(2)) ) default charset=utf8mb4 collate=utf8mb4_bin;") + tk.MustExec("insert into t values ('aaa', 2);") + // Key-value content: sk = sortKey, p = prefixed + // row record: sk(aaa), 2 -> aaa + // index record: sk(p(aa)), {sk(aaa), 2} -> restore data(aaa) + tk.MustExec("admin check table t;") + tk.MustExec("drop table t;") +} + +func TestClusteredIndexNewCollationWithOldRowFormat(t *testing.T) { + // This case may not be very useful: running the new collations on TiKV is inconvenient (it requires the serial suite), + // but unistore doesn't support the old row format, so keep the coverage here. + defer collate.SetNewCollationEnabledForTest(false) + collate.SetNewCollationEnabledForTest(true) + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.Session().GetSessionVars().RowEncoder.Enable = false + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t2(col_1 varchar(132) CHARACTER SET utf8 COLLATE utf8_unicode_ci, primary key(col_1) clustered)") + tk.MustExec("insert into t2 select 'aBc'") + tk.MustQuery("select col_1 from t2 where col_1 = 'aBc'").Check(testkit.Rows("aBc")) +} diff --git a/session/index_usage_sync_lease_serial_test.go b/session/index_usage_sync_lease_test.go similarity index 100% rename from session/index_usage_sync_lease_serial_test.go rename to session/index_usage_sync_lease_test.go diff --git a/session/schema_amender_serial_test.go b/session/schema_amender_test.go similarity index 100% rename from session/schema_amender_serial_test.go rename to session/schema_amender_test.go diff --git a/session/session.go b/session/session.go index f685b44d0be43..399032bd6cbfd 100644 --- a/session/session.go +++ b/session/session.go @@ -313,8 +313,7 @@ func (s *session) cleanRetryInfo() { preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) if ok { preparedAst = preparedObj.PreparedAst - bindSQL := planner.GetBindSQL4PlanCache(s, preparedAst.Stmt) - cacheKey = plannercore.NewPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion, bindSQL) + cacheKey = plannercore.NewPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion) } } } diff --git a/session/session_test.go b/session/session_test.go index e70660f172111..67c9fb64aef12 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -5420,7 +5420,7 @@ func (s *testSessionSuite) TestLocalTemporaryTableScan(c *C) { "12 112 1012", "3 113 1003", "14 114 1014", "16 116 1016", "7 117 1007", "18 118 1018", )) - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled")) + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled. 
Cannot use IndexMerge on temporary table.")) } doModify := func() { @@ -5459,7 +5459,7 @@ func (s *testSessionSuite) TestLocalTemporaryTableScan(c *C) { "3 113 1003", "14 114 1014", "7 117 9999", "18 118 1018", "12 132 1012", )) - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled")) + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on temporary table.")) } assertSelectAsUnModified() diff --git a/store/batch_coprocessor_serial_test.go b/store/batch_coprocessor_test.go similarity index 100% rename from store/batch_coprocessor_serial_test.go rename to store/batch_coprocessor_test.go diff --git a/store/driver/config_serial_test.go b/store/driver/config_test.go similarity index 100% rename from store/driver/config_serial_test.go rename to store/driver/config_test.go diff --git a/store/driver/snap_interceptor_serial_test.go b/store/driver/snap_interceptor_test.go similarity index 100% rename from store/driver/snap_interceptor_serial_test.go rename to store/driver/snap_interceptor_test.go diff --git a/store/driver/sql_fail_serial_test.go b/store/driver/sql_fail_test.go similarity index 100% rename from store/driver/sql_fail_serial_test.go rename to store/driver/sql_fail_test.go diff --git a/store/driver/txn_serial_test.go b/store/driver/txn_test.go similarity index 100% rename from store/driver/txn_serial_test.go rename to store/driver/txn_test.go diff --git a/store/gcworker/gc_worker_serial_test.go b/store/gcworker/gc_worker_test.go similarity index 100% rename from store/gcworker/gc_worker_serial_test.go rename to store/gcworker/gc_worker_test.go diff --git a/table/tables/cache_test.go b/table/tables/cache_test.go index a4dc5b4d43d68..788ecd48fd1d7 100644 --- a/table/tables/cache_test.go +++ b/table/tables/cache_test.go @@ -142,7 +142,7 @@ func TestCacheTableBasicScan(t *testing.T) { "12 112 1012", "3 113 1003", "14 114 1014", "16 116 1016", "7 117 1007", "18 118 1018", )) - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled")) + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 IndexMerge is inapplicable or disabled. Cannot use IndexMerge on TableCache.")) } assertSelect() diff --git a/table/tables/index_serial_test.go b/table/tables/index_serial_test.go deleted file mode 100644 index 16784c16d0c7e..0000000000000 --- a/table/tables/index_serial_test.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tables_test - -import ( - "context" - "io" - "testing" - "time" - - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/sessionctx/stmtctx" - "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/tables" - "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/collate" - "github.com/pingcap/tidb/util/mock" - "github.com/pingcap/tidb/util/rowcodec" - "github.com/stretchr/testify/require" -) - -func TestIndex(t *testing.T) { - tblInfo := &model.TableInfo{ - ID: 1, - Indices: []*model.IndexInfo{ - { - ID: 2, - Name: model.NewCIStr("test"), - Columns: []*model.IndexColumn{ - {Offset: 0}, - {Offset: 1}, - }, - }, - }, - Columns: []*model.ColumnInfo{ - {ID: 1, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeVarchar)}, - {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, - }, - } - index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) - - // Test ununiq index. - store, clean := testkit.CreateMockStore(t) - defer clean() - txn, err := store.Begin() - require.NoError(t, err) - - values := types.MakeDatums(1, 2) - mockCtx := mock.NewContext() - _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) - require.NoError(t, err) - - it, err := index.SeekFirst(txn) - require.NoError(t, err) - - getValues, h, err := it.Next() - require.NoError(t, err) - require.Len(t, getValues, 2) - require.Equal(t, int64(1), getValues[0].GetInt64()) - require.Equal(t, int64(2), getValues[1].GetInt64()) - require.Equal(t, int64(1), h.IntValue()) - it.Close() - sc := &stmtctx.StatementContext{TimeZone: time.Local} - exist, _, err := index.Exist(sc, txn, values, kv.IntHandle(100)) - require.NoError(t, err) - require.False(t, exist) - - exist, _, err = index.Exist(sc, txn, values, kv.IntHandle(1)) - require.NoError(t, err) - require.True(t, exist) - - err = index.Delete(sc, txn, values, kv.IntHandle(1)) - require.NoError(t, err) - - it, err = index.SeekFirst(txn) - require.NoError(t, err) - - _, _, err = it.Next() - require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) - it.Close() - - _, err = index.Create(mockCtx, txn, values, kv.IntHandle(0), nil) - require.NoError(t, err) - - _, err = index.SeekFirst(txn) - require.NoError(t, err) - - _, hit, err := index.Seek(sc, txn, values) - require.NoError(t, err) - require.False(t, hit) - - err = index.Drop(txn) - require.NoError(t, err) - - it, hit, err = index.Seek(sc, txn, values) - require.NoError(t, err) - require.False(t, hit) - - _, _, err = it.Next() - require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) - it.Close() - - it, err = index.SeekFirst(txn) - require.NoError(t, err) - - _, _, err = it.Next() - require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) - it.Close() - - err = txn.Commit(context.Background()) - require.NoError(t, err) - - tblInfo = &model.TableInfo{ - ID: 2, - Indices: []*model.IndexInfo{ - { - ID: 3, - Name: model.NewCIStr("test"), - Unique: true, - Columns: []*model.IndexColumn{ - {Offset: 0}, - {Offset: 1}, - }, - }, - }, - Columns: []*model.ColumnInfo{ - {ID: 1, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeVarchar)}, - {ID: 2, Name: 
model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, - }, - } - index = tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) - - // Test uniq index. - txn, err = store.Begin() - require.NoError(t, err) - - _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) - require.NoError(t, err) - - _, err = index.Create(mockCtx, txn, values, kv.IntHandle(2), nil) - require.NotNil(t, err) - - it, err = index.SeekFirst(txn) - require.NoError(t, err) - - getValues, h, err = it.Next() - require.NoError(t, err) - require.Len(t, getValues, 2) - require.Equal(t, int64(1), getValues[0].GetInt64()) - require.Equal(t, int64(2), getValues[1].GetInt64()) - require.Equal(t, int64(1), h.IntValue()) - it.Close() - - exist, h, err = index.Exist(sc, txn, values, kv.IntHandle(1)) - require.NoError(t, err) - require.Equal(t, int64(1), h.IntValue()) - require.True(t, exist) - - exist, h, err = index.Exist(sc, txn, values, kv.IntHandle(2)) - require.NotNil(t, err) - require.Equal(t, int64(1), h.IntValue()) - require.True(t, exist) - - err = txn.Commit(context.Background()) - require.NoError(t, err) - - _, err = index.FetchValues(make([]types.Datum, 0), nil) - require.NotNil(t, err) - - txn, err = store.Begin() - require.NoError(t, err) - - // Test the function of Next when the value of unique key is nil. - values2 := types.MakeDatums(nil, nil) - _, err = index.Create(mockCtx, txn, values2, kv.IntHandle(2), nil) - require.NoError(t, err) - it, err = index.SeekFirst(txn) - require.NoError(t, err) - getValues, h, err = it.Next() - require.NoError(t, err) - require.Len(t, getValues, 2) - require.Equal(t, nil, getValues[0].GetInterface()) - require.Equal(t, nil, getValues[1].GetInterface()) - require.Equal(t, int64(2), h.IntValue()) - it.Close() - - err = txn.Commit(context.Background()) - require.NoError(t, err) -} - -func TestCombineIndexSeek(t *testing.T) { - tblInfo := &model.TableInfo{ - ID: 1, - Indices: []*model.IndexInfo{ - { - ID: 2, - Name: model.NewCIStr("test"), - Columns: []*model.IndexColumn{ - {Offset: 1}, - {Offset: 2}, - }, - }, - }, - Columns: []*model.ColumnInfo{ - {Offset: 0}, - {Offset: 1}, - {Offset: 2}, - }, - } - index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) - - store, clean := testkit.CreateMockStore(t) - defer clean() - txn, err := store.Begin() - require.NoError(t, err) - - mockCtx := mock.NewContext() - values := types.MakeDatums("abc", "def") - _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) - require.NoError(t, err) - - index2 := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) - sc := &stmtctx.StatementContext{TimeZone: time.Local} - iter, hit, err := index2.Seek(sc, txn, types.MakeDatums("abc", nil)) - require.NoError(t, err) - defer iter.Close() - require.False(t, hit) - _, h, err := iter.Next() - require.NoError(t, err) - require.Equal(t, int64(1), h.IntValue()) -} - -func TestMultiColumnCommonHandle(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - tblInfo := buildTableInfo(t, "create table t (a int, b int, u varchar(64) unique, nu varchar(64), primary key (a, b), index nu (nu))") - var idxUnique, idxNonUnique table.Index - for _, idxInfo := range tblInfo.Indices { - idx := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - if idxInfo.Name.L == "u" { - idxUnique = idx - } else if idxInfo.Name.L == "nu" { - idxNonUnique = idx - } - } - var a, b *model.ColumnInfo - for _, col := range tblInfo.Columns { - if 
col.Name.String() == "a" { - a = col - } else if col.Name.String() == "b" { - b = col - } - } - require.NotNil(t, a) - require.NotNil(t, b) - - store, clean := testkit.CreateMockStore(t) - defer clean() - txn, err := store.Begin() - require.NoError(t, err) - mockCtx := mock.NewContext() - sc := mockCtx.GetSessionVars().StmtCtx - // create index for "insert t values (3, 2, "abc", "abc") - idxColVals := types.MakeDatums("abc") - handleColVals := types.MakeDatums(3, 2) - encodedHandle, err := codec.EncodeKey(sc, nil, handleColVals...) - require.NoError(t, err) - commonHandle, err := kv.NewCommonHandle(encodedHandle) - require.NoError(t, err) - _ = idxNonUnique - for _, idx := range []table.Index{idxUnique, idxNonUnique} { - key, _, err := idx.GenIndexKey(sc, idxColVals, commonHandle, nil) - require.NoError(t, err) - _, err = idx.Create(mockCtx, txn, idxColVals, commonHandle, nil) - require.NoError(t, err) - val, err := txn.Get(context.Background(), key) - require.NoError(t, err) - colInfo := tables.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo) - colInfo = append(colInfo, rowcodec.ColInfo{ - ID: a.ID, - IsPKHandle: false, - Ft: rowcodec.FieldTypeFromModelColumn(a), - }) - colInfo = append(colInfo, rowcodec.ColInfo{ - ID: b.ID, - IsPKHandle: false, - Ft: rowcodec.FieldTypeFromModelColumn(b), - }) - colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault, colInfo) - require.NoError(t, err) - require.Len(t, colVals, 3) - _, d, err := codec.DecodeOne(colVals[0]) - require.NoError(t, err) - require.Equal(t, "abc", d.GetString()) - _, d, err = codec.DecodeOne(colVals[1]) - require.NoError(t, err) - require.Equal(t, int64(3), d.GetInt64()) - _, d, err = codec.DecodeOne(colVals[2]) - require.NoError(t, err) - require.Equal(t, int64(2), d.GetInt64()) - handle, err := tablecodec.DecodeIndexHandle(key, val, 1) - require.NoError(t, err) - require.False(t, handle.IsInt()) - require.Equal(t, commonHandle.Encoded(), handle.Encoded()) - } -} diff --git a/table/tables/index_test.go b/table/tables/index_test.go index 5678ce8b39b18..3e70c85365bd8 100644 --- a/table/tables/index_test.go +++ b/table/tables/index_test.go @@ -1,4 +1,4 @@ -// Copyright 2016 PingCAP, Inc. +// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,23 +16,316 @@ package tables_test import ( "context" + "io" "testing" + "time" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/mock" + "github.com/pingcap/tidb/util/rowcodec" "github.com/stretchr/testify/require" ) +func TestIndex(t *testing.T) { + tblInfo := &model.TableInfo{ + ID: 1, + Indices: []*model.IndexInfo{ + { + ID: 2, + Name: model.NewCIStr("test"), + Columns: []*model.IndexColumn{ + {Offset: 0}, + {Offset: 1}, + }, + }, + }, + Columns: []*model.ColumnInfo{ + {ID: 1, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 0, FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, + }, + } + index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + // Test the non-unique index. + store, clean := testkit.CreateMockStore(t) + defer clean() + txn, err := store.Begin() + require.NoError(t, err) + + values := types.MakeDatums(1, 2) + mockCtx := mock.NewContext() + _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) + require.NoError(t, err) + + it, err := index.SeekFirst(txn) + require.NoError(t, err) + + getValues, h, err := it.Next() + require.NoError(t, err) + require.Len(t, getValues, 2) + require.Equal(t, int64(1), getValues[0].GetInt64()) + require.Equal(t, int64(2), getValues[1].GetInt64()) + require.Equal(t, int64(1), h.IntValue()) + it.Close() + sc := &stmtctx.StatementContext{TimeZone: time.Local} + exist, _, err := index.Exist(sc, txn, values, kv.IntHandle(100)) + require.NoError(t, err) + require.False(t, exist) + + exist, _, err = index.Exist(sc, txn, values, kv.IntHandle(1)) + require.NoError(t, err) + require.True(t, exist) + + err = index.Delete(sc, txn, values, kv.IntHandle(1)) + require.NoError(t, err) + + it, err = index.SeekFirst(txn) + require.NoError(t, err) + + _, _, err = it.Next() + require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) + it.Close() + + _, err = index.Create(mockCtx, txn, values, kv.IntHandle(0), nil) + require.NoError(t, err) + + _, err = index.SeekFirst(txn) + require.NoError(t, err) + + _, hit, err := index.Seek(sc, txn, values) + require.NoError(t, err) + require.False(t, hit) + + err = index.Drop(txn) + require.NoError(t, err) + + it, hit, err = index.Seek(sc, txn, values) + require.NoError(t, err) + require.False(t, hit) + + _, _, err = it.Next() + require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) + it.Close() + + it, err = index.SeekFirst(txn) + require.NoError(t, err) + + _, _, err = it.Next() + require.Truef(t, terror.ErrorEqual(err, io.EOF), "err %v", err) + it.Close() + + err = txn.Commit(context.Background()) + require.NoError(t, err) + + tblInfo = &model.TableInfo{ + ID: 2, + Indices: []*model.IndexInfo{ + { + ID: 3, + Name: model.NewCIStr("test"), + Unique: true, + Columns: []*model.IndexColumn{ + {Offset: 0}, + {Offset: 1}, + }, + }, + }, + Columns: []*model.ColumnInfo{ + {ID: 1, Name: model.NewCIStr("c2"), State: model.StatePublic, 
Offset: 0, FieldType: *types.NewFieldType(mysql.TypeVarchar)}, + {ID: 2, Name: model.NewCIStr("c2"), State: model.StatePublic, Offset: 1, FieldType: *types.NewFieldType(mysql.TypeString)}, + }, + } + index = tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + // Test the unique index. + txn, err = store.Begin() + require.NoError(t, err) + + _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) + require.NoError(t, err) + + _, err = index.Create(mockCtx, txn, values, kv.IntHandle(2), nil) + require.NotNil(t, err) + + it, err = index.SeekFirst(txn) + require.NoError(t, err) + + getValues, h, err = it.Next() + require.NoError(t, err) + require.Len(t, getValues, 2) + require.Equal(t, int64(1), getValues[0].GetInt64()) + require.Equal(t, int64(2), getValues[1].GetInt64()) + require.Equal(t, int64(1), h.IntValue()) + it.Close() + + exist, h, err = index.Exist(sc, txn, values, kv.IntHandle(1)) + require.NoError(t, err) + require.Equal(t, int64(1), h.IntValue()) + require.True(t, exist) + + exist, h, err = index.Exist(sc, txn, values, kv.IntHandle(2)) + require.NotNil(t, err) + require.Equal(t, int64(1), h.IntValue()) + require.True(t, exist) + + err = txn.Commit(context.Background()) + require.NoError(t, err) + + _, err = index.FetchValues(make([]types.Datum, 0), nil) + require.NotNil(t, err) + + txn, err = store.Begin() + require.NoError(t, err) + + // Test the behavior of Next when the unique key values are nil. + values2 := types.MakeDatums(nil, nil) + _, err = index.Create(mockCtx, txn, values2, kv.IntHandle(2), nil) + require.NoError(t, err) + it, err = index.SeekFirst(txn) + require.NoError(t, err) + getValues, h, err = it.Next() + require.NoError(t, err) + require.Len(t, getValues, 2) + require.Equal(t, nil, getValues[0].GetInterface()) + require.Equal(t, nil, getValues[1].GetInterface()) + require.Equal(t, int64(2), h.IntValue()) + it.Close() + + err = txn.Commit(context.Background()) + require.NoError(t, err) +} + +func TestCombineIndexSeek(t *testing.T) { + tblInfo := &model.TableInfo{ + ID: 1, + Indices: []*model.IndexInfo{ + { + ID: 2, + Name: model.NewCIStr("test"), + Columns: []*model.IndexColumn{ + {Offset: 1}, + {Offset: 2}, + }, + }, + }, + Columns: []*model.ColumnInfo{ + {Offset: 0}, + {Offset: 1}, + {Offset: 2}, + }, + } + index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + + store, clean := testkit.CreateMockStore(t) + defer clean() + txn, err := store.Begin() + require.NoError(t, err) + + mockCtx := mock.NewContext() + values := types.MakeDatums("abc", "def") + _, err = index.Create(mockCtx, txn, values, kv.IntHandle(1), nil) + require.NoError(t, err) + + index2 := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0]) + sc := &stmtctx.StatementContext{TimeZone: time.Local} + iter, hit, err := index2.Seek(sc, txn, types.MakeDatums("abc", nil)) + require.NoError(t, err) + defer iter.Close() + require.False(t, hit) + _, h, err := iter.Next() + require.NoError(t, err) + require.Equal(t, int64(1), h.IntValue()) +} + +func TestMultiColumnCommonHandle(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + tblInfo := buildTableInfo(t, "create table t (a int, b int, u varchar(64) unique, nu varchar(64), primary key (a, b), index nu (nu))") + var idxUnique, idxNonUnique table.Index + for _, idxInfo := range tblInfo.Indices { + idx := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) + if idxInfo.Name.L == "u" { + idxUnique = idx + } else if idxInfo.Name.L == "nu" { + idxNonUnique = idx + } + } 
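+ // Look up the ColumnInfo for a and b; their field types feed the row codec when decoding the index value below.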
+ var a, b *model.ColumnInfo + for _, col := range tblInfo.Columns { + if col.Name.String() == "a" { + a = col + } else if col.Name.String() == "b" { + b = col + } + } + require.NotNil(t, a) + require.NotNil(t, b) + + store, clean := testkit.CreateMockStore(t) + defer clean() + txn, err := store.Begin() + require.NoError(t, err) + mockCtx := mock.NewContext() + sc := mockCtx.GetSessionVars().StmtCtx + // Create the index entries for the row: insert into t values (3, 2, 'abc', 'abc'). + idxColVals := types.MakeDatums("abc") + handleColVals := types.MakeDatums(3, 2) + encodedHandle, err := codec.EncodeKey(sc, nil, handleColVals...) + require.NoError(t, err) + commonHandle, err := kv.NewCommonHandle(encodedHandle) + require.NoError(t, err) + _ = idxNonUnique + for _, idx := range []table.Index{idxUnique, idxNonUnique} { + key, _, err := idx.GenIndexKey(sc, idxColVals, commonHandle, nil) + require.NoError(t, err) + _, err = idx.Create(mockCtx, txn, idxColVals, commonHandle, nil) + require.NoError(t, err) + val, err := txn.Get(context.Background(), key) + require.NoError(t, err) + colInfo := tables.BuildRowcodecColInfoForIndexColumns(idx.Meta(), tblInfo) + colInfo = append(colInfo, rowcodec.ColInfo{ + ID: a.ID, + IsPKHandle: false, + Ft: rowcodec.FieldTypeFromModelColumn(a), + }) + colInfo = append(colInfo, rowcodec.ColInfo{ + ID: b.ID, + IsPKHandle: false, + Ft: rowcodec.FieldTypeFromModelColumn(b), + }) + colVals, err := tablecodec.DecodeIndexKV(key, val, 1, tablecodec.HandleDefault, colInfo) + require.NoError(t, err) + require.Len(t, colVals, 3) + _, d, err := codec.DecodeOne(colVals[0]) + require.NoError(t, err) + require.Equal(t, "abc", d.GetString()) + _, d, err = codec.DecodeOne(colVals[1]) + require.NoError(t, err) + require.Equal(t, int64(3), d.GetInt64()) + _, d, err = codec.DecodeOne(colVals[2]) + require.NoError(t, err) + require.Equal(t, int64(2), d.GetInt64()) + handle, err := tablecodec.DecodeIndexHandle(key, val, 1) + require.NoError(t, err) + require.False(t, handle.IsInt()) + require.Equal(t, commonHandle.Encoded(), handle.Encoded()) + } +} + func TestSingleColumnCommonHandle(t *testing.T) { tblInfo := buildTableInfo(t, "create table t (a varchar(255) primary key, u int unique, nu int, index nu (nu))") var idxUnique, idxNonUnique table.Index diff --git a/telemetry/data_window_serial_test.go b/telemetry/data_window_test.go similarity index 100% rename from telemetry/data_window_serial_test.go rename to telemetry/data_window_test.go diff --git a/telemetry/telemetry_serial_test.go b/telemetry/telemetry_serial_test.go deleted file mode 100644 index c464d46c4c6de..0000000000000 --- a/telemetry/telemetry_serial_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package telemetry_test - -import ( - "runtime" - "testing" - - "github.com/Jeffail/gabs/v2" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/telemetry" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" - "go.etcd.io/etcd/integration" -) - -func TestReport(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("integration.NewClusterV3 will create file contains a colon which is not allowed on Windows") - } - - etcdCluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) - defer etcdCluster.Terminate(t) - store, clean := testkit.CreateMockStore(t) - defer clean() - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - - config.GetGlobalConfig().EnableTelemetry = false - require.NoError(t, telemetry.ReportUsageData(se, etcdCluster.RandClient())) - - status, err := telemetry.GetTelemetryStatus(etcdCluster.RandClient()) - require.NoError(t, err) - - jsonParsed, err := gabs.ParseJSON([]byte(status)) - require.NoError(t, err) - require.True(t, jsonParsed.Path("is_error").Data().(bool)) - require.Equal(t, "telemetry is disabled", jsonParsed.Path("error_msg").Data().(string)) - require.False(t, jsonParsed.Path("is_request_sent").Data().(bool)) -} diff --git a/telemetry/telemetry_test.go b/telemetry/telemetry_test.go index c7f3cef8d92a8..56fc2da76852a 100644 --- a/telemetry/telemetry_test.go +++ b/telemetry/telemetry_test.go @@ -101,3 +101,29 @@ func TestPreview(t *testing.T) { require.NoError(t, err) require.Equal(t, "", r) } + +func TestReport(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("integration.NewClusterV3 creates a file whose name contains a colon, which is not allowed on Windows") + } + + etcdCluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer etcdCluster.Terminate(t) + store, clean := testkit.CreateMockStore(t) + defer clean() + se, err := session.CreateSession4Test(store) + require.NoError(t, err) + defer se.Close() + + config.GetGlobalConfig().EnableTelemetry = false + require.NoError(t, telemetry.ReportUsageData(se, etcdCluster.RandClient())) + + status, err := telemetry.GetTelemetryStatus(etcdCluster.RandClient()) + require.NoError(t, err) + + jsonParsed, err := gabs.ParseJSON([]byte(status)) + require.NoError(t, err) + require.True(t, jsonParsed.Path("is_error").Data().(bool)) + require.Equal(t, "telemetry is disabled", jsonParsed.Path("error_msg").Data().(string)) + require.False(t, jsonParsed.Path("is_request_sent").Data().(bool)) +} diff --git a/types/enum_serial_test.go b/types/enum_test.go similarity index 100% rename from types/enum_serial_test.go rename to types/enum_test.go diff --git a/types/mydecimal_serial_test.go b/types/mydecimal_serial_test.go deleted file mode 100644 index cd72ef7ce131a..0000000000000 --- a/types/mydecimal_serial_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -// this test will change global variable `wordBufLen`, so it must run in serial -func TestShiftMyDecimal(t *testing.T) { - type tcase struct { - input string - shift int - output string - err error - } - - var dotest = func(tests []tcase) { - for _, test := range tests { - t.Run(fmt.Sprintf("%v (shift: %v, wordBufLen: %v)", test.input, test.shift, wordBufLen), func(t *testing.T) { - var dec MyDecimal - require.NoError(t, dec.FromString([]byte(test.input))) - require.Equal(t, test.err, dec.Shift(test.shift)) - require.Equal(t, test.output, string(dec.ToString())) - }) - } - } - - wordBufLen = maxWordBufLen - tests := []tcase{ - {"123.123", 1, "1231.23", nil}, - {"123457189.123123456789000", 1, "1234571891.23123456789", nil}, - {"123457189.123123456789000", 8, "12345718912312345.6789", nil}, - {"123457189.123123456789000", 9, "123457189123123456.789", nil}, - {"123457189.123123456789000", 10, "1234571891231234567.89", nil}, - {"123457189.123123456789000", 17, "12345718912312345678900000", nil}, - {"123457189.123123456789000", 18, "123457189123123456789000000", nil}, - {"123457189.123123456789000", 19, "1234571891231234567890000000", nil}, - {"123457189.123123456789000", 26, "12345718912312345678900000000000000", nil}, - {"123457189.123123456789000", 27, "123457189123123456789000000000000000", nil}, - {"123457189.123123456789000", 28, "1234571891231234567890000000000000000", nil}, - {"000000000000000000000000123457189.123123456789000", 26, "12345718912312345678900000000000000", nil}, - {"00000000123457189.123123456789000", 27, "123457189123123456789000000000000000", nil}, - {"00000000000000000123457189.123123456789000", 28, "1234571891231234567890000000000000000", nil}, - {"123", 1, "1230", nil}, - {"123", 10, "1230000000000", nil}, - {".123", 1, "1.23", nil}, - {".123", 10, "1230000000", nil}, - {".123", 14, "12300000000000", nil}, - {"000.000", 1000, "0", nil}, - {"000.", 1000, "0", nil}, - {".000", 1000, "0", nil}, - {"1", 1000, "1", ErrOverflow}, - {"123.123", -1, "12.3123", nil}, - {"123987654321.123456789000", -1, "12398765432.1123456789", nil}, - {"123987654321.123456789000", -2, "1239876543.21123456789", nil}, - {"123987654321.123456789000", -3, "123987654.321123456789", nil}, - {"123987654321.123456789000", -8, "1239.87654321123456789", nil}, - {"123987654321.123456789000", -9, "123.987654321123456789", nil}, - {"123987654321.123456789000", -10, "12.3987654321123456789", nil}, - {"123987654321.123456789000", -11, "1.23987654321123456789", nil}, - {"123987654321.123456789000", -12, "0.123987654321123456789", nil}, - {"123987654321.123456789000", -13, "0.0123987654321123456789", nil}, - {"123987654321.123456789000", -14, "0.00123987654321123456789", nil}, - {"00000087654321.123456789000", -14, "0.00000087654321123456789", nil}, - } - dotest(tests) - - wordBufLen = 2 - tests = []tcase{ - {"123.123", -2, "1.23123", nil}, - {"123.123", -3, "0.123123", nil}, - {"123.123", -6, "0.000123123", nil}, - {"123.123", -7, "0.0000123123", nil}, - {"123.123", -15, "0.000000000000123123", nil}, - {"123.123", -16, "0.000000000000012312", ErrTruncated}, - {"123.123", -17, "0.000000000000001231", ErrTruncated}, - {"123.123", -18, "0.000000000000000123", ErrTruncated}, - {"123.123", -19, "0.000000000000000012", ErrTruncated}, - {"123.123", -20, "0.000000000000000001", ErrTruncated}, - {"123.123", -21, "0", ErrTruncated}, - {".000000000123", -1, "0.0000000000123", nil}, - {".000000000123", -6, 
"0.000000000000000123", nil}, - {".000000000123", -7, "0.000000000000000012", ErrTruncated}, - {".000000000123", -8, "0.000000000000000001", ErrTruncated}, - {".000000000123", -9, "0", ErrTruncated}, - {".000000000123", 1, "0.00000000123", nil}, - {".000000000123", 8, "0.0123", nil}, - {".000000000123", 9, "0.123", nil}, - {".000000000123", 10, "1.23", nil}, - {".000000000123", 17, "12300000", nil}, - {".000000000123", 18, "123000000", nil}, - {".000000000123", 19, "1230000000", nil}, - {".000000000123", 20, "12300000000", nil}, - {".000000000123", 21, "123000000000", nil}, - {".000000000123", 22, "1230000000000", nil}, - {".000000000123", 23, "12300000000000", nil}, - {".000000000123", 24, "123000000000000", nil}, - {".000000000123", 25, "1230000000000000", nil}, - {".000000000123", 26, "12300000000000000", nil}, - {".000000000123", 27, "123000000000000000", nil}, - {".000000000123", 28, "0.000000000123", ErrOverflow}, - {"123456789.987654321", -1, "12345678.998765432", ErrTruncated}, - {"123456789.987654321", -2, "1234567.899876543", ErrTruncated}, - {"123456789.987654321", -8, "1.234567900", ErrTruncated}, - {"123456789.987654321", -9, "0.123456789987654321", nil}, - {"123456789.987654321", -10, "0.012345678998765432", ErrTruncated}, - {"123456789.987654321", -17, "0.000000001234567900", ErrTruncated}, - {"123456789.987654321", -18, "0.000000000123456790", ErrTruncated}, - {"123456789.987654321", -19, "0.000000000012345679", ErrTruncated}, - {"123456789.987654321", -26, "0.000000000000000001", ErrTruncated}, - {"123456789.987654321", -27, "0", ErrTruncated}, - {"123456789.987654321", 1, "1234567900", ErrTruncated}, - {"123456789.987654321", 2, "12345678999", ErrTruncated}, - {"123456789.987654321", 4, "1234567899877", ErrTruncated}, - {"123456789.987654321", 8, "12345678998765432", ErrTruncated}, - {"123456789.987654321", 9, "123456789987654321", nil}, - {"123456789.987654321", 10, "123456789.987654321", ErrOverflow}, - {"123456789.987654321", 0, "123456789.987654321", nil}, - } - dotest(tests) - - // reset - wordBufLen = maxWordBufLen -} - -// this test will change global variable `wordBufLen`, so it must run in serial -func TestFromStringMyDecimal(t *testing.T) { - type tcase struct { - input string - output string - err error - } - - var dotest = func(tests []tcase) { - for _, test := range tests { - t.Run(fmt.Sprintf("%v (wordBufLen: %v)", test.input, wordBufLen), func(t *testing.T) { - var dec MyDecimal - require.Equal(t, test.err, dec.FromString([]byte(test.input))) - require.Equal(t, test.output, string(dec.ToString())) - }) - } - } - - wordBufLen = maxWordBufLen - tests := []tcase{ - {"12345", "12345", nil}, - {"12345.", "12345", nil}, - {"123.45.", "123.45", ErrTruncated}, - {"-123.45.", "-123.45", ErrTruncated}, - {".00012345000098765", "0.00012345000098765", nil}, - {".12345000098765", "0.12345000098765", nil}, - {"-.000000012345000098765", "-0.000000012345000098765", nil}, - {"1234500009876.5", "1234500009876.5", nil}, - {"123E5", "12300000", nil}, - {"123E-2", "1.23", nil}, - {"1e1073741823", "999999999999999999999999999999999999999999999999999999999999999999999999999999999", ErrOverflow}, - {"-1e1073741823", "-999999999999999999999999999999999999999999999999999999999999999999999999999999999", ErrOverflow}, - {"1e18446744073709551620", "0", ErrBadNumber}, - {"1e", "1", ErrTruncated}, - {"1e001", "10", nil}, - {"1e00", "1", nil}, - {"1eabc", "1", ErrTruncated}, - {"1e 1dddd ", "10", ErrTruncated}, - {"1e - 1", "1", ErrTruncated}, - {"1e -1", "0.1", nil}, - 
{"0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0.000000000000000000000000000000000000000000000000000000000000000000000000", ErrTruncated}, - {"1asf", "1", ErrTruncated}, - {"1.1.1.1.1", "1.1", ErrTruncated}, - {"1 1", "1", ErrTruncated}, - {"1 ", "1", nil}, - } - dotest(tests) - - wordBufLen = 1 - tests = []tcase{ - {"123450000098765", "98765", ErrOverflow}, - {"123450.000098765", "123450", ErrTruncated}, - } - dotest(tests) - - // reset - wordBufLen = maxWordBufLen -} diff --git a/types/mydecimal_test.go b/types/mydecimal_test.go index 61e86ffb1dc7a..13da9284ad395 100644 --- a/types/mydecimal_test.go +++ b/types/mydecimal_test.go @@ -15,6 +15,7 @@ package types import ( + "fmt" "strconv" "strings" "testing" @@ -814,3 +815,180 @@ func TestReset(t *testing.T) { require.NoError(t, DecimalAdd(&x2, &y2, &z1)) require.Equal(t, z2, z1) } + +// this test will change global variable `wordBufLen`, so it must run in serial +func TestShiftMyDecimal(t *testing.T) { + type tcase struct { + input string + shift int + output string + err error + } + + var dotest = func(tests []tcase) { + for _, test := range tests { + t.Run(fmt.Sprintf("%v (shift: %v, wordBufLen: %v)", test.input, test.shift, wordBufLen), func(t *testing.T) { + var dec MyDecimal + require.NoError(t, dec.FromString([]byte(test.input))) + require.Equal(t, test.err, dec.Shift(test.shift)) + require.Equal(t, test.output, string(dec.ToString())) + }) + } + } + + wordBufLen = maxWordBufLen + tests := []tcase{ + {"123.123", 1, "1231.23", nil}, + {"123457189.123123456789000", 1, "1234571891.23123456789", nil}, + {"123457189.123123456789000", 8, "12345718912312345.6789", nil}, + {"123457189.123123456789000", 9, "123457189123123456.789", nil}, + {"123457189.123123456789000", 10, "1234571891231234567.89", nil}, + {"123457189.123123456789000", 17, "12345718912312345678900000", nil}, + {"123457189.123123456789000", 18, "123457189123123456789000000", nil}, + {"123457189.123123456789000", 19, "1234571891231234567890000000", nil}, + {"123457189.123123456789000", 26, "12345718912312345678900000000000000", nil}, + {"123457189.123123456789000", 27, "123457189123123456789000000000000000", nil}, + {"123457189.123123456789000", 28, "1234571891231234567890000000000000000", nil}, + {"000000000000000000000000123457189.123123456789000", 26, "12345718912312345678900000000000000", nil}, + {"00000000123457189.123123456789000", 27, "123457189123123456789000000000000000", nil}, + {"00000000000000000123457189.123123456789000", 28, "1234571891231234567890000000000000000", nil}, + {"123", 1, "1230", nil}, + {"123", 10, "1230000000000", nil}, + {".123", 1, "1.23", nil}, + {".123", 10, "1230000000", nil}, + {".123", 14, "12300000000000", nil}, + {"000.000", 1000, "0", nil}, + {"000.", 1000, "0", nil}, + {".000", 1000, "0", nil}, + {"1", 1000, "1", ErrOverflow}, + {"123.123", -1, "12.3123", nil}, + {"123987654321.123456789000", -1, "12398765432.1123456789", nil}, + {"123987654321.123456789000", -2, "1239876543.21123456789", nil}, + {"123987654321.123456789000", -3, "123987654.321123456789", nil}, + {"123987654321.123456789000", -8, "1239.87654321123456789", nil}, + {"123987654321.123456789000", -9, "123.987654321123456789", nil}, + {"123987654321.123456789000", -10, "12.3987654321123456789", nil}, + {"123987654321.123456789000", -11, "1.23987654321123456789", nil}, + {"123987654321.123456789000", -12, "0.123987654321123456789", nil}, + {"123987654321.123456789000", -13, "0.0123987654321123456789", nil}, + 
{"123987654321.123456789000", -14, "0.00123987654321123456789", nil}, + {"00000087654321.123456789000", -14, "0.00000087654321123456789", nil}, + } + dotest(tests) + + wordBufLen = 2 + tests = []tcase{ + {"123.123", -2, "1.23123", nil}, + {"123.123", -3, "0.123123", nil}, + {"123.123", -6, "0.000123123", nil}, + {"123.123", -7, "0.0000123123", nil}, + {"123.123", -15, "0.000000000000123123", nil}, + {"123.123", -16, "0.000000000000012312", ErrTruncated}, + {"123.123", -17, "0.000000000000001231", ErrTruncated}, + {"123.123", -18, "0.000000000000000123", ErrTruncated}, + {"123.123", -19, "0.000000000000000012", ErrTruncated}, + {"123.123", -20, "0.000000000000000001", ErrTruncated}, + {"123.123", -21, "0", ErrTruncated}, + {".000000000123", -1, "0.0000000000123", nil}, + {".000000000123", -6, "0.000000000000000123", nil}, + {".000000000123", -7, "0.000000000000000012", ErrTruncated}, + {".000000000123", -8, "0.000000000000000001", ErrTruncated}, + {".000000000123", -9, "0", ErrTruncated}, + {".000000000123", 1, "0.00000000123", nil}, + {".000000000123", 8, "0.0123", nil}, + {".000000000123", 9, "0.123", nil}, + {".000000000123", 10, "1.23", nil}, + {".000000000123", 17, "12300000", nil}, + {".000000000123", 18, "123000000", nil}, + {".000000000123", 19, "1230000000", nil}, + {".000000000123", 20, "12300000000", nil}, + {".000000000123", 21, "123000000000", nil}, + {".000000000123", 22, "1230000000000", nil}, + {".000000000123", 23, "12300000000000", nil}, + {".000000000123", 24, "123000000000000", nil}, + {".000000000123", 25, "1230000000000000", nil}, + {".000000000123", 26, "12300000000000000", nil}, + {".000000000123", 27, "123000000000000000", nil}, + {".000000000123", 28, "0.000000000123", ErrOverflow}, + {"123456789.987654321", -1, "12345678.998765432", ErrTruncated}, + {"123456789.987654321", -2, "1234567.899876543", ErrTruncated}, + {"123456789.987654321", -8, "1.234567900", ErrTruncated}, + {"123456789.987654321", -9, "0.123456789987654321", nil}, + {"123456789.987654321", -10, "0.012345678998765432", ErrTruncated}, + {"123456789.987654321", -17, "0.000000001234567900", ErrTruncated}, + {"123456789.987654321", -18, "0.000000000123456790", ErrTruncated}, + {"123456789.987654321", -19, "0.000000000012345679", ErrTruncated}, + {"123456789.987654321", -26, "0.000000000000000001", ErrTruncated}, + {"123456789.987654321", -27, "0", ErrTruncated}, + {"123456789.987654321", 1, "1234567900", ErrTruncated}, + {"123456789.987654321", 2, "12345678999", ErrTruncated}, + {"123456789.987654321", 4, "1234567899877", ErrTruncated}, + {"123456789.987654321", 8, "12345678998765432", ErrTruncated}, + {"123456789.987654321", 9, "123456789987654321", nil}, + {"123456789.987654321", 10, "123456789.987654321", ErrOverflow}, + {"123456789.987654321", 0, "123456789.987654321", nil}, + } + dotest(tests) + + // reset + wordBufLen = maxWordBufLen +} + +// this test will change global variable `wordBufLen`, so it must run in serial +func TestFromStringMyDecimal(t *testing.T) { + type tcase struct { + input string + output string + err error + } + + var dotest = func(tests []tcase) { + for _, test := range tests { + t.Run(fmt.Sprintf("%v (wordBufLen: %v)", test.input, wordBufLen), func(t *testing.T) { + var dec MyDecimal + require.Equal(t, test.err, dec.FromString([]byte(test.input))) + require.Equal(t, test.output, string(dec.ToString())) + }) + } + } + + wordBufLen = maxWordBufLen + tests := []tcase{ + {"12345", "12345", nil}, + {"12345.", "12345", nil}, + {"123.45.", "123.45", ErrTruncated}, + {"-123.45.", 
"-123.45", ErrTruncated}, + {".00012345000098765", "0.00012345000098765", nil}, + {".12345000098765", "0.12345000098765", nil}, + {"-.000000012345000098765", "-0.000000012345000098765", nil}, + {"1234500009876.5", "1234500009876.5", nil}, + {"123E5", "12300000", nil}, + {"123E-2", "1.23", nil}, + {"1e1073741823", "999999999999999999999999999999999999999999999999999999999999999999999999999999999", ErrOverflow}, + {"-1e1073741823", "-999999999999999999999999999999999999999999999999999999999999999999999999999999999", ErrOverflow}, + {"1e18446744073709551620", "0", ErrBadNumber}, + {"1e", "1", ErrTruncated}, + {"1e001", "10", nil}, + {"1e00", "1", nil}, + {"1eabc", "1", ErrTruncated}, + {"1e 1dddd ", "10", ErrTruncated}, + {"1e - 1", "1", ErrTruncated}, + {"1e -1", "0.1", nil}, + {"0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0.000000000000000000000000000000000000000000000000000000000000000000000000", ErrTruncated}, + {"1asf", "1", ErrTruncated}, + {"1.1.1.1.1", "1.1", ErrTruncated}, + {"1 1", "1", ErrTruncated}, + {"1 ", "1", nil}, + } + dotest(tests) + + wordBufLen = 1 + tests = []tcase{ + {"123450000098765", "98765", ErrOverflow}, + {"123450.000098765", "123450", ErrTruncated}, + } + dotest(tests) + + // reset + wordBufLen = maxWordBufLen +} diff --git a/types/set_serial_test.go b/types/set_test.go similarity index 100% rename from types/set_serial_test.go rename to types/set_test.go