Merge branch 'master' into flashback-2-phase
Defined2014 committed Oct 8, 2022
2 parents 135e67c + 556daf7 commit f9ea4db
Showing 137 changed files with 11,459 additions and 7,282 deletions.
3 changes: 2 additions & 1 deletion .bazelrc
@@ -1,9 +1,10 @@
-startup --host_jvm_args=-Xmx8g
+startup --host_jvm_args=-Xmx6g
 startup --unlimit_coredumps
 
+run:ci --color=yes
 
 build --announce_rc
 build --experimental_guard_against_concurrent_changes
 build --experimental_remote_merkle_tree_cache
 build --java_language_version=17
 build --java_runtime_version=17
15 changes: 15 additions & 0 deletions DEPS.bzl
@@ -1872,6 +1872,13 @@ def go_deps():
         sum = "h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY=",
         version = "v3.2.0+incompatible",
     )
+    go_repository(
+        name = "com_github_jarcoal_httpmock",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/jarcoal/httpmock",
+        sum = "h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=",
+        version = "v1.2.0",
+    )
 
     go_repository(
         name = "com_github_jcmturner_aescts_v2",
@@ -2326,6 +2333,14 @@ def go_deps():
         sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
         version = "v1.0.1",
     )
+    go_repository(
+        name = "com_github_maxatome_go_testdeep",
+        build_file_proto_mode = "disable",
+        importpath = "github.com/maxatome/go-testdeep",
+        sum = "h1:Tgh5efyCYyJFGUYiT0qxBSIDeXw0F5zSoatlou685kk=",
+        version = "v1.11.0",
+    )
+
     go_repository(
         name = "com_github_mbilski_exhaustivestruct",
         build_file_proto_mode = "disable",
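
Editor's note: both dependencies registered above are test-only Go libraries — jarcoal/httpmock stubs HTTP traffic, and maxatome/go-testdeep supplies deep-comparison assertions (likely pulled in because httpmock's own test suite uses it). A minimal sketch of typical httpmock usage in a Go test; the endpoint and payload here are illustrative, not taken from this commit:

package example_test

import (
	"io"
	"net/http"
	"testing"

	"github.com/jarcoal/httpmock"
)

func TestStubbedHTTP(t *testing.T) {
	// Intercept the default transport so no real network I/O happens.
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()

	// Hypothetical endpoint: every GET to this URL gets a canned 200 response.
	httpmock.RegisterResponder("GET", "http://example.invalid/api/v1/status",
		httpmock.NewStringResponder(200, `{"ok":true}`))

	resp, err := http.Get("http://example.invalid/api/v1/status")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(body) != `{"ok":true}` {
		t.Fatalf("unexpected body %q", body)
	}
}
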
2 changes: 1 addition & 1 deletion Makefile
@@ -31,7 +31,7 @@ dev: checklist check explaintest gogenerate br_unit_test test_part_parser_dev ut
 # Install the check tools.
 check-setup:tools/bin/revive
 
-check: check-parallel lint tidy testSuite errdoc bazel_all_build
+check: parser_yacc check-parallel lint tidy testSuite errdoc bazel_all_build
 
 fmt:
 	@echo "gofmt (simplify)"
4 changes: 2 additions & 2 deletions bindinfo/bind_test.go
@@ -352,8 +352,8 @@ func TestBindCTEMerge(t *testing.T) {
 	tk.MustExec("use test")
 	tk.MustExec("drop table if exists t1")
 	tk.MustExec("create table t1(id int)")
-	require.True(t, tk.HasPlan("with cte as (select * from t1) select * from cte", "CTEFullScan"))
-	require.False(t, tk.HasPlan("with cte as (select /*+ MERGE() */ * from t1) select * from cte", "CTEFullScan"))
+	require.True(t, tk.HasPlan("with cte as (select * from t1) select * from cte a, cte b", "CTEFullScan"))
+	require.False(t, tk.HasPlan("with cte as (select /*+ MERGE() */ * from t1) select * from cte a, cte b", "CTEFullScan"))
 	tk.MustExec(`
 create global binding for
 with cte as (select * from t1) select * from cte
4 changes: 2 additions & 2 deletions bindinfo/capture_test.go
@@ -563,9 +563,9 @@ func TestIssue25505(t *testing.T) {
 	spmMap["with recursive `cte` ( `a` ) as ( select ? union select `a` + ? from `test` . `t1` where `a` > ? ) select * from `cte`"] =
 		"WITH RECURSIVE `cte` (`a`) AS (SELECT 2 UNION SELECT `a` + 1 FROM `test`.`t1` WHERE `a` > 5) SELECT /*+ hash_agg(@`sel_1`), use_index(@`sel_3` `test`.`t1` `idx_b`)*/ * FROM `cte`"
 	spmMap["with `cte` as ( with `cte1` as ( select * from `test` . `t2` where `a` > ? and `b` > ? ) select * from `cte1` ) select * from `cte` join `test` . `t1` on `t1` . `a` = `cte` . `a`"] =
-		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` > 1 AND `b` > 1) SELECT * FROM `cte1`) SELECT /*+ inl_join(@`sel_1` `test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_ab`), use_index(@`sel_3` `test`.`t2` `idx_ab`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
+		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` > 1 AND `b` > 1) SELECT * FROM `cte1`) SELECT /*+ use_index(@`sel_3` `test`.`t2` `idx_ab`), use_index(@`sel_1` `test`.`t1` `idx_ab`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
 	spmMap["with `cte` as ( with `cte1` as ( select * from `test` . `t2` where `a` = ? and `b` = ? ) select * from `cte1` ) select * from `cte` join `test` . `t1` on `t1` . `a` = `cte` . `a`"] =
-		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` = 1 AND `b` = 1) SELECT * FROM `cte1`) SELECT /*+ inl_join(@`sel_1` `test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
+		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` = 1 AND `b` = 1) SELECT * FROM `cte1`) SELECT /*+ use_index(@`sel_3` `test`.`t2` `idx_a`), use_index(@`sel_1` `test`.`t1` `idx_a`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
 
 	tk.MustExec("with cte as (with cte1 as (select /*+use_index(t2 idx_a)*/ * from t2 where a = 1 and b = 1) select * from cte1) select /*+use_index(t1 idx_a)*/ * from cte join t1 on t1.a=cte.a;")
 	tk.MustExec("with cte as (with cte1 as (select /*+use_index(t2 idx_a)*/ * from t2 where a = 1 and b = 1) select * from cte1) select /*+use_index(t1 idx_a)*/ * from cte join t1 on t1.a=cte.a;")
31 changes: 19 additions & 12 deletions bindinfo/handle.go
@@ -108,20 +108,27 @@ type bindRecordUpdate struct {
 // NewBindHandle creates a new BindHandle.
 func NewBindHandle(ctx sessionctx.Context) *BindHandle {
 	handle := &BindHandle{}
-	handle.sctx.Context = ctx
-	handle.bindInfo.Value.Store(newBindCache())
-	handle.bindInfo.parser = parser.New()
-	handle.invalidBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
-	handle.invalidBindRecordMap.flushFunc = func(record *BindRecord) error {
-		return handle.DropBindRecord(record.OriginalSQL, record.Db, &record.Bindings[0])
-	}
-	handle.pendingVerifyBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
-	handle.pendingVerifyBindRecordMap.flushFunc = func(record *BindRecord) error {
+	handle.Reset(ctx)
+	return handle
+}
+
+// Reset is to reset the BindHandle and clean old info.
+func (h *BindHandle) Reset(ctx sessionctx.Context) {
+	h.bindInfo.Lock()
+	defer h.bindInfo.Unlock()
+	h.sctx.Context = ctx
+	h.bindInfo.Value.Store(newBindCache())
+	h.bindInfo.parser = parser.New()
+	h.invalidBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
+	h.invalidBindRecordMap.flushFunc = func(record *BindRecord) error {
+		return h.DropBindRecord(record.OriginalSQL, record.Db, &record.Bindings[0])
+	}
+	h.pendingVerifyBindRecordMap.Value.Store(make(map[string]*bindRecordUpdate))
+	h.pendingVerifyBindRecordMap.flushFunc = func(record *BindRecord) error {
 		// BindSQL has already been validated when coming here, so we use nil sctx parameter.
-		return handle.AddBindRecord(nil, record)
+		return h.AddBindRecord(nil, record)
 	}
-	variable.RegisterStatistics(handle)
-	return handle
+	variable.RegisterStatistics(h)
 }
 
 // Update updates the global sql bind cache.
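
Editor's note: the refactor above splits BindHandle construction into a thin constructor plus a lock-guarded Reset, so callers can re-initialize an existing handle in place, and the flush closures now capture the receiver h instead of the local handle. A minimal sketch of the same constructor-delegates-to-Reset pattern, with simplified types that are not the actual bindinfo declarations:

package example

import "sync"

// Cache stands in for BindHandle; the real struct holds a parser, caches, etc.
type Cache struct {
	mu    sync.Mutex
	items map[string]string
}

// NewCache allocates the object and brings it to a clean initial state.
func NewCache() *Cache {
	c := &Cache{}
	c.Reset()
	return c
}

// Reset re-initializes the object in place under the lock, so existing
// holders of the pointer see fresh state without re-wiring any references.
func (c *Cache) Reset() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = make(map[string]string)
}
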
10 changes: 5 additions & 5 deletions br/pkg/lightning/backend/kv/session.go
@@ -253,8 +253,11 @@ func NewSession(options *SessionOptions, logger log.Logger) sessionctx.Context {
 }
 
 func newSession(options *SessionOptions, logger log.Logger) *session {
+	s := &session{
+		values: make(map[fmt.Stringer]interface{}, 1),
+	}
 	sqlMode := options.SQLMode
-	vars := variable.NewSessionVars()
+	vars := variable.NewSessionVars(s)
 	vars.SkipUTF8Check = true
 	vars.StmtCtx.InInsertStmt = true
 	vars.StmtCtx.BatchCheck = true
@@ -289,10 +292,7 @@ func newSession(options *SessionOptions, logger log.Logger) *session {
 			log.ShortError(err))
 	}
 	vars.TxnCtx = nil
-	s := &session{
-		vars:   vars,
-		values: make(map[fmt.Stringer]interface{}, 1),
-	}
+	s.vars = vars
 	s.txn.kvPairs = &KvPairs{}
 
 	return s
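
Editor's note: this change is a two-phase construction. The session struct is now allocated first so it can be handed to variable.NewSessionVars(s), whose signature on master evidently gained a parameter, and vars is attached to the session afterwards. This is the usual Go idiom for breaking a construction cycle between two values that reference each other; a sketch under that assumption, with illustrative names rather than the real lightning/variable APIs:

package example

type session struct {
	vars *sessionVars
}

type sessionVars struct {
	owner *session // back-reference, resolved during construction
}

func newSessionVars(owner *session) *sessionVars {
	return &sessionVars{owner: owner}
}

func newSession() *session {
	s := &session{}            // phase 1: allocate the parent shell
	s.vars = newSessionVars(s) // phase 2: build the child with a back-reference, then attach it
	return s
}
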
2 changes: 2 additions & 0 deletions br/pkg/streamhelper/BUILD.bazel
@@ -59,6 +59,8 @@ go_test(
         "tsheap_test.go",
     ],
     flaky = True,
+    race = "on",
+    shard_count = 20,
     deps = [
         ":streamhelper",
         "//br/pkg/errors",
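
Editor's note: in rules_go, race = "on" builds and runs this test target with the Go race detector, and shard_count = 20 splits the test cases across twenty parallel test executions. Enabling the detector here pairs with the next file, which converts a plain uint64 checkpoint field to an atomic: unsynchronized concurrent access to the old field is exactly what -race reports. An illustrative example (not from this commit) of the kind of race the detector flags:

package example

import (
	"sync"
	"testing"
)

func TestRacyIncrement(t *testing.T) {
	var n uint64 // plain word: concurrent writes are a data race
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n++ // unsynchronized read-modify-write; `go test -race` flags this
		}()
	}
	wg.Wait()
	_ = n
}
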
34 changes: 17 additions & 17 deletions br/pkg/streamhelper/basic_lib_for_test.go
@@ -12,6 +12,7 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 
 	backup "github.com/pingcap/kvproto/pkg/brpb"
@@ -61,7 +62,7 @@ type region struct {
 	leader     uint64
 	epoch      uint64
 	id         uint64
-	checkpoint uint64
+	checkpoint atomic.Uint64
 
 	fsim flushSimulator
 }
@@ -93,13 +94,13 @@ func overlaps(a, b kv.KeyRange) bool {
 
 func (r *region) splitAt(newID uint64, k string) *region {
 	newRegion := &region{
-		rng:        kv.KeyRange{StartKey: []byte(k), EndKey: r.rng.EndKey},
-		leader:     r.leader,
-		epoch:      r.epoch + 1,
-		id:         newID,
-		checkpoint: r.checkpoint,
-		fsim:       r.fsim.fork(),
+		rng:    kv.KeyRange{StartKey: []byte(k), EndKey: r.rng.EndKey},
+		leader: r.leader,
+		epoch:  r.epoch + 1,
+		id:     newID,
+		fsim:   r.fsim.fork(),
 	}
+	newRegion.checkpoint.Store(r.checkpoint.Load())
 	r.rng.EndKey = []byte(k)
 	r.epoch += 1
 	r.fsim = r.fsim.fork()
@@ -151,7 +152,7 @@ func (f *fakeStore) GetLastFlushTSOfRegion(ctx context.Context, in *logbackup.Ge
 			continue
 		}
 		resp.Checkpoints = append(resp.Checkpoints, &logbackup.RegionCheckpoint{
-			Checkpoint: region.checkpoint,
+			Checkpoint: region.checkpoint.Load(),
 			Region: &logbackup.RegionIdentity{
 				Id:           region.id,
 				EpochVersion: region.epoch,
@@ -315,9 +316,9 @@ func (f *fakeCluster) advanceCheckpoints() uint64 {
 		f.updateRegion(r.id, func(r *region) {
 			// The current implementation assumes that the server never returns checkpoint with value 0.
 			// This assumption is true for the TiKV implementation, simulating it here.
-			r.checkpoint += rand.Uint64()%256 + 1
-			if r.checkpoint < minCheckpoint {
-				minCheckpoint = r.checkpoint
+			cp := r.checkpoint.Add(rand.Uint64()%256 + 1)
+			if cp < minCheckpoint {
+				minCheckpoint = cp
 			}
 			r.fsim.flushedEpoch = 0
 		})
@@ -340,11 +341,10 @@ func createFakeCluster(t *testing.T, n int, simEnabled bool) *fakeCluster {
 		stores = append(stores, s)
 	}
 	initialRegion := &region{
-		rng:        kv.KeyRange{},
-		leader:     stores[0].id,
-		epoch:      0,
-		id:         c.idAlloc(),
-		checkpoint: 0,
+		rng:    kv.KeyRange{},
+		leader: stores[0].id,
+		epoch:  0,
+		id:     c.idAlloc(),
 		fsim: flushSimulator{
 			enabled: simEnabled,
 		},
@@ -367,7 +367,7 @@ func (r *region) String() string {
 		r.epoch,
 		hex.EncodeToString(r.rng.StartKey),
 		hex.EncodeToString(r.rng.EndKey),
-		r.checkpoint,
+		r.checkpoint.Load(),
 		r.leader,
 		r.fsim.flushedEpoch)
 }
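
Editor's note: the conversion above is a mechanical migration from a plain uint64 to sync/atomic's Uint64 wrapper type (available since Go 1.19): assignments become Store, compound increments become Add (which returns the new value, avoiding the earlier racy re-read of the field), and reads become Load. The wrapper carries a noCopy guard, so it cannot be copied in a struct literal, which is why splitAt now transfers the value with Store(...Load()). A standalone sketch:

package example

import "sync/atomic"

type region struct {
	checkpoint atomic.Uint64 // was: checkpoint uint64
}

func demo() {
	var r region
	r.checkpoint.Store(42)      // was: r.checkpoint = 42
	next := r.checkpoint.Add(8) // was: r.checkpoint += 8 (racy under concurrency)
	_ = next                    // Add returns the updated value atomically

	snapshot := r.checkpoint.Load() // was: a plain read of r.checkpoint
	_ = snapshot

	// atomic.Uint64 must not be copied, so "copying" a region's checkpoint
	// means loading from one and storing into the other, as splitAt does.
	var fork region
	fork.checkpoint.Store(r.checkpoint.Load())
}
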
15 changes: 15 additions & 0 deletions build/nogo_config.json
@@ -378,8 +378,23 @@
       "planner/util": "planner code",
       "planner/implementation": "planner code",
       "planner/cascades": "planner code",
+      "planner/core/explain.go": "planner/core/explain.go",
+      "planner/core/handle_cols.go": "planner/core/handle_cols.go",
+      "planner/core/hints.go": "planner/core/hints.go",
+      "planner/core/logical_plan_builder.go": "planner/core/logical_plan_builder.go",
+      "planner/core/memtable_predicate_extractor.go": "planner/core/memtable_predicate_extractor.go",
+      "planner/core/pb_to_plan.go": "planner/core/pb_to_plan.go",
       "planner/core/plan_stats.go": "planner/core/plan_stats.go",
       "planner/core/plan_cache.go": "planner code",
+      "planner/core/plan.go": "planner/core/plan.go",
+      "planner/core/point_get_plan.go": "planner/core/point_get_plan.go",
+      "planner/core/property_cols_prune.go": "planner/core/property_cols_prune.go",
+      "planner/core/rule_aggregation_elimination.go": "planner/core/rule_aggregation_elimination.go",
+      "planner/core/rule_build_key_info.go": "planner/core/rule_build_key_info.go",
+      "planner/core/rule_column_pruning.go": "planner/core/rule_column_pruning.go",
+      "planner/core/rule_join_elimination.go": "planner/core/rule_join_elimination.go",
+      "planner/core/rule_semi_join_rewrite.go": "planner/core/rule_semi_join_rewrite.go",
+      "planner/core/util.go": "planner/core/util.go",
       "util/": "util code",
       "parser/": "parser code",
       "meta/": "parser code"
(Diff truncated here; the remaining 127 of the 137 changed files are not shown.)
