diff --git a/config/config.go b/config/config.go index b095e2a0b9196..1ed44ee9acf77 100644 --- a/config/config.go +++ b/config/config.go @@ -62,7 +62,6 @@ type Config struct { Status Status `toml:"status" json:"status"` Performance Performance `toml:"performance" json:"performance"` XProtocol XProtocol `toml:"xprotocol" json:"xprotocol"` - PlanCache PlanCache `toml:"plan-cache" json:"plan-cache"` PreparedPlanCache PreparedPlanCache `toml:"prepared-plan-cache" json:"prepared-plan-cache"` OpenTracing OpenTracing `toml:"opentracing" json:"opentracing"` ProxyProtocol ProxyProtocol `toml:"proxy-protocol" json:"proxy-protocol"` @@ -287,11 +286,6 @@ var defaultConf = Config{ Networks: "", HeaderTimeout: 5, }, - PlanCache: PlanCache{ - Enabled: false, - Capacity: 2560, - Shards: 256, - }, PreparedPlanCache: PreparedPlanCache{ Enabled: false, Capacity: 100, diff --git a/config/config.toml.example b/config/config.toml.example index 19e6e796254ac..a950a2c5ba53a 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -148,11 +148,6 @@ networks = "" # PROXY protocol header read timeout, unit is second header-timeout = 5 -[plan-cache] -enabled = false -capacity = 2560 -shards = 256 - [prepared-plan-cache] enabled = false capacity = 100 diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index 18ecc92c0da19..107b626c9824f 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -17,7 +17,6 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/tidb/plan" "github.com/pingcap/tidb/terror" - "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/testkit" ) @@ -550,22 +549,6 @@ func (s *testSuite) TestAggEliminator(c *C) { tk.MustQuery("select min(b*b) from t").Check(testkit.Rows("1")) } -func (s *testSuite) TestIssue5663(c *C) { - tk := testkit.NewTestKitWithInit(c, s.store) - plan.GlobalPlanCache = kvcache.NewShardedLRUCache(2, 1) - planCahche := tk.Se.GetSessionVars().PlanCacheEnabled - defer func() { - tk.Se.GetSessionVars().PlanCacheEnabled = planCahche - }() - - tk.Se.GetSessionVars().PlanCacheEnabled = true - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1 (i int unsigned, primary key(i));") - tk.MustExec("insert into t1 values (1),(2),(3);") - tk.MustQuery("select group_concat(i) from t1 where i > 1;").Check(testkit.Rows("2,3")) - tk.MustQuery("select group_concat(i) from t1 where i > 1;").Check(testkit.Rows("2,3")) -} - func (s *testSuite) TestMaxMinFloatScalaFunc(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) diff --git a/plan/cache.go b/plan/cache.go index c85fc363c0f05..af753faa0391c 100644 --- a/plan/cache.go +++ b/plan/cache.go @@ -16,7 +16,6 @@ package plan import ( "time" - "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/codec" @@ -25,82 +24,12 @@ import ( ) var ( - // GlobalPlanCache stores the global plan cache for every session in a tidb-server. - GlobalPlanCache *kvcache.ShardedLRUCache - // PreparedPlanCacheEnabled stores the global config "prepared-plan-cache-enabled". PreparedPlanCacheEnabled bool // PreparedPlanCacheCapacity stores the global config "prepared-plan-cache-capacity". PreparedPlanCacheCapacity uint ) -type sqlCacheKey struct { - user string - host string - database string - sql string - snapshot uint64 - schemaVersion int64 - sqlMode mysql.SQLMode - timezoneOffset int - readOnly bool // stores the current tidb-server status. - - hash []byte -} - -// Hash implements Key interface. 
-func (key *sqlCacheKey) Hash() []byte { - if key.hash == nil { - var ( - userBytes = hack.Slice(key.user) - hostBytes = hack.Slice(key.host) - dbBytes = hack.Slice(key.database) - sqlBytes = hack.Slice(key.sql) - bufferSize = len(userBytes) + len(hostBytes) + len(dbBytes) + len(sqlBytes) + 8*4 + 1 - ) - - key.hash = make([]byte, 0, bufferSize) - key.hash = append(key.hash, userBytes...) - key.hash = append(key.hash, hostBytes...) - key.hash = append(key.hash, dbBytes...) - key.hash = append(key.hash, sqlBytes...) - key.hash = codec.EncodeInt(key.hash, int64(key.snapshot)) - key.hash = codec.EncodeInt(key.hash, key.schemaVersion) - key.hash = codec.EncodeInt(key.hash, int64(key.sqlMode)) - key.hash = codec.EncodeInt(key.hash, int64(key.timezoneOffset)) - if key.readOnly { - key.hash = append(key.hash, '1') - } else { - key.hash = append(key.hash, '0') - } - } - return key.hash -} - -// NewSQLCacheKey creates a new sqlCacheKey object. -func NewSQLCacheKey(sessionVars *variable.SessionVars, sql string, schemaVersion int64, readOnly bool) kvcache.Key { - timezoneOffset, user, host := 0, "", "" - if sessionVars.TimeZone != nil { - _, timezoneOffset = time.Now().In(sessionVars.TimeZone).Zone() - } - if sessionVars.User != nil { - user = sessionVars.User.Username - host = sessionVars.User.Hostname - } - - return &sqlCacheKey{ - user: user, - host: host, - database: sessionVars.CurrentDB, - sql: sql, - snapshot: sessionVars.SnapshotTS, - schemaVersion: schemaVersion, - sqlMode: sessionVars.SQLMode, - timezoneOffset: timezoneOffset, - readOnly: readOnly, - } -} - type pstmtPlanCacheKey struct { database string connID uint64 @@ -149,22 +78,6 @@ func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, sch } } -// SQLCacheValue stores the cached Statement and StmtNode. -type SQLCacheValue struct { - StmtNode ast.StmtNode - Plan Plan - Expensive bool -} - -// NewSQLCacheValue creates a SQLCacheValue. -func NewSQLCacheValue(ast ast.StmtNode, plan Plan, expensive bool) *SQLCacheValue { - return &SQLCacheValue{ - StmtNode: ast, - Plan: plan, - Expensive: expensive, - } -} - // PSTMTPlanCacheValue stores the cached Statement and StmtNode. 
type PSTMTPlanCacheValue struct { Plan Plan diff --git a/plan/cache_test.go b/plan/cache_test.go index b02f1bb13f6a9..67c15005d8f58 100644 --- a/plan/cache_test.go +++ b/plan/cache_test.go @@ -38,8 +38,6 @@ func (s *testCacheSuite) SetUpSuite(c *C) { func (s *testCacheSuite) TestCacheKey(c *C) { defer testleak.AfterTest(c)() - key1 := NewSQLCacheKey(s.ctx.GetSessionVars(), "select * from t", 0, false) - c.Assert(key1.Hash(), DeepEquals, []byte{0x74, 0x65, 0x73, 0x74, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x20, 0x2a, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x30}) - key2 := NewPSTMTPlanCacheKey(s.ctx.GetSessionVars(), 1, 1) - c.Assert(key2.Hash(), DeepEquals, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) + key := NewPSTMTPlanCacheKey(s.ctx.GetSessionVars(), 1, 1) + c.Assert(key.Hash(), DeepEquals, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) } diff --git a/session/session.go b/session/session.go index 79f22119c7e42..2c40fa1a7886b 100644 --- a/session/session.go +++ b/session/session.go @@ -767,86 +767,45 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []ast.Rec } s.PrepareTxnCtx(ctx) - var ( - cacheKey kvcache.Key - cacheValue kvcache.Value - hitCache = false - connID = s.sessionVars.ConnectionID - planCacheEnabled = s.sessionVars.PlanCacheEnabled // Its value is read from the global configuration, and it will be only updated in tests. - ) + connID := s.sessionVars.ConnectionID + err = s.loadCommonGlobalVariablesIfNeeded() + if err != nil { + return nil, errors.Trace(err) + } - if planCacheEnabled { - schemaVersion := domain.GetDomain(s).InfoSchema().SchemaMetaVersion() - readOnly := s.Txn() == nil || s.Txn().IsReadOnly() + charsetInfo, collation := s.sessionVars.GetCharsetInfo() - cacheKey = plan.NewSQLCacheKey(s.sessionVars, sql, schemaVersion, readOnly) - cacheValue, hitCache = plan.GlobalPlanCache.Get(cacheKey) + // Step1: Compile query string to abstract syntax trees(ASTs). 
+ startTS := time.Now() + stmtNodes, err := s.ParseSQL(ctx, sql, charsetInfo, collation) + if err != nil { + s.rollbackOnError(ctx) + log.Warnf("con:%d parse error:\n%v\n%s", connID, err, sql) + return nil, errors.Trace(err) } + metrics.SessionExecuteParseDuration.Observe(time.Since(startTS).Seconds()) - if hitCache { - metrics.PlanCacheCounter.WithLabelValues("select").Inc() - stmtNode := cacheValue.(*plan.SQLCacheValue).StmtNode - stmt := &executor.ExecStmt{ - InfoSchema: executor.GetInfoSchema(s), - Plan: cacheValue.(*plan.SQLCacheValue).Plan, - Expensive: cacheValue.(*plan.SQLCacheValue).Expensive, - Text: stmtNode.Text(), - StmtNode: stmtNode, - Ctx: s, - } - + compiler := executor.Compiler{Ctx: s} + for _, stmtNode := range stmtNodes { s.PrepareTxnCtx(ctx) - if err = executor.ResetStmtCtx(s, stmtNode); err != nil { - return nil, errors.Trace(err) - } - if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets); err != nil { - return nil, errors.Trace(err) - } - } else { - err = s.loadCommonGlobalVariablesIfNeeded() - if err != nil { + + // Step2: Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). + startTS = time.Now() + // Some executions are done in compile stage, so we reset them before compile. + if err := executor.ResetStmtCtx(s, stmtNode); err != nil { return nil, errors.Trace(err) } - - charsetInfo, collation := s.sessionVars.GetCharsetInfo() - - // Step1: Compile query string to abstract syntax trees(ASTs). - startTS := time.Now() - stmtNodes, err := s.ParseSQL(ctx, sql, charsetInfo, collation) + stmt, err := compiler.Compile(ctx, stmtNode) if err != nil { s.rollbackOnError(ctx) - log.Warnf("con:%d parse error:\n%v\n%s", connID, err, sql) + log.Warnf("con:%d compile error:\n%v\n%s", connID, err, sql) return nil, errors.Trace(err) } - metrics.SessionExecuteParseDuration.Observe(time.Since(startTS).Seconds()) - - compiler := executor.Compiler{Ctx: s} - for _, stmtNode := range stmtNodes { - s.PrepareTxnCtx(ctx) + metrics.SessionExecuteCompileDuration.Observe(time.Since(startTS).Seconds()) - // Step2: Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). - startTS = time.Now() - // Some executions are done in compile stage, so we reset them before compile. - if err := executor.ResetStmtCtx(s, stmtNode); err != nil { - return nil, errors.Trace(err) - } - stmt, err := compiler.Compile(ctx, stmtNode) - if err != nil { - s.rollbackOnError(ctx) - log.Warnf("con:%d compile error:\n%v\n%s", connID, err, sql) - return nil, errors.Trace(err) - } - metrics.SessionExecuteCompileDuration.Observe(time.Since(startTS).Seconds()) - - // Step3: Cache the physical plan if possible. - if planCacheEnabled && stmt.Cacheable && len(stmtNodes) == 1 && !s.GetSessionVars().StmtCtx.HistogramsNotLoad() { - plan.GlobalPlanCache.Put(cacheKey, plan.NewSQLCacheValue(stmtNode, stmt.Plan, stmt.Expensive)) - } - - // Step4: Execute the physical plan. - if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets); err != nil { - return nil, errors.Trace(err) - } + // Step3: Execute the physical plan. 
+ if recordSets, err = s.executeStatement(ctx, connID, stmtNode, stmt, recordSets); err != nil { + return nil, errors.Trace(err) } } diff --git a/session/session_test.go b/session/session_test.go index 276413eb870ae..ed4b0aa5ea622 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/plan" "github.com/pingcap/tidb/privilege/privileges" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" @@ -38,7 +37,6 @@ import ( "github.com/pingcap/tidb/terror" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/auth" - "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" @@ -110,18 +108,11 @@ func (p *mockBinlogPump) PullBinlogs(ctx context.Context, in *binlog.PullBinlogR } func (s *testSessionSuite) TestForCoverage(c *C) { - plan.GlobalPlanCache = kvcache.NewShardedLRUCache(2, 1) - // Just for test coverage. tk := testkit.NewTestKitWithInit(c, s.store) - planCache := tk.Se.GetSessionVars().PlanCacheEnabled - defer func() { - tk.Se.GetSessionVars().PlanCacheEnabled = planCache - }() tk.MustExec("drop table if exists t") tk.MustExec("create table t (id int auto_increment, v int, index (id))") tk.MustExec("insert t values ()") - tk.Se.GetSessionVars().PlanCacheEnabled = true tk.MustExec("insert t values ()") tk.MustExec("insert t values ()") diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index ca85b8f6694ba..daa5a456a3fe3 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -206,9 +206,6 @@ type SessionVars struct { // PlanID is the unique id of logical and physical plan. PlanID int - // PlanCacheEnabled stores the global config "plan-cache-enabled", and it will be only updated in tests. - PlanCacheEnabled bool - // User is the user identity with which the session login. User *auth.UserIdentity @@ -341,7 +338,6 @@ func NewSessionVars() *SessionVars { } else { enableStreaming = "0" } - vars.PlanCacheEnabled = config.GetGlobalConfig().PlanCache.Enabled terror.Log(vars.SetSystemVar(TiDBEnableStreaming, enableStreaming)) return vars } diff --git a/tidb-server/main.go b/tidb-server/main.go index 9893cf1d55735..b4e2cf22c6372 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -44,7 +44,6 @@ import ( "github.com/pingcap/tidb/store/tikv/gcworker" "github.com/pingcap/tidb/terror" "github.com/pingcap/tidb/util" - "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/printer" "github.com/pingcap/tidb/util/systimemon" @@ -380,10 +379,6 @@ func setGlobalVars() { plan.AllowCartesianProduct = cfg.Performance.CrossJoin privileges.SkipWithGrant = cfg.Security.SkipGrantTable - if cfg.PlanCache.Enabled { - plan.GlobalPlanCache = kvcache.NewShardedLRUCache(cfg.PlanCache.Capacity, cfg.PlanCache.Shards) - } - plan.PreparedPlanCacheEnabled = cfg.PreparedPlanCache.Enabled if plan.PreparedPlanCacheEnabled { plan.PreparedPlanCacheCapacity = cfg.PreparedPlanCache.Capacity diff --git a/util/kvcache/sharded_lru.go b/util/kvcache/sharded_lru.go deleted file mode 100644 index 449dd2259d9cd..0000000000000 --- a/util/kvcache/sharded_lru.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017 PingCAP, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package kvcache - -import ( - "sync" - - "github.com/spaolacci/murmur3" -) - -// ShardedLRUCache is a sharded LRU Cache, thread safe. -type ShardedLRUCache struct { - shards []*SimpleLRUCache - locks []sync.RWMutex -} - -// NewShardedLRUCache creates a ShardedLRUCache. -func NewShardedLRUCache(capacity, shardCount uint) *ShardedLRUCache { - shardedLRUCache := &ShardedLRUCache{ - shards: make([]*SimpleLRUCache, 0, shardCount), - locks: make([]sync.RWMutex, shardCount), - } - for i := uint(0); i < shardCount; i++ { - shardedLRUCache.shards = append(shardedLRUCache.shards, NewSimpleLRUCache(capacity/shardCount)) - } - return shardedLRUCache -} - -// Get gets a value from a ShardedLRUCache. -func (s *ShardedLRUCache) Get(key Key) (Value, bool) { - id := int(murmur3.Sum32(key.Hash())) % len(s.shards) - - s.locks[id].Lock() - value, ok := s.shards[id].Get(key) - s.locks[id].Unlock() - - return value, ok -} - -// Put puts a (key, value) pair to a ShardedLRUCache. -func (s *ShardedLRUCache) Put(key Key, value Value) { - id := int(murmur3.Sum32(key.Hash())) % len(s.shards) - - s.locks[id].Lock() - s.shards[id].Put(key, value) - s.locks[id].Unlock() -}