*: add execution retry time in slow log and slow_query and statement_summary (#19625) #20495

Merged · 6 commits · Oct 21, 2020
Changes from 1 commit
16 changes: 13 additions & 3 deletions executor/adapter.go
@@ -179,6 +179,7 @@ type ExecStmt struct {
isPreparedStmt bool
isSelectForUpdate bool
retryCount uint
retryStartTime time.Time

// OutputNames will be set if using cached plan
OutputNames []*types.FieldName
@@ -641,6 +642,7 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, err error) (E
return nil, errors.New("pessimistic lock retry limit reached")
}
a.retryCount++
a.retryStartTime = time.Now()
err = UpdateForUpdateTS(a.Ctx, newForUpdateTS)
if err != nil {
return nil, err
@@ -896,6 +898,9 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) {
WriteSQLRespTotal: stmtDetail.WriteSQLRespDuration,
RewriteInfo: sessVars.RewritePhaseInfo,
}
if a.retryCount > 0 {
slowItems.ExecRetryTime = costTime - sessVars.DurationParse - sessVars.DurationCompile - time.Since(a.retryStartTime)
}
if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
slowItems.PrevStmt = sessVars.PrevStmt.String()
}
@@ -974,7 +979,7 @@ func (a *ExecStmt) SummaryStmt(succ bool) {
}
stmtCtx := sessVars.StmtCtx
normalizedSQL, digest := stmtCtx.SQLDigest()
costTime := time.Since(sessVars.StartTime)
costTime := time.Since(sessVars.StartTime) + sessVars.DurationParse

var prevSQL, prevSQLDigest string
if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
@@ -1010,7 +1015,7 @@ func (a *ExecStmt) SummaryStmt(succ bool) {
memMax := stmtCtx.MemTracker.MaxConsumed()
diskMax := stmtCtx.DiskTracker.MaxConsumed()
sql := a.GetTextToLog()
stmtsummary.StmtSummaryByDigestMap.AddStatement(&stmtsummary.StmtExecInfo{
stmtExecInfo := &stmtsummary.StmtExecInfo{
SchemaName: strings.ToLower(sessVars.CurrentDB),
OriginalSQL: sql,
NormalizedSQL: normalizedSQL,
@@ -1033,7 +1038,12 @@ func (a *ExecStmt) SummaryStmt(succ bool) {
IsInternal: sessVars.InRestrictedSQL,
Succeed: succ,
PlanInCache: sessVars.FoundInPlanCache,
})
ExecRetryCount: a.retryCount,
}
if a.retryCount > 0 {
stmtExecInfo.ExecRetryTime = costTime - sessVars.DurationParse - sessVars.DurationCompile - time.Since(a.retryStartTime)
}
stmtsummary.StmtSummaryByDigestMap.AddStatement(stmtExecInfo)
}

// GetTextToLog return the query text to log.
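The adapter.go hunks above carry the core bookkeeping: `retryStartTime` is reset every time a pessimistic lock conflict forces a re-execution, and the retry time reported to the slow log and the statement summary is the total statement cost minus parse time, compile time, and the duration of the final attempt (`time.Since(a.retryStartTime)`). Below is a self-contained sketch of that arithmetic; `retryTracker` and its methods are illustrative names, not TiDB APIs.

```go
package main

import (
	"fmt"
	"time"
)

// retryTracker mirrors the bookkeeping added to ExecStmt: retryCount is
// bumped and retryStartTime reset each time a pessimistic lock conflict
// forces the statement to run again.
type retryTracker struct {
	retryCount     uint
	retryStartTime time.Time
}

func (r *retryTracker) onPessimisticLockRetry() {
	r.retryCount++
	r.retryStartTime = time.Now()
}

// execRetryTime reproduces the logging formula: total statement cost minus
// parse time, compile time, and the duration of the last (successful)
// attempt, i.e. roughly the time spent on discarded attempts.
func (r *retryTracker) execRetryTime(costTime, parse, compile time.Duration) time.Duration {
	if r.retryCount == 0 {
		return 0
	}
	return costTime - parse - compile - time.Since(r.retryStartTime)
}

func main() {
	t := &retryTracker{}
	t.onPessimisticLockRetry()
	time.Sleep(10 * time.Millisecond) // pretend the final attempt takes ~10ms
	// With a 50ms total cost, 1ms parse and 2ms compile, roughly 37ms is
	// attributed to retries.
	fmt.Println(t.execRetryTime(50*time.Millisecond, time.Millisecond, 2*time.Millisecond))
}
```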
17 changes: 17 additions & 0 deletions executor/slow_query.go
@@ -321,6 +321,11 @@ type slowQueryTuple struct {
user string
host string
connID uint64
execRetryCount uint64
execRetryTime float64
queryTime float64
parseTime float64
compileTime float64
Expand Down Expand Up @@ -417,6 +422,13 @@ func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string,
}
case variable.SlowLogConnIDStr:
st.connID, err = strconv.ParseUint(value, 10, 64)
case variable.SlowLogExecRetryCount:
st.execRetryCount, err = strconv.ParseUint(value, 10, 64)
case variable.SlowLogExecRetryTime:
st.execRetryTime, err = strconv.ParseFloat(value, 64)
case variable.SlowLogQueryTimeStr:
st.queryTime, err = strconv.ParseFloat(value, 64)
case variable.SlowLogParseTimeStr:
Expand Down Expand Up @@ -523,6 +535,11 @@ func (st *slowQueryTuple) convertToDatumRow() []types.Datum {
record = append(record, types.NewStringDatum(st.user))
record = append(record, types.NewStringDatum(st.host))
record = append(record, types.NewUintDatum(st.connID))
record = append(record, types.NewUintDatum(st.execRetryCount))
record = append(record, types.NewFloat64Datum(st.execRetryTime))
record = append(record, types.NewFloat64Datum(st.queryTime))
record = append(record, types.NewFloat64Datum(st.parseTime))
record = append(record, types.NewFloat64Datum(st.compileTime))
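For reference, the writer side (see sessionctx/variable/session.go below) emits the two values as a single `# Exec_retry_time: <seconds> Exec_retry_count: <n>` line, and `setFieldValue` parses them back with `strconv`. The sketch below imitates that round trip on one sample line; `parseExecRetry` is an illustrative standalone helper, not part of TiDB.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Field names as written to the slow log by this change.
const (
	slowLogExecRetryCount = "Exec_retry_count"
	slowLogExecRetryTime  = "Exec_retry_time"
)

// parseExecRetry pulls the two fields out of a "# key: value key: value"
// slow-log line, the same way slowQueryTuple.setFieldValue handles them.
func parseExecRetry(line string) (count uint64, seconds float64, err error) {
	fields := strings.Fields(strings.TrimPrefix(line, "# "))
	for i := 0; i+1 < len(fields); i += 2 {
		switch strings.TrimSuffix(fields[i], ":") {
		case slowLogExecRetryCount:
			count, err = strconv.ParseUint(fields[i+1], 10, 64)
		case slowLogExecRetryTime:
			seconds, err = strconv.ParseFloat(fields[i+1], 64)
		}
		if err != nil {
			return 0, 0, err
		}
	}
	return count, seconds, nil
}

func main() {
	// Prints: 57 0.12 <nil>
	fmt.Println(parseExecRetry("# Exec_retry_time: 0.12 Exec_retry_count: 57"))
}
```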
8 changes: 8 additions & 0 deletions executor/slow_query_test.go
@@ -47,6 +47,10 @@ func (s *testExecSuite) TestParseSlowLogFile(c *C) {
`# Time: 2019-04-28T15:24:04.309074+08:00
# Txn_start_ts: 405888132465033227
# User@Host: root[root] @ localhost [127.0.0.1]
# Exec_retry_time: 0.12 Exec_retry_count: 57
# Query_time: 0.216905
# Cop_time: 0.38 Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Is_internal: true
@@ -79,7 +83,11 @@ select * from t;`
}
recordString += str
}
expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,root,localhost,0,57,0.12,0.216905,0,0,0,0,0,0,0,0,0,0,,0,0,0,0,0,0,0.38,0.021,0,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,65536,0,1,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,update t set i = 1;,select * from t;"
c.Assert(expectRecordString, Equals, recordString)

// fix sql contain '# ' bug
7 changes: 7 additions & 0 deletions infoschema/tables.go
@@ -718,6 +718,11 @@ var slowQueryCols = []columnInfo{
{name: variable.SlowLogUserStr, tp: mysql.TypeVarchar, size: 64},
{name: variable.SlowLogHostStr, tp: mysql.TypeVarchar, size: 64},
{name: variable.SlowLogConnIDStr, tp: mysql.TypeLonglong, size: 20, flag: mysql.UnsignedFlag},
{name: variable.SlowLogExecRetryCount, tp: mysql.TypeLonglong, size: 20, flag: mysql.UnsignedFlag},
{name: variable.SlowLogExecRetryTime, tp: mysql.TypeDouble, size: 22},
{name: variable.SlowLogQueryTimeStr, tp: mysql.TypeDouble, size: 22},
{name: variable.SlowLogParseTimeStr, tp: mysql.TypeDouble, size: 22},
{name: variable.SlowLogCompileTimeStr, tp: mysql.TypeDouble, size: 22},
@@ -1204,6 +1209,8 @@ var tableStatementsSummaryCols = []columnInfo{
{name: "MAX_PREWRITE_REGIONS", tp: mysql.TypeLong, size: 11, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Max number of involved regions in prewrite phase"},
{name: "AVG_TXN_RETRY", tp: mysql.TypeDouble, size: 22, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Average number of transaction retries"},
{name: "MAX_TXN_RETRY", tp: mysql.TypeLong, size: 11, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Max number of transaction retries"},
{name: "SUM_EXEC_RETRY", tp: mysql.TypeLonglong, size: 20, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Sum number of execution retries in pessimistic transactions"},
{name: "SUM_EXEC_RETRY_TIME", tp: mysql.TypeLonglong, size: 20, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Sum time of execution retries in pessimistic transactions"},
{name: "SUM_BACKOFF_TIMES", tp: mysql.TypeLonglong, size: 20, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Sum of retries"},
{name: "BACKOFF_TYPES", tp: mysql.TypeVarchar, size: 1024, comment: "Types of errors and the number of retries for each type"},
{name: "AVG_MEM", tp: mysql.TypeLonglong, size: 20, flag: mysql.NotNullFlag | mysql.UnsignedFlag, comment: "Average memory(byte) used"},
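With the two columns registered right after `Conn_ID` in `slowQueryCols`, and `SUM_EXEC_RETRY` / `SUM_EXEC_RETRY_TIME` added to the statements summary schema, the data becomes queryable through `information_schema`. A rough usage sketch, assuming a local TiDB listening on port 4000 and the go-sql-driver/mysql driver (both assumptions, neither is part of this change):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any MySQL-compatible driver works
)

func main() {
	// Assumed DSN for a local TiDB instance; adjust user/host/port as needed.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/information_schema")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// List slow statements that had to retry, using the new columns.
	rows, err := db.Query("SELECT query, exec_retry_count, exec_retry_time FROM slow_query WHERE exec_retry_count > 0")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var query string
		var retryCount uint64
		var retrySeconds float64
		if err := rows.Scan(&query, &retryCount, &retrySeconds); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: retried %d times, %.3fs spent retrying\n", query, retryCount, retrySeconds)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```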
11 changes: 11 additions & 0 deletions infoschema/tables_test.go
@@ -547,6 +547,10 @@ func prepareSlowLogfile(c *C, slowLogFileName string) {
# Txn_start_ts: 406315658548871171
# User@Host: root[root] @ localhost [127.0.0.1]
# Conn_ID: 6
# Exec_retry_time: 0.12 Exec_retry_count: 57
# Query_time: 4.895492
# Parse_time: 0.4
# Compile_time: 0.2
@@ -635,10 +639,17 @@ func (s *testTableSuite) TestSlowQuery(c *C) {
tk.MustExec("set time_zone = '+08:00';")
re := tk.MustQuery("select * from information_schema.slow_query")
re.Check(testutil.RowsWithSep("|",
"2019-02-12 19:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|1|1|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;"))
tk.MustExec("set time_zone = '+00:00';")
re = tk.MustQuery("select * from information_schema.slow_query")
re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|localhost|6|57|0.12|4.895492|0.4|0.2|0.000000003|2|0.000000002|0.19|0.21|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.3824278|0.161|0.101|0.092|1.71|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|65536|1|1|abcd|60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4|update t set i = 2;|select * from t_slim;"))

// Test for long query.
f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644)
26 changes: 26 additions & 0 deletions sessionctx/variable/session.go
@@ -1610,6 +1610,13 @@ const (
SlowLogBackoffTotal = "Backoff_total"
// SlowLogWriteSQLRespTotal is the total time used to write response to client.
SlowLogWriteSQLRespTotal = "Write_sql_response_total"
// SlowLogExecRetryCount is the execution retry count.
SlowLogExecRetryCount = "Exec_retry_count"
// SlowLogExecRetryTime is the execution retry time.
SlowLogExecRetryTime = "Exec_retry_time"
)

// SlowQueryLogItems is a collection of items that should be included in the
@@ -1638,7 +1645,12 @@ type SlowQueryLogItems struct {
PDTotal time.Duration
BackoffTotal time.Duration
WriteSQLRespTotal time.Duration
RewriteInfo RewritePhaseInfo
ExecRetryCount uint
ExecRetryTime time.Duration
}

// SlowLogFormat uses for formatting slow log.
@@ -1676,6 +1688,20 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string {
if s.ConnectionID != 0 {
writeSlowLogItem(&buf, SlowLogConnIDStr, strconv.FormatUint(s.ConnectionID, 10))
}
if logItems.ExecRetryCount > 0 {
buf.WriteString(SlowLogRowPrefixStr)
buf.WriteString(SlowLogExecRetryTime)
buf.WriteString(SlowLogSpaceMarkStr)
buf.WriteString(strconv.FormatFloat(logItems.ExecRetryTime.Seconds(), 'f', -1, 64))
buf.WriteString(" ")
buf.WriteString(SlowLogExecRetryCount)
buf.WriteString(SlowLogSpaceMarkStr)
buf.WriteString(strconv.Itoa(int(logItems.ExecRetryCount)))
buf.WriteString("\n")
}
writeSlowLogItem(&buf, SlowLogQueryTimeStr, strconv.FormatFloat(logItems.TimeTotal.Seconds(), 'f', -1, 64))
writeSlowLogItem(&buf, SlowLogParseTimeStr, strconv.FormatFloat(logItems.TimeParse.Seconds(), 'f', -1, 64))
writeSlowLogItem(&buf, SlowLogCompileTimeStr, strconv.FormatFloat(logItems.TimeCompile.Seconds(), 'f', -1, 64))
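The new `SlowLogFormat` branch only fires when at least one retry happened, and it packs both values onto a single `# `-prefixed line (time first, in seconds) instead of going through `writeSlowLogItem`, which emits one field per line. Below is a minimal sketch of that formatting with the surrounding TiDB types stripped away; for the values used in TestSlowLogFormat below (3 retries, 5.1s) it prints exactly the expected fixture line.

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"time"
)

// appendExecRetry mimics the branch added to SlowLogFormat: only when at
// least one retry happened, write both fields on one "# "-prefixed line.
func appendExecRetry(buf *bytes.Buffer, retryCount uint, retryTime time.Duration) {
	if retryCount == 0 {
		return
	}
	buf.WriteString("# Exec_retry_time: ")
	buf.WriteString(strconv.FormatFloat(retryTime.Seconds(), 'f', -1, 64))
	buf.WriteString(" Exec_retry_count: ")
	buf.WriteString(strconv.Itoa(int(retryCount)))
	buf.WriteString("\n")
}

func main() {
	var buf bytes.Buffer
	appendExecRetry(&buf, 3, 5*time.Second+100*time.Millisecond)
	fmt.Print(buf.String()) // # Exec_retry_time: 5.1 Exec_retry_count: 3
}
```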
9 changes: 9 additions & 0 deletions sessionctx/variable/session_test.go
@@ -178,6 +178,10 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) {
resultFields := `# Txn_start_ts: 406649736972468225
# User@Host: root[root] @ 192.168.0.1 [192.168.0.1]
# Conn_ID: 1
# Exec_retry_time: 5.1 Exec_retry_count: 3
# Query_time: 1
# Parse_time: 0.00000001
# Compile_time: 0.00000001
@@ -232,6 +236,11 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) {
DurationPreprocessSubQuery: 2,
PreprocessSubQueries: 2,
},
ExecRetryCount: 3,
ExecRetryTime: 5*time.Second + time.Millisecond*100,
}
logString := seVar.SlowLogFormat(logItems)
c.Assert(logString, Equals, resultFields+"\n"+sql)
13 changes: 13 additions & 0 deletions util/stmtsummary/statement_summary.go
@@ -157,6 +157,8 @@ type stmtSummaryByDigestElement struct {
maxPrewriteRegionNum int32
sumTxnRetry int64
maxTxnRetry int
sumExecRetryCount int64
sumExecRetryTime time.Duration
sumBackoffTimes int64
backoffTypes map[fmt.Stringer]int
authUsers map[string]struct{}
@@ -173,6 +175,9 @@ type stmtSummaryByDigestElement struct {
// plan cache
planInCache bool
planCacheHits int64
// pessimistic execution retry information.
execRetryCount uint
execRetryTime time.Duration
}

// StmtExecInfo records execution information of each statement.
@@ -199,6 +204,8 @@ type StmtExecInfo struct {
IsInternal bool
Succeed bool
PlanInCache bool
ExecRetryCount uint
ExecRetryTime time.Duration
}

// newStmtSummaryByDigestMap creates an empty stmtSummaryByDigestMap.
@@ -754,6 +761,10 @@ func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo, intervalSeco
if ssElement.lastSeen.Before(sei.StartTime) {
ssElement.lastSeen = sei.StartTime
}
if sei.ExecRetryCount > 0 {
ssElement.execRetryCount += sei.ExecRetryCount
ssElement.execRetryTime += sei.ExecRetryTime
}
}

func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest) []types.Datum {
Expand Down Expand Up @@ -829,6 +840,8 @@ func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest)
int(ssElement.maxPrewriteRegionNum),
avgFloat(ssElement.sumTxnRetry, ssElement.commitCount),
ssElement.maxTxnRetry,
int(ssElement.execRetryCount),
int64(ssElement.execRetryTime),
ssElement.sumBackoffTimes,
formatBackoffTypes(ssElement.backoffTypes),
avgInt(ssElement.sumMem, ssElement.execCount),
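In the statement summary, retries are accumulated per digest and per time window inside `add`, then surfaced by `toDatum` as `SUM_EXEC_RETRY` and `SUM_EXEC_RETRY_TIME` (the duration is emitted as its integer nanosecond count via `int64(ssElement.execRetryTime)`). A small sketch of that accumulation pattern, using illustrative names rather than the real `stmtSummaryByDigestElement`:

```go
package main

import (
	"fmt"
	"time"
)

// digestSummary mirrors the two counters accumulated per statement digest:
// each execution that retried contributes its retry count and the time
// spent on the discarded attempts.
type digestSummary struct {
	execRetryCount uint
	execRetryTime  time.Duration
}

func (d *digestSummary) add(retryCount uint, retryTime time.Duration) {
	if retryCount > 0 {
		d.execRetryCount += retryCount
		d.execRetryTime += retryTime
	}
}

func main() {
	var d digestSummary
	d.add(2, 30*time.Millisecond)
	d.add(0, 0) // executions without retries leave the counters untouched
	d.add(1, 10*time.Millisecond)
	// These surface as SUM_EXEC_RETRY and SUM_EXEC_RETRY_TIME (nanoseconds).
	fmt.Println(d.execRetryCount, int64(d.execRetryTime))
}
```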
2 changes: 1 addition & 1 deletion util/stmtsummary/statement_summary_test.go
@@ -612,7 +612,7 @@ func (s *testStmtSummarySuite) TestToDatum(c *C) {
stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys,
stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize,
stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum,
stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, 1,
stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, 0, 0, 1,
"txnLock:1", stmtExecInfo1.MemMax, stmtExecInfo1.MemMax, stmtExecInfo1.DiskMax, stmtExecInfo1.DiskMax, stmtExecInfo1.StmtCtx.AffectedRows(),
t, t, 0, 0, stmtExecInfo1.OriginalSQL, stmtExecInfo1.PrevSQL, "plan_digest", ""}
match(c, datums[0], expectedDatum...)