executor: calibrate resource support tpch10 (#47095)

close #47094

guo-shaoge authored Sep 22, 2023
1 parent 47bc019 commit 7a116f3
Showing 15 changed files with 10,823 additions and 10,555 deletions.
2 changes: 1 addition & 1 deletion ddl/ddl_api.go
@@ -8480,7 +8480,7 @@ func checkIgnorePlacementDDL(ctx sessionctx.Context) bool {
// AddResourceGroup implements the DDL interface, creates a resource group.
func (d *ddl) AddResourceGroup(ctx sessionctx.Context, stmt *ast.CreateResourceGroupStmt) (err error) {
groupName := stmt.ResourceGroupName
groupInfo := &model.ResourceGroupInfo{Name: groupName, ResourceGroupSettings: &model.ResourceGroupSettings{}}
groupInfo := &model.ResourceGroupInfo{Name: groupName, ResourceGroupSettings: model.NewResourceGroupSettings()}
groupInfo, err = buildResourceGroup(groupInfo, stmt.ResourceGroupOptionList)
if err != nil {
return err
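This one-line change swaps a bare struct literal for a constructor, so newly created resource groups start from shared defaults rather than zero values. A minimal sketch of the pattern, assuming illustrative fields and defaults (the real model.NewResourceGroupSettings may set different ones):

package model

// ResourceGroupSettings is a stand-in with illustrative fields only; the
// defaults below are assumptions, not the real constructor's contents.
type ResourceGroupSettings struct {
	RURate     uint64
	BurstLimit int64
	Priority   uint64
}

// NewResourceGroupSettings centralizes defaults so every caller starts from
// the same baseline instead of a zero-valued struct.
func NewResourceGroupSettings() *ResourceGroupSettings {
	return &ResourceGroupSettings{
		RURate:     0, // assumed: unlimited until explicitly set
		BurstLimit: 0, // assumed: no burst allowance
		Priority:   8, // assumed: medium priority
	}
}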
1 change: 1 addition & 0 deletions executor/internal/calibrateresource/BUILD.bazel
@@ -23,6 +23,7 @@ go_library(
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_tikv_client_go_v2//oracle",
"@com_github_tikv_pd_client//resource_group/controller",
],
)

111 changes: 101 additions & 10 deletions executor/internal/calibrateresource/calibrate_resource.go
@@ -38,6 +38,7 @@ import (
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/tikv/client-go/v2/oracle"
resourceControlClient "github.com/tikv/pd/client/resource_group/controller"
)

var (
@@ -252,29 +253,40 @@ func (e *Executor) dynamicCalibrate(ctx context.Context, req *chunk.Chunk, exec
if err != nil {
return err
}
tidbQuota, err1 := e.getTiDBQuota(ctx, exec, startTs, endTs)
tiflashQuota, err2 := e.getTiFlashQuota(ctx, exec, startTs, endTs)
if err1 != nil && err2 != nil {
return err1
}
req.AppendUint64(0, uint64(tidbQuota+tiflashQuota))
return nil
}
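With this change, dynamic calibration derives the TiDB/TiKV quota and the TiFlash quota independently and reports their sum, failing only when both probes fail, so a cluster where one engine is absent or idle can still calibrate. A minimal sketch of that combine rule, assuming (as in the error paths below) that a failed probe returns a zero quota:

package main

import (
	"errors"
	"fmt"
)

// combineQuotas mirrors the logic above: surface an error only when neither
// source produced a usable quota; a failed side contributes zero to the sum.
func combineQuotas(tidbQuota, tiflashQuota float64, err1, err2 error) (float64, error) {
	if err1 != nil && err2 != nil {
		return 0, err1 // both probes failed; report the first error
	}
	return tidbQuota + tiflashQuota, nil
}

func main() {
	q, err := combineQuotas(5000, 0, nil, errors.New("no TiFlash metrics"))
	fmt.Println(q, err) // 5000 <nil>: one healthy probe is enough
}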

func (e *Executor) getTiDBQuota(ctx context.Context, exec sqlexec.RestrictedSQLExecutor, startTs, endTs time.Time) (float64, error) {
startTime := startTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)
endTime := endTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)

totalKVCPUQuota, err := getTiKVTotalCPUQuota(ctx, exec)
if err != nil {
return errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
return 0, errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
}
totalTiDBCPU, err := getTiDBTotalCPUQuota(ctx, exec)
if err != nil {
return errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
return 0, errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
}
rus, err := getRUPerSec(ctx, e.Ctx(), exec, startTime, endTime)
if err != nil {
return err
return 0, err
}
tikvCPUs, err := getComponentCPUUsagePerSec(ctx, e.Ctx(), exec, "tikv", startTime, endTime)
if err != nil {
return err
return 0, err
}
tidbCPUs, err := getComponentCPUUsagePerSec(ctx, e.Ctx(), exec, "tidb", startTime, endTime)
if err != nil {
return err
return 0, err
}

failpoint.Inject("mockMetricsDataFilter", func() {
ret := make([]*timePointValue, 0)
for _, point := range tikvCPUs.vals {
@@ -332,8 +344,16 @@ func (e *Executor) dynamicCalibrate(ctx context.Context, req *chunk.Chunk, exec
tidbCPUs.next()
tikvCPUs.next()
}
quota, err := setupQuotas(quotas)
if err != nil {
return 0, err
}
return quota, nil
}

func setupQuotas(quotas []float64) (float64, error) {
if len(quotas) < 2 {
return errLowUsage
return 0, errLowUsage
}
sort.Slice(quotas, func(i, j int) bool {
return quotas[i] > quotas[j]
@@ -344,9 +364,46 @@ func (e *Executor) dynamicCalibrate(ctx context.Context, req *chunk.Chunk, exec
for i := lowerBound; i < upperBound; i++ {
sum += quotas[i]
}
quota := sum / float64(upperBound-lowerBound)
req.AppendUint64(0, uint64(quota))
return nil
return sum / float64(upperBound-lowerBound), nil
}
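setupQuotas aggregates the per-second quota samples with a trimmed mean: sort descending, drop outliers at both ends, and average the middle band. The lowerBound/upperBound computation sits in lines this view elides, so the 10% trim fraction below is an assumption, and errLowUsage is replaced by a plain error to keep the sketch self-contained:

package main

import (
	"errors"
	"fmt"
	"sort"
)

// trimmedMean is a self-contained sketch of setupQuotas: average the samples
// that remain after discarding the (assumed) top and bottom 10%.
func trimmedMean(quotas []float64) (float64, error) {
	if len(quotas) < 2 {
		return 0, errors.New("workload in the selected time window is too low")
	}
	sort.Slice(quotas, func(i, j int) bool { return quotas[i] > quotas[j] })
	trim := len(quotas) / 10 // assumed trim fraction
	lowerBound, upperBound := trim, len(quotas)-trim
	sum := 0.0
	for i := lowerBound; i < upperBound; i++ {
		sum += quotas[i]
	}
	return sum / float64(upperBound-lowerBound), nil
}

func main() {
	quotas := []float64{930000, 731000, 730000, 729439, 728000, 120000}
	fmt.Println(trimmedMean(quotas)) // with this few samples the trim is zero
}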

func (e *Executor) getTiFlashQuota(ctx context.Context, exec sqlexec.RestrictedSQLExecutor, startTs, endTs time.Time) (float64, error) {
startTime := startTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)
endTime := endTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)

quotas := make([]float64, 0)
totalTiFlashLogicalCores, err := getTiFlashLogicalCores(ctx, exec)
if err != nil {
return 0, errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
}
tiflashCPUs, err := getTiFlashCPUUsagePerSec(ctx, e.Ctx(), exec, startTime, endTime)
if err != nil {
return 0, err
}
tiflashRUs, err := getTiFlashRUPerSec(ctx, e.Ctx(), exec, startTime, endTime)
if err != nil {
return 0, err
}
for {
if tiflashRUs.isEnd() || tiflashCPUs.isEnd() {
break
}
// align the RU and CPU series to the same time point
maxTime := tiflashRUs.getTime()
if tiflashCPUs.getTime().After(maxTime) {
maxTime = tiflashCPUs.getTime()
}
if !tiflashRUs.advance(maxTime) || !tiflashCPUs.advance(maxTime) {
continue
}
tiflashQuota := tiflashCPUs.getValue() / totalTiFlashLogicalCores
if tiflashQuota > lowUsageThreshold {
quotas = append(quotas, tiflashRUs.getValue()/tiflashQuota)
}
tiflashRUs.next()
tiflashCPUs.next()
}
return setupQuotas(quotas)
}
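getTiFlashQuota walks the two metric series with cursors, repeatedly advancing the slower one to the later of the two current timestamps so that each quota sample pairs an RU reading and a CPU reading taken at (approximately) the same instant; samples are kept only when CPU utilization clears lowUsageThreshold. A self-contained sketch of the alignment, where cursor stands in for timeSeriesValues and the 30-second match tolerance is an assumption:

package main

import (
	"fmt"
	"time"
)

// cursor is a simplified stand-in for the timeSeriesValues iterator in the
// diff; isEnd/getTime/getValue/advance/next mimic its assumed semantics.
type cursor struct {
	times []time.Time
	vals  []float64
	idx   int
}

func (c *cursor) isEnd() bool        { return c.idx >= len(c.times) }
func (c *cursor) getTime() time.Time { return c.times[c.idx] }
func (c *cursor) getValue() float64  { return c.vals[c.idx] }
func (c *cursor) next()              { c.idx++ }

// advance skips points older than t and reports whether the current point is
// close enough to t to pair with it (the tolerance is an assumption).
func (c *cursor) advance(t time.Time) bool {
	for !c.isEnd() && c.getTime().Before(t) {
		c.idx++
	}
	return !c.isEnd() && c.getTime().Sub(t) < 30*time.Second
}

// pairAligned mirrors the loop in getTiFlashQuota: advance both series to the
// later of their current timestamps, then consume one matched sample.
func pairAligned(ru, cpu *cursor, emit func(ruVal, cpuVal float64)) {
	for !ru.isEnd() && !cpu.isEnd() {
		t := ru.getTime()
		if cpu.getTime().After(t) {
			t = cpu.getTime()
		}
		if !ru.advance(t) || !cpu.advance(t) {
			continue
		}
		emit(ru.getValue(), cpu.getValue())
		ru.next()
		cpu.next()
	}
}

func main() {
	base := time.Now()
	ru := &cursor{times: []time.Time{base, base.Add(time.Minute)}, vals: []float64{700000, 650000}}
	cpu := &cursor{times: []time.Time{base, base.Add(time.Minute)}, vals: []float64{18.5, 17.9}}
	pairAligned(ru, cpu, func(r, c float64) { fmt.Println(r, c) })
}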

func (e *Executor) staticCalibrate(ctx context.Context, req *chunk.Chunk, exec sqlexec.RestrictedSQLExecutor) error {
@@ -358,6 +415,10 @@ func (e *Executor) staticCalibrate(ctx context.Context, req *chunk.Chunk, exec s
if resourceGroupCtl == nil {
return errors.New("resource group controller is not initialized")
}
ruCfg := resourceGroupCtl.GetConfig()
if e.WorkloadType == ast.TPCH10 {
return staticCalibrateTpch10(ctx, req, exec, ruCfg)
}

totalKVCPUQuota, err := getTiKVTotalCPUQuota(ctx, exec)
if err != nil {
@@ -380,7 +441,6 @@ func (e *Executor) staticCalibrate(ctx context.Context, req *chunk.Chunk, exec s
if totalTiDBCPU/baseCost.tidbToKVCPURatio < totalKVCPUQuota {
totalKVCPUQuota = totalTiDBCPU / baseCost.tidbToKVCPURatio
}
ruCfg := resourceGroupCtl.GetConfig()
ruPerKVCPU := float64(ruCfg.ReadBaseCost)*float64(baseCost.readReqCount) +
float64(ruCfg.CPUMsCost)*baseCost.kvCPU*1000 + // convert to ms
float64(ruCfg.ReadBytesCost)*float64(baseCost.readBytes) +
@@ -391,6 +451,22 @@ func (e *Executor) staticCalibrate(ctx context.Context, req *chunk.Chunk, exec s
return nil
}

func staticCalibrateTpch10(ctx context.Context, req *chunk.Chunk, exec sqlexec.RestrictedSQLExecutor, ruCfg *resourceControlClient.RUConfig) error {
// TPCH10 calibration considers only TiFlash resource usage (CPU and read bytes); other costs are ignored.
// cpu usage: 105494.666484 / 20 / 20 = 263.74
// read bytes: 401799161689.0 / 20 / 20 = 1004497904.22
const cpuTimePerCPUPerSec float64 = 263.74
const readBytesPerCPUPerSec float64 = 1004497904.22
ruPerCPU := float64(ruCfg.CPUMsCost)*cpuTimePerCPUPerSec + float64(ruCfg.ReadBytesCost)*readBytesPerCPUPerSec
totalTiFlashLogicalCores, err := getTiFlashLogicalCores(ctx, exec)
if err != nil {
return err
}
quota := totalTiFlashLogicalCores * ruPerCPU
req.AppendUint64(0, uint64(quota))
return nil
}
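The two constants bake in measurements from a TPCH10 benchmark run, normalized by the two divisions by 20 into per-logical-core, per-second figures; the quota is then simply cores × RU-per-core, priced with the cluster's RU config. A worked version of the arithmetic, with assumed RU coefficients standing in for the real ruCfg values:

package main

import "fmt"

func main() {
	// Reproduce the normalization from the comment above.
	fmt.Printf("%.2f\n", 105494.666484/20/20)  // 263.74 (CPU ms per core per sec)
	fmt.Printf("%.2f\n", 401799161689.0/20/20) // 1004497904.22 (read bytes per core per sec)

	// Hypothetical RU coefficients; the real values come from the PD resource
	// controller's RUConfig and may differ.
	const cpuMsCost = 1.0 / 3.0       // assumed: 1 RU per 3 ms of CPU time
	const readBytesCost = 1.0 / 65536 // assumed: 1 RU per 64 KiB read

	ruPerCPU := cpuMsCost*263.74 + readBytesCost*1004497904.22
	fmt.Printf("quota for 20 TiFlash cores: %.0f RU/s\n", 20*ruPerCPU)
}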

func getTiKVTotalCPUQuota(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) (float64, error) {
query := "SELECT SUM(value) FROM METRICS_SCHEMA.tikv_cpu_quota GROUP BY time ORDER BY time desc limit 1"
return getNumberFromMetrics(ctx, exec, query, "tikv_cpu_quota")
@@ -401,6 +477,21 @@ func getTiDBTotalCPUQuota(ctx context.Context, exec sqlexec.RestrictedSQLExecuto
return getNumberFromMetrics(ctx, exec, query, "tidb_server_maxprocs")
}

func getTiFlashLogicalCores(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) (float64, error) {
query := "SELECT SUM(value) FROM METRICS_SCHEMA.tiflash_cpu_quota GROUP BY time ORDER BY time desc limit 1"
return getNumberFromMetrics(ctx, exec, query, "tiflash_cpu_quota")
}

func getTiFlashRUPerSec(ctx context.Context, sctx sessionctx.Context, exec sqlexec.RestrictedSQLExecutor, startTime, endTime string) (*timeSeriesValues, error) {
query := fmt.Sprintf("SELECT time, value FROM METRICS_SCHEMA.tiflash_resource_manager_resource_unit where time >= '%s' and time <= '%s' ORDER BY time asc", startTime, endTime)
return getValuesFromMetrics(ctx, sctx, exec, query)
}

func getTiFlashCPUUsagePerSec(ctx context.Context, sctx sessionctx.Context, exec sqlexec.RestrictedSQLExecutor, startTime, endTime string) (*timeSeriesValues, error) {
query := fmt.Sprintf("SELECT time, sum(value) FROM METRICS_SCHEMA.tiflash_process_cpu_usage where time >= '%s' and time <= '%s' and job = 'tiflash' GROUP BY time ORDER BY time asc", startTime, endTime)
return getValuesFromMetrics(ctx, sctx, exec, query)
}

type timePointValue struct {
tp time.Time
val float64
119 changes: 119 additions & 0 deletions executor/internal/calibrateresource/calibrate_resource_test.go
@@ -651,7 +651,126 @@ func TestCalibrateResource(t *testing.T) {
}
tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE START_TIME '2020-02-12 10:35:00' END_TIME '2020-02-12 10:45:00'").Check(testkit.Rows("5492"))

// tiflash
mockData["resource_manager_resource_unit"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.322000"), 465919.8102127319),
types.MakeDatums(datetime("2023-09-19 19:51:39.322000"), 819764.9742611333),
types.MakeDatums(datetime("2023-09-19 19:52:39.322000"), 520180.7089147462),
types.MakeDatums(datetime("2023-09-19 19:53:39.322000"), 790496.4071700446),
types.MakeDatums(datetime("2023-09-19 19:54:39.322000"), 545216.2174551424),
types.MakeDatums(datetime("2023-09-19 19:55:39.322000"), 714332.5760632281),
types.MakeDatums(datetime("2023-09-19 19:56:39.322000"), 577119.1037253677),
types.MakeDatums(datetime("2023-09-19 19:57:39.322000"), 678005.0740038564),
types.MakeDatums(datetime("2023-09-19 19:58:39.322000"), 592239.6784597588),
types.MakeDatums(datetime("2023-09-19 19:59:39.322000"), 666552.6950822703),
types.MakeDatums(datetime("2023-09-19 20:00:39.322000"), 689703.5663975218),
}

mockData["process_cpu_usage"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.324000"), "127.0.0.1:10080", "tidb", 0.10511111111111152),
types.MakeDatums(datetime("2023-09-19 19:51:39.324000"), "127.0.0.1:10080", "tidb", 0.1293333333333332),
types.MakeDatums(datetime("2023-09-19 19:52:39.324000"), "127.0.0.1:10080", "tidb", 0.11088888888888908),
types.MakeDatums(datetime("2023-09-19 19:53:39.324000"), "127.0.0.1:10080", "tidb", 0.12333333333333357),
types.MakeDatums(datetime("2023-09-19 19:54:39.324000"), "127.0.0.1:10080", "tidb", 0.1160000000000006),
types.MakeDatums(datetime("2023-09-19 19:55:39.324000"), "127.0.0.1:10080", "tidb", 0.11888888888888813),
types.MakeDatums(datetime("2023-09-19 19:56:39.324000"), "127.0.0.1:10080", "tidb", 0.1106666666666658),
types.MakeDatums(datetime("2023-09-19 19:57:39.324000"), "127.0.0.1:10080", "tidb", 0.11311111111111055),
types.MakeDatums(datetime("2023-09-19 19:58:39.324000"), "127.0.0.1:10080", "tidb", 0.11222222222222247),
types.MakeDatums(datetime("2023-09-19 19:59:39.324000"), "127.0.0.1:10080", "tidb", 0.11488888888888923),
types.MakeDatums(datetime("2023-09-19 20:00:39.324000"), "127.0.0.1:10080", "tidb", 0.12733333333333371),
types.MakeDatums(datetime("2023-09-19 19:50:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:51:39.325000"), "127.0.0.1:20180", "tikv", 0.02222222222222222),
types.MakeDatums(datetime("2023-09-19 19:52:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:53:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:54:39.325000"), "127.0.0.1:20180", "tikv", 0.08888888888888888),
types.MakeDatums(datetime("2023-09-19 19:55:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:56:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:57:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:58:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 19:59:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
types.MakeDatums(datetime("2023-09-19 20:00:39.325000"), "127.0.0.1:20180", "tikv", 0.04444444444444444),
}

mockData["tidb_server_maxprocs"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:51:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:52:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:53:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2022-09-19 19:54:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:55:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:56:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:57:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:58:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 19:59:39.329000"), "127.0.0.1:10080", 20.0),
types.MakeDatums(datetime("2023-09-19 20:00:39.329000"), "127.0.0.1:10080", 20.0),
}

mockData["tikv_cpu_quota"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:51:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:52:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:53:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:54:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:55:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:56:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:57:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:58:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 19:59:39.330000"), "127.0.0.1:20180", 20.0),
types.MakeDatums(datetime("2023-09-19 20:00:39.330000"), "127.0.0.1:20180", 20.0),
}
rs, err = tk.Exec("CALIBRATE RESOURCE START_TIME '2023-09-19 19:50:39' DURATION '10m'")
require.NoError(t, err)
require.NotNil(t, rs)
err = rs.Next(ctx, rs.NewChunk(nil))
require.ErrorContains(t, err, "The workload in selected time window is too low")

mockData["tiflash_process_cpu_usage"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.327000"), "127.0.0.1:20292", "tiflash", 18.577777777777776),
types.MakeDatums(datetime("2023-09-19 19:51:39.327000"), "127.0.0.1:20292", "tiflash", 17.666666666666668),
types.MakeDatums(datetime("2023-09-19 19:52:39.327000"), "127.0.0.1:20292", "tiflash", 18.339038812074868),
types.MakeDatums(datetime("2023-09-19 19:53:39.327000"), "127.0.0.1:20292", "tiflash", 17.82222222222222),
types.MakeDatums(datetime("2023-09-19 19:54:39.327000"), "127.0.0.1:20292", "tiflash", 18.177777777777774),
types.MakeDatums(datetime("2023-09-19 19:55:39.327000"), "127.0.0.1:20292", "tiflash", 17.911111111111108),
types.MakeDatums(datetime("2023-09-19 19:56:39.327000"), "127.0.0.1:20292", "tiflash", 17.177777777777774),
types.MakeDatums(datetime("2023-09-19 19:57:39.327000"), "127.0.0.1:20292", "tiflash", 16.17957550838982),
types.MakeDatums(datetime("2023-09-19 19:58:39.327000"), "127.0.0.1:20292", "tiflash", 16.844444444444445),
types.MakeDatums(datetime("2023-09-19 19:59:39.327000"), "127.0.0.1:20292", "tiflash", 17.71111111111111),
types.MakeDatums(datetime("2023-09-19 20:00:39.327000"), "127.0.0.1:20292", "tiflash", 18.066666666666666),
}

mockData["tiflash_resource_manager_resource_unit"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.318000"), 487049.3164728853),
types.MakeDatums(datetime("2023-09-19 19:51:39.318000"), 821600.8181867122),
types.MakeDatums(datetime("2023-09-19 19:52:39.318000"), 507566.26041673025),
types.MakeDatums(datetime("2023-09-19 19:53:39.318000"), 771038.8122556474),
types.MakeDatums(datetime("2023-09-19 19:54:39.318000"), 529128.4530634031),
types.MakeDatums(datetime("2023-09-19 19:55:39.318000"), 777912.9275530444),
types.MakeDatums(datetime("2023-09-19 19:56:39.318000"), 557595.6206041124),
types.MakeDatums(datetime("2023-09-19 19:57:39.318000"), 688658.1706168016),
types.MakeDatums(datetime("2023-09-19 19:58:39.318000"), 556400.2766714202),
types.MakeDatums(datetime("2023-09-19 19:59:39.318000"), 712467.4348424983),
types.MakeDatums(datetime("2023-09-19 20:00:39.318000"), 659167.0340155548),
}

mockData["tiflash_cpu_quota"] = [][]types.Datum{
types.MakeDatums(datetime("2023-09-19 19:50:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:51:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:52:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:53:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:54:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:55:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:56:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:57:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:58:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 19:59:39.502000"), "127.0.0.1:8234 ", 20.0),
types.MakeDatums(datetime("2023-09-19 20:00:39.502000"), "127.0.0.1:8234 ", 20.0),
}
tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE START_TIME '2023-09-19 19:50:39' DURATION '10m'").Check(testkit.Rows("729439"))

delete(mockData, "process_cpu_usage")
tk.MustQueryWithContext(ctx, "CALIBRATE RESOURCE START_TIME '2020-02-12 10:35:00' END_TIME '2020-02-12 10:45:00'").Check(testkit.Rows("729439"))

delete(mockData, "tiflash_process_cpu_usage")
rs, err = tk.Exec("CALIBRATE RESOURCE START_TIME '2020-02-12 10:35:00' END_TIME '2020-02-12 10:45:00'")
require.NoError(t, err)
require.NotNil(t, rs)
1 change: 1 addition & 0 deletions executor/internal/mpp/BUILD.bazel
@@ -15,6 +15,7 @@ go_library(
"//kv",
"//planner/core",
"//sessionctx",
"//sessionctx/variable",
"//store/copr",
"//store/driver/backoff",
"//store/driver/error",
7 changes: 6 additions & 1 deletion executor/internal/mpp/local_mpp_coordinator.go
@@ -35,6 +35,7 @@ import (
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/copr"
"github.com/pingcap/tidb/store/driver/backoff"
derr "github.com/pingcap/tidb/store/driver/error"
@@ -215,6 +216,10 @@ func (c *localMppCoordinator) appendMPPDispatchReq(pf *plannercore.Fragment) err
zap.String("exchange-compression-mode", pf.ExchangeSender.CompressionMode.Name()),
zap.Uint64("GatherID", c.gatherID),
)
rgName := c.sessionCtx.GetSessionVars().ResourceGroupName
if !variable.EnableResourceControl.Load() {
rgName = ""
}
req := &kv.MPPDispatchRequest{
Data: pbData,
Meta: mppTask.Meta,
@@ -229,7 +234,7 @@ func (c *localMppCoordinator) appendMPPDispatchReq(pf *plannercore.Fragment) err
CoordinatorAddress: c.coordinatorAddr,
ReportExecutionSummary: c.reportExecutionInfo,
State: kv.MppTaskReady,
ResourceGroupName: c.sessionCtx.GetSessionVars().ResourceGroupName,
ResourceGroupName: rgName,
}
c.reqMap[req.ID] = &mppRequestReport{mppReq: req, receivedReport: false, errMsg: "", executionSummaries: nil}
c.mppReqs = append(c.mppReqs, req)
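The coordinator now blanks the resource group name on MPP dispatch requests whenever global resource control is switched off, presumably so TiFlash does not meter work against a group that is not being enforced (the diff shows only the blanking; the rationale is inferred). A minimal sketch of the gating:

package main

import "fmt"

// effectiveResourceGroup mirrors the change above: the session's group name is
// forwarded only while resource control is enabled.
func effectiveResourceGroup(sessionGroup string, resourceControlOn bool) string {
	if !resourceControlOn {
		return ""
	}
	return sessionGroup
}

func main() {
	fmt.Println(effectiveResourceGroup("rg1", true))  // "rg1"
	fmt.Println(effectiveResourceGroup("rg1", false)) // ""
}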
11 changes: 11 additions & 0 deletions infoschema/metric_table_def.go
@@ -78,6 +78,17 @@ var MetricTableMap = map[string]MetricTableDef{
PromQL: "rate(process_cpu_seconds_total{$LABEL_CONDITIONS}[$RANGE_DURATION])",
Labels: []string{"instance", "job"},
},
"tiflash_process_cpu_usage": {
PromQL: "rate(tiflash_proxy_process_cpu_seconds_total{$LABEL_CONDITIONS}[$RANGE_DURATION])",
Labels: []string{"instance", "job"},
},
"tiflash_cpu_quota": {
PromQL: "tiflash_system_current_metric_LogicalCPUCores{$LABEL_CONDITIONS}",
Labels: []string{"instance"},
},
"tiflash_resource_manager_resource_unit": {
PromQL: "sum(rate(tiflash_compute_request_unit[$RANGE_DURATION]))",
},
"tidb_connection_count": {
PromQL: "tidb_server_connections{$LABEL_CONDITIONS}",
Labels: []string{"instance"},
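These three MetricTableDef entries are what let the calibration code above use plain SQL: each key becomes a virtual table under METRICS_SCHEMA, and a query such as getTiFlashLogicalCores' SELECT SUM(value) FROM METRICS_SCHEMA.tiflash_cpu_quota is answered by evaluating the registered PromQL. A runnable sketch of the mapping, reproducing the definitions from the diff in a simplified MetricTableDef type:

package main

import "fmt"

// MetricTableDef mirrors the shape of the map entries above; only the two
// fields used there are reproduced.
type MetricTableDef struct {
	PromQL string
	Labels []string
}

func main() {
	// The three definitions added by this commit.
	defs := map[string]MetricTableDef{
		"tiflash_process_cpu_usage": {
			PromQL: "rate(tiflash_proxy_process_cpu_seconds_total{$LABEL_CONDITIONS}[$RANGE_DURATION])",
			Labels: []string{"instance", "job"},
		},
		"tiflash_cpu_quota": {
			PromQL: "tiflash_system_current_metric_LogicalCPUCores{$LABEL_CONDITIONS}",
			Labels: []string{"instance"},
		},
		"tiflash_resource_manager_resource_unit": {
			PromQL: "sum(rate(tiflash_compute_request_unit[$RANGE_DURATION]))",
		},
	}
	for name, d := range defs {
		fmt.Printf("METRICS_SCHEMA.%s -> %s (labels %v)\n", name, d.PromQL, d.Labels)
	}
}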