@@ -345,8 +345,9 @@ func (c *RPCClient) closeConns() {
345
345
}
346
346
347
347
// Package-level caches of prometheus metric children, keyed by request/store
// attributes. They exist so the RPC hot path can skip the label-hashing cost
// of WithLabelValues on every call; sync.Map fits the append-heavy, mostly-read
// access pattern here.
var (
	sendReqHistCache        sync.Map
	sendReqCounterCache     sync.Map
	rpcNetLatencyHistmCache sync.Map
)
351
352
352
353
type sendReqHistCacheKey struct {
@@ -365,43 +366,65 @@ type sendReqCounterCacheValue struct {
365
366
timeCounter prometheus.Counter
366
367
}
367
368
368
- func (c * RPCClient ) updateTiKVSendReqHistogram (req * tikvrpc.Request , start time.Time , staleRead bool ) {
369
+ func (c * RPCClient ) updateTiKVSendReqHistogram (req * tikvrpc.Request , resp * tikvrpc.Response , start time.Time , staleRead bool ) {
370
+ elapsed := time .Since (start )
371
+ secs := elapsed .Seconds ()
372
+ storeID := req .Context .GetPeer ().GetStoreId ()
373
+
369
374
histKey := sendReqHistCacheKey {
370
375
req .Type ,
371
- req . Context . GetPeer (). GetStoreId () ,
376
+ storeID ,
372
377
staleRead ,
373
378
}
374
379
counterKey := sendReqCounterCacheKey {
375
380
histKey ,
376
381
req .GetRequestSource (),
377
382
}
378
383
384
+ reqType := req .Type .String ()
385
+ var storeIDStr string
386
+
379
387
hist , ok := sendReqHistCache .Load (histKey )
380
388
if ! ok {
381
- reqType := req .Type .String ()
382
- storeID := strconv .FormatUint (req .Context .GetPeer ().GetStoreId (), 10 )
383
- hist = metrics .TiKVSendReqHistogram .WithLabelValues (reqType , storeID , strconv .FormatBool (staleRead ))
389
+ if len (storeIDStr ) == 0 {
390
+ storeIDStr = strconv .FormatUint (storeID , 10 )
391
+ }
392
+ hist = metrics .TiKVSendReqHistogram .WithLabelValues (reqType , storeIDStr , strconv .FormatBool (staleRead ))
384
393
sendReqHistCache .Store (histKey , hist )
385
394
}
386
395
counter , ok := sendReqCounterCache .Load (counterKey )
387
396
if ! ok {
388
- reqType := req .Type .String ()
389
- storeID := strconv .FormatUint (req .Context .GetPeer ().GetStoreId (), 10 )
397
+ if len (storeIDStr ) == 0 {
398
+ storeIDStr = strconv .FormatUint (storeID , 10 )
399
+ }
390
400
counter = sendReqCounterCacheValue {
391
- metrics .TiKVSendReqCounter .WithLabelValues (reqType , storeID , strconv .FormatBool (staleRead ), counterKey .requestSource ),
392
- metrics .TiKVSendReqTimeCounter .WithLabelValues (reqType , storeID , strconv .FormatBool (staleRead ), counterKey .requestSource ),
401
+ metrics .TiKVSendReqCounter .WithLabelValues (reqType , storeIDStr , strconv .FormatBool (staleRead ), counterKey .requestSource ),
402
+ metrics .TiKVSendReqTimeCounter .WithLabelValues (reqType , storeIDStr , strconv .FormatBool (staleRead ), counterKey .requestSource ),
393
403
}
394
404
sendReqCounterCache .Store (counterKey , counter )
395
405
}
396
406
397
- secs := time .Since (start ).Seconds ()
398
407
hist .(prometheus.Observer ).Observe (secs )
399
408
counter .(sendReqCounterCacheValue ).counter .Inc ()
400
409
counter .(sendReqCounterCacheValue ).timeCounter .Add (secs )
410
+
411
+ if execDetail , err := resp .GetExecDetailsV2 (); err == nil &&
412
+ execDetail != nil && execDetail .TimeDetail != nil && execDetail .TimeDetail .TotalRpcWallTimeNs > 0 {
413
+ latHist , ok := rpcNetLatencyHistmCache .Load (storeID )
414
+ if ! ok {
415
+ if len (storeIDStr ) == 0 {
416
+ storeIDStr = strconv .FormatUint (storeID , 10 )
417
+ }
418
+ latHist = metrics .TiKVRPCNetLatencyHistogram .WithLabelValues (storeIDStr )
419
+ sendReqHistCache .Store (storeID , latHist )
420
+ }
421
+ latency := elapsed - time .Duration (execDetail .TimeDetail .TotalRpcWallTimeNs )* time .Nanosecond
422
+ latHist .(prometheus.Observer ).Observe (latency .Seconds ())
423
+ }
401
424
}
402
425
403
426
// SendRequest sends a Request to server and receives Response.
404
- func (c * RPCClient ) SendRequest (ctx context.Context , addr string , req * tikvrpc.Request , timeout time.Duration ) (* tikvrpc.Response , error ) {
427
+ func (c * RPCClient ) SendRequest (ctx context.Context , addr string , req * tikvrpc.Request , timeout time.Duration ) (resp * tikvrpc.Response , err error ) {
405
428
if span := opentracing .SpanFromContext (ctx ); span != nil && span .Tracer () != nil {
406
429
span1 := span .Tracer ().StartSpan (fmt .Sprintf ("rpcClient.SendRequest, region ID: %d, type: %s" , req .RegionId , req .Type ), opentracing .ChildOf (span .Context ()))
407
430
defer span1 .Finish ()
@@ -428,7 +451,7 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R
428
451
detail := stmtExec .(* util.ExecDetails )
429
452
atomic .AddInt64 (& detail .WaitKVRespDuration , int64 (time .Since (start )))
430
453
}
431
- c .updateTiKVSendReqHistogram (req , start , staleRead )
454
+ c .updateTiKVSendReqHistogram (req , resp , start , staleRead )
432
455
}()
433
456
434
457
// TiDB RPC server supports batch RPC, but batch connection will send heart beat, It's not necessary since
0 commit comments