From c2ddb6621deaea914d312cbddefa201ca7cb385a Mon Sep 17 00:00:00 2001
From: Dongli Zhang <dongli.zhang0129@gmail.com>
Date: Sun, 1 Dec 2019 23:44:29 -0800
Subject: [PATCH 1/1] block comment for block and drivers for linux-5.3
Signed-off-by: Dongli Zhang <dongli.zhang0129@gmail.com>
---
block/badblocks.c | 26 ++
block/bio-integrity.c | 8 +
block/bio.c | 43 +++
block/blk-cgroup.c | 34 ++
block/blk-core.c | 9 +
block/blk-flush.c | 97 +++++
block/blk-integrity.c | 13 +
block/blk-iolatency.c | 243 +++++++++++++
block/blk-map.c | 62 ++++
block/blk-merge.c | 55 +++
block/blk-mq-cpumap.c | 41 +++
block/blk-mq-pci.c | 32 ++
block/blk-mq-rdma.c | 18 +
block/blk-mq-sched.c | 15 +
block/blk-mq-tag.c | 113 ++++++
block/blk-mq-tag.h | 38 ++
block/blk-mq-virtio.c | 31 ++
block/blk-mq.c | 655 ++++++++++++++++++++++++++++++++++
block/blk-mq.h | 44 +++
block/blk-rq-qos.c | 31 ++
block/blk-rq-qos.h | 28 ++
block/blk-settings.c | 17 +
block/blk-softirq.c | 46 +++
block/blk-stat.c | 14 +
block/blk-throttle.c | 4 +
block/blk-timeout.c | 42 +++
block/blk-wbt.c | 10 +
block/blk.h | 16 +
block/bounce.c | 12 +
block/partitions/check.c | 4 +
drivers/block/virtio_blk.c | 86 +++++
drivers/nvme/host/core.c | 16 +
drivers/nvme/host/fabrics.c | 11 +
drivers/nvme/host/fabrics.h | 9 +
drivers/nvme/host/fault_inject.c | 4 +
drivers/nvme/host/nvme.h | 27 ++
drivers/nvme/host/pci.c | 6 +
drivers/nvme/host/rdma.c | 687 ++++++++++++++++++++++++++++++++++++
drivers/nvme/target/core.c | 67 ++++
drivers/nvme/target/io-cmd-file.c | 35 ++
drivers/nvme/target/loop.c | 56 +++
drivers/nvme/target/nvmet.h | 11 +
drivers/scsi/fcoe/fcoe.c | 10 +
drivers/scsi/fcoe/fcoe_transport.c | 5 +
drivers/scsi/scsi_transport_fc.c | 22 ++
drivers/scsi/scsi_transport_iscsi.c | 10 +
fs/block_dev.c | 14 +
fs/direct-io.c | 21 ++
fs/iomap/direct-io.c | 7 +
include/linux/bio.h | 179 ++++++++++
include/linux/blk-cgroup.h | 10 +
include/linux/blk-mq.h | 321 +++++++++++++++++
include/linux/blk_types.h | 114 ++++++
include/linux/blkdev.h | 218 ++++++++++++
include/linux/bvec.h | 136 +++++++
include/linux/fs.h | 12 +
include/linux/percpu-refcount.h | 63 ++++
include/linux/uio.h | 26 ++
include/rdma/rdma_cm.h | 12 +
lib/iov_iter.c | 27 ++
lib/sbitmap.c | 25 ++
61 files changed, 4048 insertions(+)
diff --git a/block/badblocks.c b/block/badblocks.c
index 2e5f569..e1c1eac 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -50,6 +50,13 @@
* -1: there are bad blocks which have not yet been acknowledged in metadata.
* plus the start/length of the first bad section we overlap.
*/
+/*
+ * called by:
+ * - drivers/block/null_blk_main.c|1176| <<null_handle_cmd>> if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
+ * - drivers/md/md.h|214| <<is_badblock>> int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
+ * - drivers/nvdimm/nd.h|425| <<is_bad_pmem>> return !!badblocks_check(bb, sector, len / 512, &first_bad,
+ * - drivers/nvdimm/pfn_devs.c|383| <<nd_pfn_clear_memmap_errors>> bb_present = badblocks_check(&nd_region->bb, meta_start,
+ */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
@@ -160,6 +167,14 @@ static void badblocks_update_acked(struct badblocks *bb)
* 0: success
* 1: failed to set badblocks (out of space)
*/
+/*
+ * called by:
+ * - block/badblocks.c|537| <<badblocks_store>> if (badblocks_set(bb, sector, length, !unack))
+ * - drivers/block/null_blk_main.c|384| <<nullb_device_badblocks_store>> ret = badblocks_set(&t_dev->badblocks, start,
+ * - drivers/md/md.c|1637| <<super_1_load>> if (badblocks_set(&rdev->badblocks, sector, count, 1))
+ * - drivers/md/md.c|9137| <<rdev_set_badblocks>> rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
+ * - drivers/nvdimm/badrange.c|170| <<set_badblock>> if (badblocks_set(bb, s, num, 1))
+ */
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
int acknowledged)
{
@@ -514,6 +529,12 @@ EXPORT_SYMBOL_GPL(badblocks_show);
* Return:
* Length of the buffer processed or -ve error.
*/
+/*
+ * called by:
+ * - block/genhd.c|851| <<disk_badblocks_store>> return badblocks_store(disk->bb, page, len, 0);
+ * - drivers/md/md.c|3322| <<bb_store>> int rv = badblocks_store(&rdev->badblocks, page, len, 0);
+ * - drivers/md/md.c|3337| <<ubb_store>> return badblocks_store(&rdev->badblocks, page, len, 1);
+ */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
int unack)
{
@@ -572,6 +593,11 @@ static int __badblocks_init(struct device *dev, struct badblocks *bb,
* 0: success
* -ve errno: on error
*/
+/*
+ * called by:
+ * - drivers/block/null_blk_main.c|514| <<null_alloc_dev>> if (badblocks_init(&dev->badblocks, 0)) {
+ * - drivers/md/md.c|3503| <<md_rdev_init>> return badblocks_init(&rdev->badblocks, 0);
+ */
int badblocks_init(struct badblocks *bb, int enable)
{
return __badblocks_init(NULL, bb, enable);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index fb95dbb..6bb2874 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -34,6 +34,14 @@ void blk_flush_integrity(void)
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
+/*
+ * called by:
+ * - block/bio-integrity.c|249| <<bio_integrity_prep>> bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
+ * - block/bio-integrity.c|414| <<bio_integrity_clone>> bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+ * - drivers/md/dm-crypt.c|1003| <<dm_crypt_integrity_io_alloc>> bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ * - drivers/nvme/host/core.c|838| <<nvme_add_user_metadata>> bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ * - drivers/target/target_core_iblock.c|641| <<iblock_alloc_bip>> bip = bio_integrity_alloc(bio, GFP_NOIO,
+ */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
gfp_t gfp_mask,
unsigned int nr_vecs)
diff --git a/block/bio.c b/block/bio.c
index 299a0e7..9d766b1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -298,6 +298,11 @@ void bio_reset(struct bio *bio)
}
EXPORT_SYMBOL(bio_reset);
+/*
+ * called by:
+ * - block/bio.c|313| <<bio_chain_endio>> bio_endio(__bio_chain_endio(bio));
+ * - block/bio.c|1816| <<bio_endio>> bio = __bio_chain_endio(bio);
+ */
static struct bio *__bio_chain_endio(struct bio *bio)
{
struct bio *parent = bio->bi_private;
@@ -570,6 +575,9 @@ EXPORT_SYMBOL(bio_put);
*
* Caller must ensure that @bio_src is not freed before @bio.
*/
+/*
+ * cloned bio must not modify vec list
+ */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
@@ -586,7 +594,13 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
+ /*
+ * the type is struct bvec_iter bi_iter;
+ */
bio->bi_iter = bio_src->bi_iter;
+ /*
+ * the type is struct bio_vec *bi_io_vec;
+ */
bio->bi_io_vec = bio_src->bi_io_vec;
bio_clone_blkg_association(bio, bio_src);
@@ -602,10 +616,17 @@ EXPORT_SYMBOL(__bio_clone_fast);
*
* Like __bio_clone_fast, only also allocates the returned bio
*/
+/*
+ * cloned bio must not modify vec list
+ */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
struct bio *b;
+ /*
+ * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ * backed by the @bs's mempool
+ */
b = bio_alloc_bioset(gfp_mask, 0, bs);
if (!b)
return NULL;
@@ -1348,6 +1369,10 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
+/*
+ * called by:
+ * - block/blk-map.c|75| <<__blk_rq_map_user_iov>> bio = bio_map_user_iov(q, iter, gfp_mask);
+ */
struct bio *bio_map_user_iov(struct request_queue *q,
struct iov_iter *iter,
gfp_t gfp_mask)
@@ -1356,6 +1381,9 @@ struct bio *bio_map_user_iov(struct request_queue *q,
struct bio *bio;
int ret;
+ /*
+ * returns iov_iter->count
+ */
if (!iov_iter_count(iter))
return ERR_PTR(-EINVAL);
@@ -1363,6 +1391,9 @@ struct bio *bio_map_user_iov(struct request_queue *q,
if (!bio)
return ERR_PTR(-ENOMEM);
+ /*
+ * loop as long as iov_iter->count is not 0
+ */
while (iov_iter_count(iter)) {
struct page **pages;
ssize_t bytes;
@@ -1409,6 +1440,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
break;
}
+ /*
+ * BIO_USER_MAPPED is used at:
+ * - block/bio.c|1424| <<bio_map_user_iov>> bio_set_flag(bio, BIO_USER_MAPPED);
+ * - block/blk-map.c|64| <<__blk_rq_unmap_user>> if (bio_flagged(bio, BIO_USER_MAPPED))
+ * - block/blk-map.c|168| <<blk_rq_map_user_iov>> if (!bio_flagged(bio, BIO_USER_MAPPED))
+ */
bio_set_flag(bio, BIO_USER_MAPPED);
/*
@@ -1845,6 +1882,12 @@ EXPORT_SYMBOL(bio_endio);
* to @bio's bi_io_vec; it is the caller's responsibility to ensure that
* @bio is not freed before the split.
*/
+/*
+ * create a new bio (split)
+ * the split covers from the start up to the 'sectors' argument
+ * bio then covers from the 'sectors' argument to the end
+ * return the split
+ */
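+/*
+ * A hypothetical example (numbers are illustrative, not from the source): for
+ * a 64-sector bio, bio_split(bio, 8, gfp, bs) returns a new 8-sector bio
+ * covering the first 8 sectors and advances @bio so that it covers the
+ * remaining 56 sectors; callers typically chain the split to @bio (e.g. with
+ * bio_chain()) before submitting both.
+ */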
struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
{
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 55a7dc2..4a68a57 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1165,6 +1165,10 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
* RETURNS:
* 0 on success, -errno on failure.
*/
+/*
+ * called by:
+ * - block/blk-core.c|535| <<blk_alloc_queue_node>> if (blkcg_init_queue(q))
+ */
int blkcg_init_queue(struct request_queue *q)
{
struct blkcg_gq *new_blkg, *blkg;
@@ -1295,6 +1299,21 @@ static void blkcg_exit(struct task_struct *tsk)
tsk->throttle_queue = NULL;
}
+/*
+ * io_cgrp_subsys is used at:
+ * - block/blk-cgroup.c|1321| <<global>> EXPORT_SYMBOL_GPL(io_cgrp_subsys);
+ * - block/bfq-cgroup.c|495| <<bfq_cpd_init>> d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
+ * - block/bio.c|2137| <<bio_associate_blkg_from_page>> css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+ * - block/blk-cgroup.c|1497| <<blkcg_policy_register>> WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
+ * - block/blk-cgroup.c|1500| <<blkcg_policy_register>> WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
+ * - block/blk-throttle.c|298| <<tg_bps_limit>> if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
+ * - block/blk-throttle.c|328| <<tg_iops_limit>> if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
+ * - block/blk-throttle.c|537| <<throtl_pd_init>> if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
+ * - block/blk-throttle.c|1389| <<tg_conf_updated>> if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
+ * - include/linux/backing-dev.h|253| <<inode_cgwb_enabled>> cgroup_subsys_on_dfl(io_cgrp_subsys) &&
+ * - mm/backing-dev.c|535| <<cgwb_create>> blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
+ * - mm/backing-dev.c|660| <<wb_get_create>> &io_cgrp_subsys);
+ */
struct cgroup_subsys io_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
@@ -1436,6 +1455,12 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
* Register @pol with blkcg core. Might sleep and @pol may be modified on
* successful registration. Returns 0 on success and -errno on failure.
*/
+/*
+ * called by:
+ * - block/bfq-iosched.c|6762| <<bfq_init>> ret = blkcg_policy_register(&blkcg_policy_bfq);
+ * - block/blk-iolatency.c|1045| <<iolatency_init>> return blkcg_policy_register(&blkcg_policy_iolatency);
+ * - block/blk-throttle.c|2482| <<throtl_init>> return blkcg_policy_register(&blkcg_policy_throtl);
+ */
int blkcg_policy_register(struct blkcg_policy *pol)
{
struct blkcg *blkcg;
@@ -1548,6 +1573,10 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
+/*
+ * called by:
+ * - include/linux/blk-cgroup.h|714| <<blkcg_punt_bio_submit>> return __blkcg_punt_bio_submit(bio);
+ */
bool __blkcg_punt_bio_submit(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
@@ -1737,6 +1766,11 @@ void blkcg_maybe_throttle_current(void)
* throttle once. If the task needs to be throttled again it'll need to be
* re-set at the next time we see the task.
*/
+/*
+ * called by:
+ * - block/blk-iolatency.c|289| <<__blkcg_iolatency_throttle>> blkcg_schedule_throttle(rqos->q, use_memdelay);
+ * - mm/swapfile.c|3753| <<mem_cgroup_throttle_swaprate>> blkcg_schedule_throttle(bdev_get_queue(si->bdev),
+ */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
if (unlikely(current->flags & PF_KTHREAD))
diff --git a/block/blk-core.c b/block/blk-core.c
index d0cc6e1..bb92ff5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -305,6 +305,15 @@ void blk_put_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_put_queue);
+/*
+ * called by:
+ * - block/blk-core.c|338| <<blk_cleanup_queue>> blk_set_queue_dying(q);
+ * - drivers/block/mtip32xx/mtip32xx.c|4216| <<mtip_pci_remove>> blk_set_queue_dying(dd->queue);
+ * - drivers/block/rbd.c|7237| <<do_rbd_remove>> blk_set_queue_dying(rbd_dev->disk->queue);
+ * - drivers/md/dm.c|2377| <<__dm_destroy>> blk_set_queue_dying(md->queue);
+ * - drivers/nvme/host/core.c|107| <<nvme_set_queue_dying>> blk_set_queue_dying(ns->queue);
+ * - drivers/nvme/host/multipath.c|673| <<nvme_mpath_remove_disk>> blk_set_queue_dying(head->disk->queue);
+ */
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index aedd932..513f347 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -75,6 +75,37 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
+/*
+ * The disk cache is a memory chip on the drive's controller, usually SDRAM,
+ * with very fast access times. It is a buffer between the drive's internal
+ * storage and the external interface: because the internal transfer rate and
+ * the interface transfer rate differ, the cache smooths out the difference.
+ * Its size and speed directly affect the drive's transfer rate and can
+ * significantly improve overall disk performance.
+ *
+ * If the disk cache is enabled, data that was "written" may well have landed
+ * only in the disk cache and not yet on the storage media.
+ *
+ * On Linux, the disk write cache can be inspected and toggled with hdparm:
+ *
+ * #hdparm -W /dev/sdx   // show whether the write cache is on, 1 = enabled
+ * #hdparm -W 0 /dev/sdx // disable the write cache
+ * #hdparm -W 1 /dev/sdx // enable the write cache
+ *
+ * REQ_FUA     : forced unit access; bypass the disk cache and write the data
+ *               directly to the storage media.
+ * REQ_PREFLUSH: request a cache flush; flush the data sitting in the disk
+ *               cache to the storage media so it is not lost on power failure.
+ *
+ * blk_insert_flush() is the key entry point!
+ */
+
+/*
+ * Why does the request_queue use a double-buffered queue to hold fs_requests
+ * during flushing?
+ *
+ * With double buffering, a single run of the flush request can satisfy the
+ * flush requirements of many fs_requests. While the queue's own flush request
+ * is executing, blk_insert_flush() may be called multiple times; fs_requests
+ * coming from the upper layers are added to the pending1 queue to wait for the
+ * next run of the flush request. Once the flush request can run again,
+ * pending1 stops accepting new fs_requests (they go to pending2 instead), and
+ * when that flush request completes, the PREFLUSH/POSTFLUSH of every
+ * fs_request on pending1 has been carried out.
+ */
+
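+/*
+ * A minimal sketch of how the upper layers force data to stable storage,
+ * loosely following blkdev_issue_flush() below (error handling omitted):
+ *
+ *	bio = bio_alloc(gfp_mask, 0);
+ *	bio_set_dev(bio, bdev);
+ *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ *	ret = submit_bio_wait(bio);
+ *
+ * An empty bio marked REQ_PREFLUSH becomes a flush request and ends up in
+ * blk_insert_flush().
+ */
+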
/* PREFLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
@@ -95,13 +126,37 @@ enum {
static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, unsigned int flags);
+/*
+ * called by:
+ * - block/blk-flush.c|358| <<blk_insert_flush>> unsigned int policy = blk_flush_policy(fflags, rq);
+ */
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
+ /*
+ * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
+ * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
+ * that the device cache should be flushed before the data is executed, and
+ * REQ_FUA means that the data must be on non-volatile media on request
+ * completion.
+ *
+ * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
+ * difference. The requests are either completed immediately if there's no data
+ * or executed as normal requests otherwise.
+ *
+ * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
+ * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
+ *
+ * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
+ * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ */
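+ /*
+  * Worked example (illustrative, per the rules above): on a device with a
+  * writeback cache but no FUA support, a write request that carries data and
+  * is marked REQ_PREFLUSH | REQ_FUA gets
+  * policy = REQ_FSEQ_DATA | REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH.
+  */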
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
+ /*
+ * QUEUE_FLAG_WC: Write back caching
+ */
if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_PREFLUSH)
policy |= REQ_FSEQ_PREFLUSH;
@@ -313,6 +368,10 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
blk_flush_queue_rq(flush_rq, false);
}
+/*
+ * used at:
+ * - block/blk-flush.c|409| <<blk_insert_flush>> rq->end_io = mq_flush_data_end_io;
+ */
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
struct request_queue *q = rq->q;
@@ -346,6 +405,11 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*/
+/*
+ * called by:
+ * - block/blk-mq-sched.c|388| <<blk_mq_sched_insert_request>> blk_insert_flush(rq);
+ * - block/blk-mq.c|2001| <<blk_mq_make_request>> blk_insert_flush(rq);
+ */
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -419,6 +483,31 @@ void blk_insert_flush(struct request *rq)
* room for storing the error offset in case of a flush error, if they
* wish to.
*/
+/*
+ * called by:
+ * - drivers/md/dm-integrity.c|2536| <<bitmap_flush_work>> blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+ * - drivers/md/dm-zoned-metadata.c|660| <<dmz_write_sb>> ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ * - drivers/md/dm-zoned-metadata.c|701| <<dmz_write_dirty_mblocks>> ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ * - drivers/md/dm-zoned-metadata.c|770| <<dmz_flush_metadata>> ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+ * - drivers/md/raid5-ppl.c|1040| <<ppl_recover>> ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
+ * - drivers/nvme/target/io-cmd-bdev.c|221| <<nvmet_bdev_flush>> if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ * - fs/block_dev.c|689| <<blkdev_fsync>> error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
+ * - fs/ext4/fsync.c|157| <<ext4_sync_file>> err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/ext4/ialloc.c|1427| <<ext4_init_inode_table>> blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+ * - fs/ext4/super.c|5142| <<ext4_sync_fs>> err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/fat/file.c|207| <<fat_file_fsync>> return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/hfsplus/inode.c|343| <<hfsplus_file_fsync>> blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/hfsplus/super.c|242| <<hfsplus_sync_fs>> blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/jbd2/checkpoint.c|417| <<jbd2_cleanup_journal_tail>> blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ * - fs/jbd2/commit.c|781| <<jbd2_journal_commit_transaction>> blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ * - fs/jbd2/commit.c|885| <<jbd2_journal_commit_transaction>> blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
+ * - fs/jbd2/recovery.c|289| <<jbd2_journal_recover>> err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ * - fs/libfs.c|1040| <<generic_file_fsync>> return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/nilfs2/the_nilfs.h|378| <<nilfs_flush_device>> err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
+ * - fs/ocfs2/file.c|197| <<ocfs2_sync_file>> ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/reiserfs/file.c|168| <<reiserfs_sync_file>> blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ * - fs/xfs/xfs_super.c|658| <<xfs_blkdev_issue_flush>> blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
+ */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector)
{
@@ -461,6 +550,10 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
}
EXPORT_SYMBOL(blkdev_issue_flush);
+/*
+ * called by:
+ * - block/blk-mq.c|2563| <<blk_mq_alloc_hctx>> hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+ */
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags)
{
@@ -490,6 +583,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
return NULL;
}
+/*
+ * called by:
+ * - block/blk-mq-sysfs.c|43| <<blk_mq_hw_sysfs_release>> blk_free_flush_queue(hctx->fq);
+ */
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
/* bio based request queue hasn't flush queue */
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index ca39b46..5dbeee2 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -385,6 +385,15 @@ static const struct blk_integrity_profile nop_profile = {
* struct with values appropriate for the underlying hardware. See
* Documentation/block/data-integrity.rst.
*/
+/*
+ * called by:
+ * - drivers/md/dm-integrity.c|3140| <<dm_integrity_set>> blk_integrity_register(disk, &bi);
+ * - drivers/md/dm-table.c|1265| <<dm_table_register_integrity>> blk_integrity_register(dm_disk(md),
+ * - drivers/md/md.c|2185| <<md_integrity_register>> blk_integrity_register(mddev->gendisk,
+ * - drivers/nvdimm/core.c|412| <<nd_integrity_init>> blk_integrity_register(disk, &bi);
+ * - drivers/nvme/host/core.c|1537| <<nvme_init_integrity>> blk_integrity_register(disk, &integrity);
+ * - drivers/scsi/sd_dif.c|81| <<sd_dif_config_host>> blk_integrity_register(disk, &bi);
+ */
void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
struct blk_integrity *bi = &disk->queue->integrity;
@@ -415,6 +424,10 @@ void blk_integrity_unregister(struct gendisk *disk)
}
EXPORT_SYMBOL(blk_integrity_unregister);
+/*
+ * called by:
+ * - block/genhd.c|747| <<__device_add_disk>> blk_integrity_add(disk);
+ */
void blk_integrity_add(struct gendisk *disk)
{
if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 0fff7b5..67791d3 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -78,6 +78,19 @@
#include "blk-stat.h"
#include "blk.h"
+/*
+ * DEFAULT_SCALE_COOKIE is used at:
+ * - block/blk-iolatency.c|442| <<scale_cookie_change>> if (old < DEFAULT_SCALE_COOKIE)
+ * - block/blk-iolatency.c|443| <<scale_cookie_change>> diff = DEFAULT_SCALE_COOKIE - old;
+ * - block/blk-iolatency.c|446| <<scale_cookie_change>> if (scale + old > DEFAULT_SCALE_COOKIE)
+ * - block/blk-iolatency.c|448| <<scale_cookie_change>> DEFAULT_SCALE_COOKIE);
+ * - block/blk-iolatency.c|567| <<check_scale_change>> if (cur_cookie == DEFAULT_SCALE_COOKIE) {
+ * - block/blk-iolatency.c|670| <<iolatency_check_latencies>> atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
+ * - block/blk-iolatency.c|806| <<blkiolatency_timer_fn>> if (cookie >= DEFAULT_SCALE_COOKIE)
+ * - block/blk-iolatency.c|904| <<iolatency_clear_scaling>> atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
+ * - block/blk-iolatency.c|1142| <<iolatency_pd_init>> atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
+ * - block/blk-iolatency.c|1145| <<iolatency_pd_init>> atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
+ */
#define DEFAULT_SCALE_COOKIE 1000000U
static struct blkcg_policy blkcg_policy_iolatency;
@@ -89,11 +102,22 @@ struct blk_iolatency {
atomic_t enabled;
};
+/*
+ * called by:
+ * - block/blk-iolatency.c|462| <<blkcg_iolatency_throttle>> struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ * - block/blk-iolatency.c|647| <<blkcg_iolatency_exit>> struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ * - block/blk-iolatency.c|991| <<iolatency_pd_init>> struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+ */
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
return container_of(rqos, struct blk_iolatency, rqos);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|466| <<blkcg_iolatency_throttle>> if (!blk_iolatency_enabled(blkiolat))
+ * - block/blk-iolatency.c|611| <<blkcg_iolatency_done_bio>> enabled = blk_iolatency_enabled(iolat->blkiolat);
+ */
static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
return atomic_read(&blkiolat->enabled) > 0;
@@ -152,7 +176,16 @@ struct iolatency_grp {
struct child_latency_info child_lat;
};
+/*
+ * BLKIOLATENCY_MIN_WIN_SIZE is used at:
+ * - block/blk-iolatency.c|881| <<iolatency_set_min_lat_nsec>> iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
+ */
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
+/*
+ * BLKIOLATENCY_MAX_WIN_SIZE is used at:
+ * - block/blk-iolatency.c|178| <<BLKIOLATENCY_EXP_BUCKET_SIZE>> #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
+ * - block/blk-iolatency.c|883| <<iolatency_set_min_lat_nsec>> BLKIOLATENCY_MAX_WIN_SIZE);
+ */
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
* These are the constants used to fake the fixed-point moving average
@@ -163,9 +196,23 @@ struct iolatency_grp {
* elapse immediately. Note, windows only elapse with IO activity. Idle
* periods extend the most recent window.
*/
+/*
+ * BLKIOLATENCY_NR_EXP_FACTORS is used at:
+ * - block/blk-iolatency.c|184| <<global>> static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
+ * - block/blk-iolatency.c|179| <<BLKIOLATENCY_EXP_BUCKET_SIZE>> (BLKIOLATENCY_NR_EXP_FACTORS - 1))
+ * - block/blk-iolatency.c|346| <<iolat_update_total_lat_avg>> exp_idx = min_t(int , BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
+/*
+ * BLKIOLATENCY_EXP_BUCKET_SIZE is used at:
+ * - block/blk-iolatency.c|348| <<iolat_update_total_lat_avg>> BLKIOLATENCY_EXP_BUCKET_SIZE));
+ */
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
(BLKIOLATENCY_NR_EXP_FACTORS - 1))
+/*
+ * used by:
+ * - block/blk-iolatency.c|264| <<iolat_update_total_lat_avg>> iolatency_exp_factors[exp_idx],
+ */
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
2045, // exp(1/600) - 600 samples
2039, // exp(1/240) - 240 samples
@@ -174,21 +221,66 @@ static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
2014, // exp(1/60) - 60 samples
};
+/*
+ * called by:
+ * - block/blk-iolatency.c|184| <<blkg_to_lat>> return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
+ * - block/blk-iolatency.c|878| <<iolatency_prfill_limit>> struct iolatency_grp *iolat = pd_to_lat(pd);
+ * - block/blk-iolatency.c|934| <<iolatency_pd_stat>> struct iolatency_grp *iolat = pd_to_lat(pd);
+ * - block/blk-iolatency.c|988| <<iolatency_pd_init>> struct iolatency_grp *iolat = pd_to_lat(pd);
+ * - block/blk-iolatency.c|1040| <<iolatency_pd_offline>> struct iolatency_grp *iolat = pd_to_lat(pd);
+ * - block/blk-iolatency.c|1063| <<iolatency_pd_free>> struct iolatency_grp *iolat = pd_to_lat(pd);
+ */
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|404| <<check_scale_change>> parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
+ * - block/blk-iolatency.c|470| <<blkcg_iolatency_throttle>> struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ * - block/blk-iolatency.c|543| <<iolatency_check_latencies>> parent = blkg_to_lat(blkg->parent);
+ * - block/blk-iolatency.c|607| <<blkcg_iolatency_done_bio>> iolat = blkg_to_lat(bio->bi_blkg);
+ * - block/blk-iolatency.c|616| <<blkcg_iolatency_done_bio>> iolat = blkg_to_lat(blkg);
+ * - block/blk-iolatency.c|682| <<blkiolatency_timer_fn>> iolat = blkg_to_lat(blkg);
+ * - block/blk-iolatency.c|760| <<iolatency_set_min_lat_nsec>> struct iolatency_grp *iolat = blkg_to_lat(blkg);
+ * - block/blk-iolatency.c|780| <<iolatency_clear_scaling>> struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
+ * - block/blk-iolatency.c|812| <<iolatency_set_limit>> iolat = blkg_to_lat(ctx.blkg);
+ * - block/blk-iolatency.c|855| <<iolatency_set_limit>> struct iolatency_grp *tmp = blkg_to_lat(blkg);
+ * - block/blk-iolatency.c|1021| <<iolatency_pd_init>> struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
+ */
static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|324| <<__blkcg_iolatency_throttle>> unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
+ * - block/blk-iolatency.c|413| <<scale_change>> if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
+ * - block/blk-iolatency.c|439| <<check_scale_change>> if (lat_to_blkg(iolat)->parent == NULL)
+ * - block/blk-iolatency.c|442| <<check_scale_change>> parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
+ * - block/blk-iolatency.c|483| <<check_scale_change>> blkcg_use_delay(lat_to_blkg(iolat));
+ * - block/blk-iolatency.c|489| <<check_scale_change>> blkcg_clear_delay(lat_to_blkg(iolat));
+ * - block/blk-iolatency.c|548| <<iolatency_record_time>> blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
+ * - block/blk-iolatency.c|564| <<iolatency_check_latencies>> struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ * - block/blk-iolatency.c|1027| <<iolatency_pd_init>> struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ * - block/blk-iolatency.c|1079| <<iolatency_pd_offline>> struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ */
static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
return pd_to_blkg(&iolat->pd);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|571| <<iolatency_check_latencies>> latency_stat_init(iolat, &stat);
+ * - block/blk-iolatency.c|577| <<iolatency_check_latencies>> latency_stat_init(iolat, s);
+ * - block/blk-iolatency.c|625| <<iolatency_check_latencies>> latency_stat_init(iolat, &iolat->cur_stat);
+ * - block/blk-iolatency.c|944| <<iolatency_ssd_stat>> latency_stat_init(iolat, &stat);
+ * - block/blk-iolatency.c|1041| <<iolatency_pd_init>> latency_stat_init(iolat, stat);
+ * - block/blk-iolatency.c|1044| <<iolatency_pd_init>> latency_stat_init(iolat, &iolat->cur_stat);
+ */
static inline void latency_stat_init(struct iolatency_grp *iolat,
struct latency_stat *stat)
{
@@ -199,6 +291,12 @@ static inline void latency_stat_init(struct iolatency_grp *iolat,
blk_rq_stat_init(&stat->rqs);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|576| <<iolatency_check_latencies>> latency_stat_sum(iolat, &stat, s);
+ * - block/blk-iolatency.c|597| <<iolatency_check_latencies>> latency_stat_sum(iolat, &iolat->cur_stat, &stat);
+ * - block/blk-iolatency.c|949| <<iolatency_ssd_stat>> latency_stat_sum(iolat, &stat, s);
+ */
static inline void latency_stat_sum(struct iolatency_grp *iolat,
struct latency_stat *sum,
struct latency_stat *stat)
@@ -210,6 +308,10 @@ static inline void latency_stat_sum(struct iolatency_grp *iolat,
blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|580| <<iolatency_record_time>> latency_stat_record_time(iolat, req_time);
+ */
static inline void latency_stat_record_time(struct iolatency_grp *iolat,
u64 req_time)
{
@@ -223,6 +325,12 @@ static inline void latency_stat_record_time(struct iolatency_grp *iolat,
put_cpu_ptr(stat);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|618| <<iolatency_check_latencies>> if (latency_sum_ok(iolat, &stat) &&
+ * - block/blk-iolatency.c|634| <<iolatency_check_latencies>> if (latency_sum_ok(iolat, &iolat->cur_stat) &&
+ * - block/blk-iolatency.c|635| <<iolatency_check_latencies>> latency_sum_ok(iolat, &stat)) {
+ */
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
struct latency_stat *stat)
{
@@ -234,6 +342,12 @@ static inline bool latency_sum_ok(struct iolatency_grp *iolat,
return stat->rqs.mean <= iolat->min_lat_nsec;
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|627| <<iolatency_check_latencies>> lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
+ * - block/blk-iolatency.c|628| <<iolatency_check_latencies>> iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
+ * - block/blk-iolatency.c|636| <<iolatency_check_latencies>> if (latency_stat_samples(iolat, &iolat->cur_stat) <
+ */
static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
struct latency_stat *stat)
{
@@ -242,6 +356,10 @@ static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
return stat->rqs.nr_samples;
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|615| <<iolatency_check_latencies>> iolat_update_total_lat_avg(iolat, &stat);
+ */
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
struct latency_stat *stat)
{
@@ -265,18 +383,30 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
stat->rqs.mean);
}
+/*
+ * used by:
+ * - block/blk-iolatency.c|369| <<__blkcg_iolatency_throttle>> rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
+ */
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
atomic_dec(&rqw->inflight);
wake_up(&rqw->wait);
}
+/*
+ * used by:
+ * - block/blk-iolatency.c|369| <<__blkcg_iolatency_throttle>> rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
+ */
static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
struct iolatency_grp *iolat = private_data;
return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|543| <<blkcg_iolatency_throttle>> __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
+ */
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
bool issue_as_root,
@@ -306,6 +436,11 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4
+/*
+ * called by:
+ * - block/blk-iolatency.c|426| <<scale_cookie_change>> unsigned long scale = scale_amount(qd, up);
+ * - block/blk-iolatency.c|466| <<scale_change>> unsigned long scale = scale_amount(qd, up);
+ */
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
@@ -320,6 +455,12 @@ static inline unsigned long scale_amount(unsigned long qd, bool up)
* the global scale cookie goes up or down they know which way they need to go
* based on their last knowledge of it.
*/
+/*
+ * called by:
+ * - block/blk-iolatency.c|673| <<iolatency_check_latencies>> scale_cookie_change(iolat->blkiolat, lat_info, true);
+ * - block/blk-iolatency.c|683| <<iolatency_check_latencies>> scale_cookie_change(iolat->blkiolat, lat_info, false);
+ * - block/blk-iolatency.c|799| <<blkiolatency_timer_fn>> scale_cookie_change(iolat->blkiolat, lat_info, true);
+ */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
struct child_latency_info *lat_info,
bool up)
@@ -362,6 +503,10 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
* queue depth at a time so we don't get wild swings and hopefully dial in to
* fairer distribution of the overall queue depth.
*/
+/*
+ * called by:
+ * - block/blk-iolatency.c|555| <<check_scale_change>> scale_change(iolat, direction > 0);
+ */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
@@ -388,6 +533,10 @@ static void scale_change(struct iolatency_grp *iolat, bool up)
}
/* Check our parent and see if the scale cookie has changed. */
+/*
+ * called by:
+ * - block/blk-iolatency.c|574| <<blkcg_iolatency_throttle>> check_scale_change(iolat);
+ */
static void check_scale_change(struct iolatency_grp *iolat)
{
struct iolatency_grp *parent;
@@ -457,6 +606,12 @@ static void check_scale_change(struct iolatency_grp *iolat)
scale_change(iolat, direction > 0);
}
+/*
+ * called by:
+ * - block/blk-rq-qos.c|76| <<__rq_qos_throttle>> rqos->ops->throttle(rqos, bio);
+ *
+ * struct rq_qos_ops blkcg_iolatency_ops.throttle = blkcg_iolatency_throttle()
+ */
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -482,6 +637,10 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
mod_timer(&blkiolat->timer, jiffies + HZ);
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|779| <<blkcg_iolatency_done_bio>> iolatency_record_time(iolat, &bio->bi_issue, now,
+ */
static void iolatency_record_time(struct iolatency_grp *iolat,
struct bio_issue *issue, u64 now,
bool issue_as_root)
@@ -517,6 +676,10 @@ static void iolatency_record_time(struct iolatency_grp *iolat,
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
+/*
+ * called by:
+ * - block/blk-iolatency.c|633| <<blkcg_iolatency_done_bio>> iolatency_check_latencies(iolat, now);
+ */
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
struct blkcg_gq *blkg = lat_to_blkg(iolat);
@@ -585,6 +748,12 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
spin_unlock_irqrestore(&lat_info->lock, flags);
}
+/*
+ * called by:
+ * - block/blk-rq-qos.c|94| <<__rq_qos_done_bio>> rqos->ops->done_bio(rqos, bio);
+ *
+ * struct rq_qos_ops blkcg_iolatency_ops.done_bio = blkcg_iolatency_done_bio()
+ */
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
struct blkcg_gq *blkg;
@@ -638,6 +807,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
}
}
+/*
+ * struct rq_qos_ops blkcg_iolatency_ops.exit = blkcg_iolatency_exit()
+ */
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -647,12 +819,20 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
kfree(blkiolat);
}
+/*
+ * blkcg_iolatency_ops is used at:
+ * - block/blk-iolatency.c|886| <<blk_iolatency_init>> rqos->ops = &blkcg_iolatency_ops;
+ */
static struct rq_qos_ops blkcg_iolatency_ops = {
.throttle = blkcg_iolatency_throttle,
.done_bio = blkcg_iolatency_done_bio,
.exit = blkcg_iolatency_exit,
};
+/*
+ * used by:
+ * - block/blk-iolatency.c|898| <<blk_iolatency_init>> timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+ */
static void blkiolatency_timer_fn(struct timer_list *t)
{
struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
@@ -714,6 +894,10 @@ static void blkiolatency_timer_fn(struct timer_list *t)
rcu_read_unlock();
}
+/*
+ * called only by:
+ * - block/blk-cgroup.c|1193| <<blkcg_init_queue>> ret = blk_iolatency_init(q);
+ */
int blk_iolatency_init(struct request_queue *q)
{
struct blk_iolatency *blkiolat;
@@ -747,6 +931,11 @@ int blk_iolatency_init(struct request_queue *q)
* return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
* return 0.
*/
+/*
+ * called by:
+ * - block/blk-iolatency.c|990| <<iolatency_set_limit>> enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+ * - block/blk-iolatency.c|1194| <<iolatency_pd_offline>> ret = iolatency_set_min_lat_nsec(blkg, 0);
+ */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
@@ -766,6 +955,11 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
return 0;
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|997| <<iolatency_set_limit>> iolatency_clear_scaling(blkg);
+ * - block/blk-iolatency.c|1199| <<iolatency_pd_offline>> iolatency_clear_scaling(blkg);
+ */
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
if (blkg->parent) {
@@ -885,6 +1079,10 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
+/*
+ * called by:
+ * - block/blk-iolatency.c|932| <<iolatency_pd_stat>> return iolatency_ssd_stat(iolat, buf, size);
+ */
static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
size_t size)
{
@@ -910,6 +1108,12 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
iolat->rq_depth.max_depth);
}
+/*
+ * called by:
+ * - block/blk-cgroup.c|962| <<blkcg_print_stat>> written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
+ *
+ * struct blkcg_policy blkcg_policy_iolatency.pd_stat_fn = iolatency_pd_stat()
+ */
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
{
@@ -934,6 +1138,14 @@ static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
}
+/*
+ * called by:
+ * - block/blk-cgroup.c|178| <<blkg_alloc>> pd = pol->pd_alloc_fn(gfp_mask, q->node);
+ * - block/blk-cgroup.c|1368| <<blkcg_activate_policy>> pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
+ * - block/blk-cgroup.c|1384| <<blkcg_activate_policy>> pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
+ *
+ * struct blkcg_policy blkcg_policy_iolatency.pd_alloc_fn = iolatency_pd_alloc()
+ */
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
struct iolatency_grp *iolat;
@@ -950,6 +1162,13 @@ static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
return &iolat->pd;
}
+/*
+ * called by:
+ * - block/blk-cgroup.c|279| <<blkg_create>> pol->pd_init_fn(blkg->pd[i]);
+ * - block/blk-cgroup.c|1396| <<blkcg_activate_policy>> pol->pd_init_fn(pd);
+ *
+ * struct blkcg_policy blkcg_policy_iolatency.pd_init_fn = iolatency_pd_init()