From a3d0a918502cc73af4f60da2cc4c5cac5573f183 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 2 Feb 2016 16:57:08 -0800 Subject: [PATCH 01/17] thp: make split_queue per-node Andrea Arcangeli suggested to make split queue per-node to improve scalability. Let's do it. Signed-off-by: Kirill A. Shutemov Suggested-by: Andrea Arcangeli Reviewed-by: Andrea Arcangeli Cc: Hugh Dickins Cc: Dave Hansen Cc: Mel Gorman Cc: Rik van Riel Cc: Vlastimil Babka Cc: "Aneesh Kumar K.V" Cc: Johannes Weiner Cc: Michal Hocko Cc: Jerome Marchand Cc: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 6 ++++++ mm/huge_memory.c | 49 ++++++++++++++++++++++-------------------- mm/page_alloc.c | 5 +++++ 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 33bb1b19273e3a..7b6c2cfee390e9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -682,6 +682,12 @@ typedef struct pglist_data { */ unsigned long first_deferred_pfn; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +#endif } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fd3a07b3e6f4e0..253a25e007d71e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -138,9 +138,6 @@ static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; -static DEFINE_SPINLOCK(split_queue_lock); -static LIST_HEAD(split_queue); -static unsigned long split_queue_len; static struct shrinker deferred_split_shrinker; static void set_recommended_min_free_kbytes(void) @@ -3358,6 +3355,7 @@ int total_mapcount(struct page *page) int split_huge_page_to_list(struct page *page, struct list_head *list) { struct page *head = compound_head(page); + struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); struct anon_vma *anon_vma; int count, mapcount, ret; bool mlocked; @@ -3401,19 +3399,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) lru_add_drain(); /* Prevent deferred_split_scan() touching ->_count */ - spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); count = page_count(head); mapcount = total_mapcount(head); if (!mapcount && count == 1) { if (!list_empty(page_deferred_list(head))) { - split_queue_len--; + pgdata->split_queue_len--; list_del(page_deferred_list(head)); } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); __split_huge_page(page, list); ret = 0; } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); pr_alert("total_mapcount: %u, page_count(): %u\n", mapcount, count); if (PageTail(page)) @@ -3421,7 +3419,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) dump_page(page, "total_mapcount(head) > 0"); BUG(); } else { - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); unfreeze_page(anon_vma, head); ret = -EBUSY; } @@ -3436,52 +3434,56 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) void free_transhuge_page(struct page *page) { + struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); unsigned long flags; - 
spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); if (!list_empty(page_deferred_list(page))) { - split_queue_len--; + pgdata->split_queue_len--; list_del(page_deferred_list(page)); } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); free_compound_page(page); } void deferred_split_huge_page(struct page *page) { + struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); unsigned long flags; VM_BUG_ON_PAGE(!PageTransHuge(page), page); - spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); if (list_empty(page_deferred_list(page))) { - list_add_tail(page_deferred_list(page), &split_queue); - split_queue_len++; + list_add_tail(page_deferred_list(page), &pgdata->split_queue); + pgdata->split_queue_len++; } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); } static unsigned long deferred_split_count(struct shrinker *shrink, struct shrink_control *sc) { + struct pglist_data *pgdata = NODE_DATA(sc->nid); /* * Split a page from split_queue will free up at least one page, * at most HPAGE_PMD_NR - 1. We don't track exact number. * Let's use HPAGE_PMD_NR / 2 as ballpark. */ - return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2; + return ACCESS_ONCE(pgdata->split_queue_len) * HPAGE_PMD_NR / 2; } static unsigned long deferred_split_scan(struct shrinker *shrink, struct shrink_control *sc) { + struct pglist_data *pgdata = NODE_DATA(sc->nid); unsigned long flags; LIST_HEAD(list), *pos, *next; struct page *page; int split = 0; - spin_lock_irqsave(&split_queue_lock, flags); - list_splice_init(&split_queue, &list); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); + list_splice_init(&pgdata->split_queue, &list); /* Take pin on all head pages to avoid freeing them under us */ list_for_each_safe(pos, next, &list) { @@ -3490,10 +3492,10 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, /* race with put_compound_page() */ if (!get_page_unless_zero(page)) { list_del_init(page_deferred_list(page)); - split_queue_len--; + pgdata->split_queue_len--; } } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); list_for_each_safe(pos, next, &list) { page = list_entry((void *)pos, struct page, mapping); @@ -3505,9 +3507,9 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, put_page(page); } - spin_lock_irqsave(&split_queue_lock, flags); - list_splice_tail(&list, &split_queue); - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); + list_splice_tail(&list, &pgdata->split_queue); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); return split * HPAGE_PMD_NR / 2; } @@ -3516,6 +3518,7 @@ static struct shrinker deferred_split_shrinker = { .count_objects = deferred_split_count, .scan_objects = deferred_split_scan, .seeks = DEFAULT_SEEKS, + .flags = SHRINKER_NUMA_AWARE, }; #ifdef CONFIG_DEBUG_FS diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 63358d9f9aa98e..ea2c4d3e0c0354 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5209,6 +5209,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) spin_lock_init(&pgdat->numabalancing_migrate_lock); pgdat->numabalancing_migrate_nr_pages = 0; pgdat->numabalancing_migrate_next_window = jiffies; +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + spin_lock_init(&pgdat->split_queue_lock); 
+	INIT_LIST_HEAD(&pgdat->split_queue);
+	pgdat->split_queue_len = 0;
 #endif
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	init_waitqueue_head(&pgdat->pfmemalloc_wait);

From cb8d68ec16a511f8be7e1028fd8f869ef7c6a1a8 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 2 Feb 2016 16:57:12 -0800
Subject: [PATCH 02/17] thp: change deferred_split_count() to return number of THP in queue

I've got the meaning of shrinker::count_objects() wrong: it should return
the number of potentially freeable objects, which does not necessarily
correlate with the amount of freeable memory.

Returning 256 per THP in the queue is not reasonable: shrinker::scan_objects()
is never called with nr_to_scan > 128 in my setup. Let's return 1 per THP and
correct scan_objects() accordingly.

Signed-off-by: Kirill A. Shutemov
Reviewed-by: Andrea Arcangeli
Cc: Hugh Dickins
Cc: Dave Hansen
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Vlastimil Babka
Cc: "Aneesh Kumar K.V"
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Jerome Marchand
Cc: Sasha Levin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 253a25e007d71e..7aae721145832e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3465,12 +3465,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
-	/*
-	 * Split a page from split_queue will free up at least one page,
-	 * at most HPAGE_PMD_NR - 1. We don't track exact number.
-	 * Let's use HPAGE_PMD_NR / 2 as ballpark.
-	 */
-	return ACCESS_ONCE(pgdata->split_queue_len) * HPAGE_PMD_NR / 2;
+	return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
@@ -3511,7 +3506,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	list_splice_tail(&list, &pgdata->split_queue);
 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
-	return split * HPAGE_PMD_NR / 2;
+	/*
+	 * Stop shrinker if we didn't split any page, but the queue is empty.
+	 * This can happen if pages were freed under us.
+	 */
+	if (!split && list_empty(&pgdata->split_queue))
+		return SHRINK_STOP;
+	return split;
 }
 
 static struct shrinker deferred_split_shrinker = {

From e3ae19535c665771e2c03cdd63df9bc4d6b37941 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 2 Feb 2016 16:57:15 -0800
Subject: [PATCH 03/17] thp: limit number of objects to scan on deferred_split_scan()

If we have a lot of pages queued up to be split, deferred_split_scan() can
spend an unreasonable amount of time under the spinlock with interrupts
disabled. Let's cap the number of pages to split per scan at sc->nr_to_scan.

Signed-off-by: Kirill A. Shutemov
Reported-by: Andrea Arcangeli
Reviewed-by: Andrea Arcangeli
Cc: Hugh Dickins
Cc: Dave Hansen
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Vlastimil Babka
Cc: "Aneesh Kumar K.V"
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Jerome Marchand
Cc: Sasha Levin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7aae721145832e..c1411961167e12 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3478,17 +3478,19 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	int split = 0;
 
 	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
-	list_splice_init(&pgdata->split_queue, &list);
-
 	/* Take pin on all head pages to avoid freeing them under us */
 	list_for_each_safe(pos, next, &list) {
 		page = list_entry((void *)pos, struct page, mapping);
 		page = compound_head(page);
-		/* race with put_compound_page() */
-		if (!get_page_unless_zero(page)) {
+		if (get_page_unless_zero(page)) {
+			list_move(page_deferred_list(page), &list);
+		} else {
+			/* We lost race with put_compound_page() */
 			list_del_init(page_deferred_list(page));
 			pgdata->split_queue_len--;
 		}
+		if (!--sc->nr_to_scan)
+			break;
 	}
 	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

From 72676bb53f33fd0ef3a1484fc1ecfd306dc6ff40 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Tue, 2 Feb 2016 16:57:18 -0800
Subject: [PATCH 04/17] lib/test-string_helpers.c: fix and improve string_get_size() tests

The recently added commit 564b026fbd0d ("string_helpers: fix precision loss
for some inputs") fixed precision issues in string_get_size() but broke its
tests. Fix and improve them: test both STRING_UNITS_2 and STRING_UNITS_10 at
a time, report failures more clearly, and test both small and huge values.

Fixes: 564b026fbd0d28e9 ("string_helpers: fix precision loss for some inputs")
Signed-off-by: Vitaly Kuznetsov
Cc: Andy Shevchenko
Cc: Rasmus Villemoes
Cc: James Bottomley
Cc: James Bottomley
Cc: "James E.J.
Bottomley" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test-string_helpers.c | 67 ++++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 18 deletions(-) diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c index 98866a770770c8..25b5cbfb7615bd 100644 --- a/lib/test-string_helpers.c +++ b/lib/test-string_helpers.c @@ -327,36 +327,67 @@ static __init void test_string_escape(const char *name, } #define string_get_size_maxbuf 16 -#define test_string_get_size_one(size, blk_size, units, exp_result) \ +#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ do { \ - BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \ - __test_string_get_size((size), (blk_size), (units), \ - (exp_result)); \ + BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ + BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ + __test_string_get_size((size), (blk_size), (exp_result10), \ + (exp_result2)); \ } while (0) -static __init void __test_string_get_size(const u64 size, const u64 blk_size, - const enum string_size_units units, - const char *exp_result) +static __init void test_string_get_size_check(const char *units, + const char *exp, + char *res, + const u64 size, + const u64 blk_size) { - char buf[string_get_size_maxbuf]; - - string_get_size(size, blk_size, units, buf, sizeof(buf)); - if (!memcmp(buf, exp_result, strlen(exp_result) + 1)) + if (!memcmp(res, exp, strlen(exp) + 1)) return; - buf[sizeof(buf) - 1] = '\0'; - pr_warn("Test 'test_string_get_size_one' failed!\n"); - pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n", + res[string_get_size_maxbuf - 1] = '\0'; + + pr_warn("Test 'test_string_get_size' failed!\n"); + pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n", size, blk_size, units); - pr_warn("expected: '%s', got '%s'\n", exp_result, buf); + pr_warn("expected: '%s', got '%s'\n", exp, res); +} + +static __init void __test_string_get_size(const u64 size, const u64 blk_size, + const char *exp_result10, + const char *exp_result2) +{ + char buf10[string_get_size_maxbuf]; + char buf2[string_get_size_maxbuf]; + + string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10)); + string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2)); + + test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10, + size, blk_size); + + test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2, + size, blk_size); } static __init void test_string_get_size(void) { - test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB"); - test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB"); - test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B"); + /* small values */ + test_string_get_size_one(0, 512, "0 B", "0 B"); + test_string_get_size_one(1, 512, "512 B", "512 B"); + test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB"); + + /* normal values */ + test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB"); + test_string_get_size_one(500118192, 512, "256 GB", "238 GiB"); + test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB"); + + /* weird block sizes */ + test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB"); + + /* huge values */ + test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB"); + test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB"); } static int __init test_string_helpers_init(void) From a4a1dfa4bb8b1d76d1ed464432ed95bb5956408f Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 2 Feb 2016 
16:57:21 -0800
Subject: [PATCH 05/17] ocfs2/cluster: fix memory leak in o2hb_region_release

o2hb_region_release() currently doesn't free the o2hb_debug_buf structures
hr_db_elapsed_time and hr_db_pinned that were malloced in o2hb_debug_create().
Also, we should call debugfs_remove() before freeing its data, to avoid the
risk of accessing debugfs right after its data has been freed.

Signed-off-by: Joseph Qi
Reviewed-by: Jiufei Xue
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Junxiao Bi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/ocfs2/cluster/heartbeat.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a3cc6d2fc896ce..a76b9ea7722e6d 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1254,15 +1254,15 @@ static const struct file_operations o2hb_debug_fops = {
 
 void o2hb_exit(void)
 {
-	kfree(o2hb_db_livenodes);
-	kfree(o2hb_db_liveregions);
-	kfree(o2hb_db_quorumregions);
-	kfree(o2hb_db_failedregions);
 	debugfs_remove(o2hb_debug_failedregions);
 	debugfs_remove(o2hb_debug_quorumregions);
 	debugfs_remove(o2hb_debug_liveregions);
 	debugfs_remove(o2hb_debug_livenodes);
 	debugfs_remove(o2hb_debug_dir);
+	kfree(o2hb_db_livenodes);
+	kfree(o2hb_db_liveregions);
+	kfree(o2hb_db_quorumregions);
+	kfree(o2hb_db_failedregions);
 }
 
 static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
@@ -1438,13 +1438,15 @@ static void o2hb_region_release(struct config_item *item)
 
 	kfree(reg->hr_slots);
 
-	kfree(reg->hr_db_regnum);
-	kfree(reg->hr_db_livenodes);
 	debugfs_remove(reg->hr_debug_livenodes);
 	debugfs_remove(reg->hr_debug_regnum);
 	debugfs_remove(reg->hr_debug_elapsed_time);
 	debugfs_remove(reg->hr_debug_pinned);
 	debugfs_remove(reg->hr_debug_dir);
+	kfree(reg->hr_db_livenodes);
+	kfree(reg->hr_db_regnum);
+	kfree(reg->hr_debug_elapsed_time);
+	kfree(reg->hr_debug_pinned);
 
 	spin_lock(&o2hb_live_lock);
 	list_del(&reg->hr_all_item);

From 0bb181c7ddc0340ef8d4a3b082b1f1bc7ae21636 Mon Sep 17 00:00:00 2001
From: Seth Jennings
Date: Tue, 2 Feb 2016 16:57:23 -0800
Subject: [PATCH 06/17] MAINTAINERS: update Seth email

Update/unify my contact info. The old email address will stop working soon.

Signed-off-by: Seth Jennings
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 MAINTAINERS | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index f678c37107f5bb..c9f800afd6545d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12133,7 +12133,7 @@ F:	drivers/net/hamradio/*scc.c
 F:	drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:	Seth Jennings
+M:	Seth Jennings
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -12188,7 +12188,7 @@ F:	include/linux/zsmalloc.h
 F:	Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
-M:	Seth Jennings
+M:	Seth Jennings
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c

From 5c2ff95e41c9290d16556cd02e35b25d81be8fe0 Mon Sep 17 00:00:00 2001
From: Michael Holzheu
Date: Tue, 2 Feb 2016 16:57:26 -0800
Subject: [PATCH 07/17] numa: fix /proc//numa_maps for hugetlbfs on s390

When working with hugetlbfs ptes (which are actually pmds), it is not valid
to directly use pte functions like pte_present(), because the hardware bit
layout of pmds and ptes can be different. This is the case on s390. Therefore
we have to convert the hugetlbfs ptes first into a valid pte encoding with
huge_ptep_get().

Currently the /proc//numa_maps code uses hugetlbfs ptes without
huge_ptep_get().
On s390 this leads to the following two problems: 1) The pte_present() function returns false (instead of true) for PROT_NONE hugetlb ptes. Therefore PROT_NONE vmas are missing completely in the "numa_maps" output. 2) The pte_dirty() function always returns false for all hugetlb ptes. Therefore these pages are reported as "mapped=xxx" instead of "dirty=xxx". Therefore use huge_ptep_get() to correctly convert the hugetlb ptes. Signed-off-by: Michael Holzheu Reviewed-by: Gerald Schaefer Cc: [4.3+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 85d16c67c33eaa..4a0c31f904a6da 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1552,18 +1552,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { + pte_t huge_pte = huge_ptep_get(pte); struct numa_maps *md; struct page *page; - if (!pte_present(*pte)) + if (!pte_present(huge_pte)) return 0; - page = pte_page(*pte); + page = pte_page(huge_pte); if (!page) return 0; md = walk->private; - gather_stats(page, md, pte_dirty(*pte), 1); + gather_stats(page, md, pte_dirty(huge_pte), 1); return 0; } From 65376df582174ffcec9e6471bf5b0dd79ba05e4a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 2 Feb 2016 16:57:29 -0800 Subject: [PATCH 08/17] proc: revert /proc//maps [stack:TID] annotation Commit b76437579d13 ("procfs: mark thread stack correctly in proc//maps") added [stack:TID] annotation to /proc//maps. Finding the task of a stack VMA requires walking the entire thread list, turning this into quadratic behavior: a thousand threads means a thousand stacks, so the rendering of /proc//maps needs to look at a million combinations. The cost is not in proportion to the usefulness as described in the patch. Drop the [stack:TID] annotation to make /proc//maps (and /proc//numa_maps) usable again for higher thread counts. The [stack] annotation inside /proc//task//maps is retained, as identifying the stack VMA there is an O(1) operation. Siddesh said: "The end users needed a way to identify thread stacks programmatically and there wasn't a way to do that. I'm afraid I no longer remember (or have access to the resources that would aid my memory since I changed employers) the details of their requirement. However, I did do this on my own time because I thought it was an interesting project for me and nobody really gave any feedback then as to its utility, so as far as I am concerned you could roll back the main thread maps information since the information is available in the thread-specific files" Signed-off-by: Johannes Weiner Cc: "Kirill A. 
Shutemov" Cc: Siddhesh Poyarekar Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 9 ++-- fs/proc/task_mmu.c | 66 +++++++++++------------------- fs/proc/task_nommu.c | 49 +++++++++------------- include/linux/mm.h | 3 +- mm/util.c | 27 +----------- 5 files changed, 48 insertions(+), 106 deletions(-) diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index fde9fd06fa988b..eaebf27539f5f7 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -356,7 +356,7 @@ address perms offset dev inode pathname a7cb1000-a7cb2000 ---p 00000000 00:00 0 a7cb2000-a7eb2000 rw-p 00000000 00:00 0 a7eb2000-a7eb3000 ---p 00000000 00:00 0 -a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001] +a7eb3000-a7ed5000 rw-p 00000000 00:00 0 a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6 a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6 @@ -388,7 +388,6 @@ is not associated with a file: [heap] = the heap of the program [stack] = the stack of the main process - [stack:1001] = the stack of the thread with tid 1001 [vdso] = the "virtual dynamic shared object", the kernel system call handler @@ -396,10 +395,8 @@ is not associated with a file: The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint of the individual tasks of a process. In this file you will see a mapping marked -as [stack] if that task sees it as a stack. This is a key difference from the -content of /proc/PID/maps, where you will see all mappings that are being used -as stack by all of those tasks. Hence, for the example above, the task-level -map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this: +as [stack] if that task sees it as a stack. Hence, for the example above, the +task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this: 08048000-08049000 r-xp 00000000 03:00 8312 /opt/test 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4a0c31f904a6da..fa95ab2d367408 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -259,23 +259,29 @@ static int do_maps_open(struct inode *inode, struct file *file, sizeof(struct proc_maps_private)); } -static pid_t pid_of_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, bool is_pid) +/* + * Indicate if the VMA is a stack for the given task; for + * /proc/PID/maps that is the stack of the main task. 
+ */ +static int is_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, int is_pid) { - struct inode *inode = priv->inode; - struct task_struct *task; - pid_t ret = 0; + int stack = 0; + + if (is_pid) { + stack = vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; + } else { + struct inode *inode = priv->inode; + struct task_struct *task; - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) { - task = task_of_stack(task, vma, is_pid); + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + stack = vma_is_stack_for_task(vma, task); + rcu_read_unlock(); } - rcu_read_unlock(); - - return ret; + return stack; } static void @@ -335,8 +341,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) name = arch_vma_name(vma); if (!name) { - pid_t tid; - if (!mm) { name = "[vdso]"; goto done; @@ -348,21 +352,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) goto done; } - tid = pid_of_stack(priv, vma, is_pid); - if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) { - name = "[stack]"; - } else { - /* Thread stack in /proc/PID/maps */ - seq_pad(m, ' '); - seq_printf(m, "[stack:%d]", tid); - } - } + if (is_stack(priv, vma, is_pid)) + name = "[stack]"; } done: @@ -1618,19 +1609,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_file_path(m, file, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); - } else { - pid_t tid = pid_of_stack(proc_priv, vma, is_pid); - if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. 
- */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) - seq_puts(m, " stack"); - else - seq_printf(m, " stack:%d", tid); - } + } else if (is_stack(proc_priv, vma, is_pid)) { + seq_puts(m, " stack"); } if (is_vm_hugetlb_page(vma)) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index e0d64c92e4f657..faacb0c0d85760 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm, return size; } -static pid_t pid_of_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, bool is_pid) +static int is_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, int is_pid) { - struct inode *inode = priv->inode; - struct task_struct *task; - pid_t ret = 0; - - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) { - task = task_of_stack(task, vma, is_pid); + struct mm_struct *mm = vma->vm_mm; + int stack = 0; + + if (is_pid) { + stack = vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack; + } else { + struct inode *inode = priv->inode; + struct task_struct *task; + + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + stack = vma_is_stack_for_task(vma, task); + rcu_read_unlock(); } - rcu_read_unlock(); - - return ret; + return stack; } /* @@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, if (file) { seq_pad(m, ' '); seq_file_path(m, file, ""); - } else if (mm) { - pid_t tid = pid_of_stack(priv, vma, is_pid); - - if (tid != 0) { - seq_pad(m, ' '); - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) - seq_printf(m, "[stack]"); - else - seq_printf(m, "[stack:%d]", tid); - } + } else if (mm && is_stack(priv, vma, is_pid)) { + seq_pad(m, ' '); + seq_printf(m, "[stack]"); } seq_putc(m, '\n'); diff --git a/include/linux/mm.h b/include/linux/mm.h index f1cd22f2df1ac5..0b50d7848e3af4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1341,8 +1341,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, !vma_growsup(vma->vm_next, addr); } -extern struct task_struct *task_of_stack(struct task_struct *task, - struct vm_area_struct *vma, bool in_group); +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, diff --git a/mm/util.c b/mm/util.c index c108a6542d05d3..4fb14ca5a41967 100644 --- a/mm/util.c +++ b/mm/util.c @@ -230,36 +230,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, } /* Check if the vma is being used as a stack by this task */ -static int vm_is_stack_for_task(struct task_struct *t, - struct vm_area_struct *vma) +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t) { return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } -/* - * Check if the vma is being used as a stack. - * If is_group is non-zero, check in the entire thread group or else - * just check in the current task. Returns the task_struct of the task - * that the vma is stack for. Must be called under rcu_read_lock(). 
- */ -struct task_struct *task_of_stack(struct task_struct *task, - struct vm_area_struct *vma, bool in_group) -{ - if (vm_is_stack_for_task(task, vma)) - return task; - - if (in_group) { - struct task_struct *t; - - for_each_thread(task, t) { - if (vm_is_stack_for_task(t, vma)) - return t; - } - } - - return NULL; -} - #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm) { From 461c7fa126794157484dca48e88effa4963e3af3 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 2 Feb 2016 16:57:35 -0800 Subject: [PATCH 09/17] drivers/scsi/sg.c: mark VMA as VM_IO to prevent migration Reduced testcase: #include #include #include #include #define SIZE 0x2000 int main() { int fd; void *p; fd = open("/dev/sg0", O_RDWR); p = mmap(NULL, SIZE, PROT_EXEC, MAP_PRIVATE | MAP_LOCKED, fd, 0); mbind(p, SIZE, 0, NULL, 0, MPOL_MF_MOVE); return 0; } We shouldn't try to migrate pages in sg VMA as we don't have a way to update Sg_scatter_hold::pages accordingly from mm core. Let's mark the VMA as VM_IO to indicate to mm core that the VMA is not migratable. Signed-off-by: Kirill A. Shutemov Reported-by: Dmitry Vyukov Acked-by: Vlastimil Babka Cc: Doug Gilbert Cc: David Rientjes Cc: Naoya Horiguchi Cc: "Kirill A. Shutemov" Cc: Shiraz Hashim Cc: Hugh Dickins Cc: Sasha Levin Cc: syzkaller Cc: Kostya Serebryany Cc: Alexander Potapenko Cc: James Bottomley Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/scsi/sg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 503ab8b46c0b4e..5e820674432ca3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) } sfp->mmap_called = 1; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; From c792e8240338e41eda4d06a3a71a7bb7af4e6156 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 2 Feb 2016 16:57:38 -0800 Subject: [PATCH 10/17] mm: memcontrol: drop superfluous entry in the per-memcg stats array MEM_CGROUP_STAT_NSTATS is just a delimiter for cgroup1 statistics, not an actual array entry. Reuse it for the first cgroup2 stat entry, like in the event array. 
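As a rough illustration (a sketch only, showing just the entries visible in the
hunk below; the earlier cgroup1 counters are elided), the enum ends up laid out
like this:

	enum mem_cgroup_stat_index {
		/* ... earlier cgroup1 counters elided ... */
		MEM_CGROUP_STAT_SWAP,			/* # of pages, swapped out */
		MEM_CGROUP_STAT_NSTATS,			/* cgroup1 delimiter, not a real counter */
		/* cgroup2 entries start at the delimiter's value */
		MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
		MEMCG_NR_STAT,				/* sizes the per-memcg stat array */
	};

This way MEMCG_NR_STAT no longer counts the delimiter as an extra, unused slot
in the per-memcg stats array.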
Fixes: b2807f07f4f8 ("mm: memcontrol: add "sock" to cgroup2 memory.stat")
Signed-off-by: Johannes Weiner
Cc: Vladimir Davydov
Cc: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9ae48d4aeb5ec7..792c8981e63365 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 	/* default hierarchy stats */
-	MEMCG_SOCK,
+	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
 	MEMCG_NR_STAT,
 };

From 4758e198ad3bcbc1c468e0632c53a63a7d201649 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Tue, 2 Feb 2016 16:57:41 -0800
Subject: [PATCH 11/17] Documentation: cgroup-v2: add memory.stat::sock description

Signed-off-by: Johannes Weiner
Cc: Vladimir Davydov
Cc: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/cgroup-v2.txt | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 65b3eac8856cf7..e8d25e78421454 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -843,6 +843,10 @@ PAGE_SIZE multiple when read back.
 		Amount of memory used to cache filesystem data,
 		including tmpfs and shared memory.
 
+	  sock
+
+		Amount of memory used in network transmission buffers
+
 	  file_mapped
 
 		Amount of cached filesystem data mapped with mmap()

From d977d56ce5b3e8842236f2f9e7483d4914c9592e Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Tue, 2 Feb 2016 16:57:43 -0800
Subject: [PATCH 12/17] mm: warn about VmData over RLIMIT_DATA

This patch provides a way of working around a slight regression introduced by
commit 84638335900f ("mm: rework virtual memory accounting"). Before that
commit, RLIMIT_DATA had control only over the size of the brk region. But that
change has caused problems with all existing versions of valgrind, because
they set RLIMIT_DATA to zero.

This patch fixes the rlimit check (the limit is actually in bytes, not pages)
and by default turns it into a warning, printed at the first VmData misuse:

"mmap: top (795): VmData 516096 exceed data ulimit 512000. Will be forbidden soon."

Behavior is controlled by the boot parameter ignore_rlimit_data=y/n and by the
sysfs knob /sys/module/kernel/parameters/ignore_rlimit_data. For now it is set
to "y".

[akpm@linux-foundation.org: tweak kernel-parameters.txt text]
Signed-off-by: Konstantin Khlebnikov
Link: http://lkml.kernel.org/r/20151228211015.GL2194@uranus
Reported-by: Christian Borntraeger
Cc: Cyrill Gorcunov
Cc: Linus Torvalds
Cc: Vegard Nossum
Cc: Peter Zijlstra
Cc: Vladimir Davydov
Cc: Andy Lutomirski
Cc: Quentin Casasnovas
Cc: Kees Cook
Cc: Willy Tarreau
Cc: Pavel Emelyanov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 Documentation/kernel-parameters.txt |  5 +++++
 mm/internal.h                       | 16 ++++++++++++++++
 mm/mmap.c                           | 23 +++++++++++++++++------
 3 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 87d40a72f6a1be..551ecf09c8dd82 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1496,6 +1496,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			could change it dynamically, usually by
 			/sys/module/printk/parameters/ignore_loglevel.
+ ignore_rlimit_data + Ignore RLIMIT_DATA setting for data mappings, + print warning at first misuse. Can be changed via + /sys/module/kernel/parameters/ignore_rlimit_data. + ihash_entries= [KNL] Set number of hash buckets for inode cache. diff --git a/mm/internal.h b/mm/internal.h index ed8b5ffcf9b16f..6e976302ddd800 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -216,6 +216,22 @@ static inline bool is_cow_mapping(vm_flags_t flags) return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } +static inline bool is_exec_mapping(vm_flags_t flags) +{ + return (flags & (VM_EXEC | VM_WRITE)) == VM_EXEC; +} + +static inline bool is_stack_mapping(vm_flags_t flags) +{ + return (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN))) != 0; +} + +static inline bool is_data_mapping(vm_flags_t flags) +{ + return (flags & ((VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)) | + VM_WRITE | VM_SHARED)) == VM_WRITE; +} + /* mm/util.c */ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node *rb_parent); diff --git a/mm/mmap.c b/mm/mmap.c index 84b12624ceb01d..cfc0cdca421ec4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -69,6 +70,8 @@ const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif +static bool ignore_rlimit_data = true; +core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, @@ -2982,9 +2985,17 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; - if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS & - (VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE) - return mm->data_vm + npages <= rlimit(RLIMIT_DATA); + if (is_data_mapping(flags) && + mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { + if (ignore_rlimit_data) + pr_warn_once("%s (%d): VmData %lu exceed data ulimit " + "%lu. 
Will be forbidden soon.\n", + current->comm, current->pid, + (mm->data_vm + npages) << PAGE_SHIFT, + rlimit(RLIMIT_DATA)); + else + return false; + } return true; } @@ -2993,11 +3004,11 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; - if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC) + if (is_exec_mapping(flags)) mm->exec_vm += npages; - else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN))) + else if (is_stack_mapping(flags)) mm->stack_vm += npages; - else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) + else if (is_data_mapping(flags)) mm->data_vm += npages; } From 30bdbb78009e67767983085e302bec6d97afc679 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 2 Feb 2016 16:57:46 -0800 Subject: [PATCH 13/17] mm: polish virtual memory accounting * add VM_STACK as alias for VM_GROWSUP/DOWN depending on architecture * always account VMAs with flag VM_STACK as stack (as it was before) * cleanup classifying helpers * update comments and documentation Signed-off-by: Konstantin Khlebnikov Tested-by: Sudip Mukherjee Cc: Cyrill Gorcunov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 4 ++-- include/linux/mm.h | 6 ++++-- include/linux/mm_types.h | 6 +++--- mm/internal.h | 23 +++++++++++++++++++---- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index eaebf27539f5f7..843b045b4069a6 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -240,8 +240,8 @@ Table 1-2: Contents of the status files (as of 4.1) RssFile size of resident file mappings RssShmem size of resident shmem memory (includes SysV shm, mapping of tmpfs and shared anonymous mappings) - VmData size of data, stack, and text segments - VmStk size of data, stack, and text segments + VmData size of private data segments + VmStk size of stack segments VmExe size of text segment VmLib size of shared library code VmPTE size of page table entries diff --git a/include/linux/mm.h b/include/linux/mm.h index 0b50d7848e3af4..516e149443397d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp); #endif #ifdef CONFIG_STACK_GROWSUP -#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#define VM_STACK VM_GROWSUP #else -#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#define VM_STACK VM_GROWSDOWN #endif +#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) + /* * Special vmas that are non-mergable, non-mlock()able. * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d3ebb9d21a5334..624b78b848b89f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -424,9 +424,9 @@ struct mm_struct { unsigned long total_vm; /* Total pages mapped */ unsigned long locked_vm; /* Pages that have PG_mlocked set */ unsigned long pinned_vm; /* Refcount permanently increased */ - unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */ - unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ - unsigned long stack_vm; /* VM_GROWSUP/DOWN */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ + unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ + unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; diff --git a/mm/internal.h b/mm/internal.h index 6e976302ddd800..a38a21ebddb454 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -216,20 +216,35 @@ static inline bool is_cow_mapping(vm_flags_t flags) return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } +/* + * These three helpers classifies VMAs for virtual memory accounting. + */ + +/* + * Executable code area - executable, not writable, not stack + */ static inline bool is_exec_mapping(vm_flags_t flags) { - return (flags & (VM_EXEC | VM_WRITE)) == VM_EXEC; + return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; } +/* + * Stack area - atomatically grows in one direction + * + * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: + * do_mmap() forbids all other combinations. + */ static inline bool is_stack_mapping(vm_flags_t flags) { - return (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN))) != 0; + return (flags & VM_STACK) == VM_STACK; } +/* + * Data area - private, writable, not stack + */ static inline bool is_data_mapping(vm_flags_t flags) { - return (flags & ((VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)) | - VM_WRITE | VM_SHARED)) == VM_WRITE; + return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; } /* mm/util.c */ From 3c1da7beeee02560cd0f0c66c5a59fce3c6746e3 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 2 Feb 2016 16:57:49 -0800 Subject: [PATCH 14/17] mm/vmpressure.c: fix subtree pressure detection When vmpressure is called for the entire subtree under pressure we mistakenly use vmpressure->scanned instead of vmpressure->tree_scanned when checking if vmpressure work is to be scheduled. This results in suppressing all vmpressure events in the legacy cgroup hierarchy. Fix it. 
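The intended bookkeeping, as a minimal standalone sketch (plain C model for
illustration only, not the kernel code; "window" stands in for vmpressure_win):

	/*
	 * For the subtree (tree == true) path: accumulate into the tree
	 * counters and compare the *tree* total against the window --
	 * not vmpr->scanned, which belongs to the non-tree path.
	 */
	static int tree_work_due(unsigned long *tree_scanned,
				 unsigned long *tree_reclaimed,
				 unsigned long scanned,
				 unsigned long reclaimed,
				 unsigned long window)
	{
		*tree_scanned += scanned;
		*tree_reclaimed += reclaimed;
		return *tree_scanned >= window;
	}

This is what the chained assignment in the one-line change below
("scanned = vmpr->tree_scanned += scanned;") achieves: the value checked
against vmpressure_win is the accumulated subtree total.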
Fixes: 8e8ae645249b ("mm: memcontrol: hook up vmpressure to socket pressure") Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmpressure.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 9a6c0704211c85..149fdf6c5c56f9 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -248,9 +248,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, if (tree) { spin_lock(&vmpr->sr_lock); - vmpr->tree_scanned += scanned; + scanned = vmpr->tree_scanned += scanned; vmpr->tree_reclaimed += reclaimed; - scanned = vmpr->scanned; spin_unlock(&vmpr->sr_lock); if (scanned < vmpressure_win) From 46437f9a554fbe3e110580ca08ab703b59f2f95a Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 2 Feb 2016 16:57:52 -0800 Subject: [PATCH 15/17] radix-tree: fix race in gang lookup If the indirect_ptr bit is set on a slot, that indicates we need to redo the lookup. Introduce a new function radix_tree_iter_retry() which forces the loop to retry the lookup by setting 'slot' to NULL and turning the iterator back to point at the problematic entry. This is a pretty rare problem to hit at the moment; the lookup has to race with a grow of the radix tree from a height of 0. The consequences of hitting this race are that gang lookup could return a pointer to a radix_tree_node instead of a pointer to whatever the user had inserted in the tree. Fixes: cebbd29e1c2f ("radix-tree: rewrite gang lookup using iterator") Signed-off-by: Matthew Wilcox Cc: Hugh Dickins Cc: Ohad Ben-Cohen Cc: Konstantin Khlebnikov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 16 ++++++++++++++++ lib/radix-tree.c | 12 ++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 7c88ad156a293c..00b17c526c1f4e 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -378,6 +378,22 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) void **radix_tree_next_chunk(struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags); +/** + * radix_tree_iter_retry - retry this chunk of the iteration + * @iter: iterator state + * + * If we iterate over a tree protected only by the RCU lock, a race + * against deletion or creation may result in seeing a slot for which + * radix_tree_deref_retry() returns true. If so, call this function + * and continue the iteration. 
+ */ +static inline __must_check +void **radix_tree_iter_retry(struct radix_tree_iter *iter) +{ + iter->next_index = iter->index; + return NULL; +} + /** * radix_tree_chunk_size - get current chunk size * diff --git a/lib/radix-tree.c b/lib/radix-tree.c index fcf5d98574ce46..6b79e9026e2489 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_slot(slot, root, &iter, first_index) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } @@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } From c6400ba7e13a41539342f1b6e1f9e78419cb0148 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 2 Feb 2016 16:57:55 -0800 Subject: [PATCH 16/17] drivers/hwspinlock: fix race between radix tree insertion and lookup of_hwspin_lock_get_id() is protected by the RCU lock, which means that insertions can occur simultaneously with the lookup. If the radix tree transitions from a height of 0, we can see a slot with the indirect_ptr bit set, which will cause us to at least read random memory, and could cause other havoc. Fix this by using the newly introduced radix_tree_iter_retry(). Signed-off-by: Matthew Wilcox Cc: Hugh Dickins Cc: Ohad Ben-Cohen Cc: Konstantin Khlebnikov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hwspinlock/hwspinlock_core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index 52f708bcf77f39..d50c701b19d678 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index) hwlock = radix_tree_deref_slot(slot); if (unlikely(!hwlock)) continue; + if (radix_tree_is_indirect_ptr(hwlock)) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (hwlock->bank->dev->of_node == args.np) { ret = 0; From 12c9d70bd5056b3ae84746fca973c286f48384cc Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 2 Feb 2016 16:57:57 -0800 Subject: [PATCH 17/17] mm: fix memory leak in copy_huge_pmd() We allocate a pgtable but do not attach it to anything if the PMD is in a DAX VMA, causing it to leak. We certainly try to not free pgtables associated with the huge zero page if the zero page is in a DAX VMA, so I think this is the right solution. This needs to be properly audited. Signed-off-by: Matthew Wilcox Cc: Dan Williams Acked-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c1411961167e12..36c070167b7109 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -858,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, return false; entry = mk_pmd(zero_page, vma->vm_page_prot); entry = pmd_mkhuge(entry); - pgtable_trans_huge_deposit(mm, pmd, pgtable); + if (pgtable) + pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, haddr, pmd, entry); atomic_long_inc(&mm->nr_ptes); return true; @@ -1036,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, spinlock_t *dst_ptl, *src_ptl; struct page *src_page; pmd_t pmd; - pgtable_t pgtable; + pgtable_t pgtable = NULL; int ret; - ret = -ENOMEM; - pgtable = pte_alloc_one(dst_mm, addr); - if (unlikely(!pgtable)) - goto out; + if (!vma_is_dax(vma)) { + ret = -ENOMEM; + pgtable = pte_alloc_one(dst_mm, addr); + if (unlikely(!pgtable)) + goto out; + } dst_ptl = pmd_lock(dst_mm, dst_pmd); src_ptl = pmd_lockptr(src_mm, src_pmd); @@ -1073,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, goto out_unlock; } - if (pmd_trans_huge(pmd)) { + if (!vma_is_dax(vma)) { /* thp accounting separate from pmd_devmap accounting */ src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page);