x86/kasan: Map shadow for percpu pages on demand
KASAN maps shadow for the entire CPU-entry-area:
  [CPU_ENTRY_AREA_BASE, CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE]

This will explode once the per-cpu entry areas are randomized, since that
will increase CPU_ENTRY_AREA_MAP_SIZE to 512 GB and KASAN will fail to
allocate shadow for such a big area.
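
For scale, an illustrative aside that is not part of the commit: generic
KASAN keeps one shadow byte for every 8 bytes of address space
(KASAN_SHADOW_SCALE_SHIFT == 3 on x86-64), so shadow covering a 512 GB
area is itself 64 GB. A minimal user-space sketch of the arithmetic, with
the 512 GB figure taken from the changelog:

  /* Illustrative only; builds and runs as plain C. */
  #include <stdio.h>

  int main(void)
  {
  	unsigned long long map_size = 512ULL << 30;  /* randomized CPU_ENTRY_AREA_MAP_SIZE */
  	unsigned long long shadow   = map_size >> 3; /* one shadow byte per 8 bytes */

  	printf("shadow needed: %llu GB\n", shadow >> 30); /* prints 64 */
  	return 0;
  }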

Fix this by allocating KASAN shadow only for the CPU entry area addresses
actually in use, i.e. those mapped by cea_map_percpu_pages().

Thanks to the 0day folks for finding and reporting this issue.

[ dhansen: tweak changelog since this will get committed before peterz's
	   actual cpu-entry-area randomization ]

Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Tested-by: Yujie Liu <yujie.liu@intel.com>
Cc: kernel test robot <yujie.liu@intel.com>
Link: https://lore.kernel.org/r/202210241508.2e203c3d-yujie.liu@intel.com
aryabinin authored and hansendc committed Oct 27, 2022
1 parent 247f34f · commit 9fd429c
Showing 3 changed files with 22 additions and 4 deletions.
arch/x86/include/asm/kasan.h: 3 additions & 0 deletions
@@ -28,9 +28,12 @@
 #ifdef CONFIG_KASAN
 void __init kasan_early_init(void);
 void __init kasan_init(void);
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
+static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
+						   int nid) { }
 #endif
 
 #endif
arch/x86/mm/cpu_entry_area.c: 7 additions & 1 deletion
@@ -9,6 +9,7 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
+#include <asm/kasan.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
@@ -53,8 +54,13 @@ void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
 static void __init
 cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 {
+	phys_addr_t pa = per_cpu_ptr_to_phys(ptr);
+
+	kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE,
+					early_pfn_to_nid(PFN_DOWN(pa)));
+
 	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
-		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+		cea_set_pte(cea_vaddr, pa, prot);
 }
 
 static void __init percpu_setup_debug_store(unsigned int cpu)
arch/x86/mm/kasan_init_64.c: 12 additions & 3 deletions
@@ -316,6 +316,18 @@ void __init kasan_early_init(void)
 	kasan_map_early_shadow(init_top_pgt);
 }
 
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
+{
+	unsigned long shadow_start, shadow_end;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(va);
+	shadow_start = round_down(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow(va + size);
+	shadow_end = round_up(shadow_end, PAGE_SIZE);
+
+	kasan_populate_shadow(shadow_start, shadow_end, nid);
+}
+
 void __init kasan_init(void)
 {
 	int i;
@@ -393,9 +405,6 @@ void __init kasan_init(void)
 		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
 		shadow_cpu_entry_begin);
 
-	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-			      (unsigned long)shadow_cpu_entry_end, 0);
-
 	kasan_populate_early_shadow(shadow_cpu_entry_end,
 			kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
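
A side note on the new helper's rounding: since one shadow byte covers
eight bytes of address space, the shadow of a page-aligned region is
usually not page-aligned itself, while kasan_populate_shadow() works on
whole pages. Below is a hedged user-space model of that rounding; the real
kasan_mem_to_shadow() also adds KASAN_SHADOW_OFFSET (omitted here), and
the addresses are made up:

  /* Toy model of kasan_populate_shadow_for_vaddr()'s rounding; not kernel code. */
  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define SHADOW_SHIFT	3	/* KASAN_SHADOW_SCALE_SHIFT on x86-64 */

  static unsigned long mem_to_shadow(unsigned long va)
  {
  	return va >> SHADOW_SHIFT;	/* KASAN_SHADOW_OFFSET omitted */
  }

  int main(void)
  {
  	unsigned long va = 0x100000UL, size = 2 * PAGE_SIZE;	/* hypothetical range */
  	unsigned long s = mem_to_shadow(va) & ~(PAGE_SIZE - 1);	/* round_down */
  	unsigned long e = (mem_to_shadow(va + size) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); /* round_up */

  	/* Two mapped pages need only 2 * 512 = 1024 shadow bytes: one shadow page. */
  	printf("shadow [%#lx, %#lx), %lu page(s)\n", s, e, (e - s) / PAGE_SIZE);
  	return 0;
  }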
