kasan: revert eviction of stack traces in generic mode
This partially reverts commits cc478e0, 63b85ac, 08d7c94,
a414d42, and 773688a to make use of variable-sized stack depot
records, since eviction of stack entries from the stack depot forces
fixed-size stack records.  Care was taken to retain the code cleanups
introduced by the above commits.

Eviction was added to generic KASAN to alleviate the additional memory
usage of fixed-size stack records, but even with eviction this still
uses more memory than before.

With the re-introduction of variable-sized records for the stack depot,
we can switch back to non-evictable stack records and return to the
previous performance and memory usage baseline.
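
For context, a minimal sketch of the two stack depot usage models
involved (illustrative only, not part of the patch;
stack_depot_save_flags() and the flags are the real <linux/stackdepot.h>
API, the two wrapper names are made up):

  #include <linux/stackdepot.h>

  /*
   * Evictable model (reverted here): records are refcounted and
   * fixed-size so that their slots can be reused; every save made with
   * STACK_DEPOT_FLAG_GET must eventually be balanced by stack_depot_put().
   */
  static depot_stack_handle_t save_evictable(unsigned long *entries,
  					     unsigned int nr_entries)
  {
  	return stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
  				      STACK_DEPOT_FLAG_CAN_ALLOC |
  				      STACK_DEPOT_FLAG_GET);
  }

  /*
   * Non-evictable model (restored here): records are persistent and only
   * as large as the number of frames actually saved, so they pack more
   * densely into depot pools.
   */
  static depot_stack_handle_t save_persistent(unsigned long *entries,
  					      unsigned int nr_entries)
  {
  	return stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
  				      STACK_DEPOT_FLAG_CAN_ALLOC);
  }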

Before (observed after a KASAN kernel boot):

  pools: 597
  refcounted_allocations: 17547
  refcounted_frees: 6477
  refcounted_in_use: 11070
  freelist_size: 3497
  persistent_count: 12163
  persistent_bytes: 1717008

After:

  pools: 319
  refcounted_allocations: 0
  refcounted_frees: 0
  refcounted_in_use: 0
  freelist_size: 0
  persistent_count: 29397
  persistent_bytes: 5183536

As the counters show, with a generic KASAN config, refcounted
allocations and evictions are no longer used.  Due to the variable-sized
records, I observe a reduction of 278 stack depot pools (saving 4448 KiB)
with my test setup.
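
The saving works out as follows, assuming the stack depot's default pool
size of 16 KiB (DEPOT_POOL_ORDER = 2, i.e. four 4 KiB pages per pool):

  597 - 319 = 278 pools
  278 pools * 16 KiB/pool = 4448 KiB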

Link: https://lkml.kernel.org/r/20240129100708.39460-2-elver@google.com
Fixes: cc478e0 ("kasan: avoid resetting aux_lock")
Fixes: 63b85ac ("kasan: stop leaking stack trace handles")
Fixes: 08d7c94 ("kasan: memset free track in qlink_free")
Fixes: a414d42 ("kasan: handle concurrent kasan_record_aux_stack calls")
Fixes: 773688a ("kasan: use stack_depot_put for Generic mode")
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
melver authored and akpm00 committed Feb 21, 2024
1 parent 120513e commit 0a9e072
Showing 4 changed files with 14 additions and 77 deletions.
8 changes: 3 additions & 5 deletions mm/kasan/common.c
@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
 {
 	depot_stack_handle_t stack;
 
-	stack = kasan_save_stack(flags,
-				 STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
 	kasan_set_track(track, stack);
 }

@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 
 	/*
-	 * If the object is not put into quarantine, it will likely be quickly
-	 * reallocated. Thus, release its metadata now.
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
 	 */
-	kasan_release_object_meta(cache, object);
 
 	/* Let slab put the object onto the freelist. */
 	return false;
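
The "use-after-free-before-realloc" case named in the new comment, as a
hypothetical sketch (illustrative only; not from the patch):

  #include <linux/slab.h>

  static void uaf_before_realloc_example(void)
  {
  	char *p = kmalloc(64, GFP_KERNEL);

  	kfree(p);	/* free stack is recorded in the object's free_meta */
  	p[0] = 'x';	/* UAF before the slot is reallocated: since
  			 * __kasan_slab_free() no longer wipes the per-object
  			 * metadata, KASAN can report both the alloc and the
  			 * free stack trace for this access */
  }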
68 changes: 7 additions & 61 deletions mm/kasan/generic.c
@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 	if (alloc_meta) {
 		/* Zero out alloc meta to mark it as invalid. */
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
-		/*
-		 * Prepare the lock for saving auxiliary stack traces.
-		 * Temporarily disable KASAN bug reporting to allow instrumented
-		 * raw_spin_lock_init to access aux_lock, which resides inside
-		 * of a redzone.
-		 */
-		kasan_disable_current();
-		raw_spin_lock_init(&alloc_meta->aux_lock);
-		kasan_enable_current();
 	}
 
 	/*
@@ -506,18 +496,8 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)

 static void release_alloc_meta(struct kasan_alloc_meta *meta)
 {
-	/* Evict the stack traces from stack depot. */
-	stack_depot_put(meta->alloc_track.stack);
-	stack_depot_put(meta->aux_stack[0]);
-	stack_depot_put(meta->aux_stack[1]);
-
-	/*
-	 * Zero out alloc meta to mark it as invalid but keep aux_lock
-	 * initialized to avoid having to reinitialize it when another object
-	 * is allocated in the same slot.
-	 */
-	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
-	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+	/* Zero out alloc meta to mark it as invalid. */
+	__memset(meta, 0, sizeof(*meta));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
@@ -529,27 +509,10 @@ static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
 
-	/* Evict the stack trace from the stack depot. */
-	stack_depot_put(meta->free_track.stack);
-
 	/* Mark free meta as invalid. */
 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 }
 
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
-	struct kasan_alloc_meta *alloc_meta;
-	struct kasan_free_meta *free_meta;
-
-	alloc_meta = kasan_get_alloc_meta(cache, object);
-	if (alloc_meta)
-		release_alloc_meta(alloc_meta);
-
-	free_meta = kasan_get_free_meta(cache, object);
-	if (free_meta)
-		release_free_meta(object, free_meta);
-}
-
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
 	struct kasan_cache *info = &cache->kasan_info;
@@ -574,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
-	depot_stack_handle_t new_handle, old_handle;
-	unsigned long flags;
 
 	if (is_kfence_address(addr) || !slab)
 		return;
@@ -586,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	if (!alloc_meta)
 		return;
 
-	new_handle = kasan_save_stack(0, depot_flags);
-
-	/*
-	 * Temporarily disable KASAN bug reporting to allow instrumented
-	 * spinlock functions to access aux_lock, which resides inside of a
-	 * redzone.
-	 */
-	kasan_disable_current();
-	raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
-	old_handle = alloc_meta->aux_stack[1];
 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = new_handle;
-	raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
-	kasan_enable_current();
-
-	stack_depot_put(old_handle);
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
 }
 
 void kasan_record_aux_stack(void *addr)
 {
-	return __kasan_record_aux_stack(addr,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 void kasan_record_aux_stack_noalloc(void *addr)
 {
-	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -623,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 	if (!alloc_meta)
 		return;
 
-	/* Evict previous stack traces (might exist for krealloc or mempool). */
+	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
 	release_alloc_meta(alloc_meta);
 
 	kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -637,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
 	if (!free_meta)
 		return;
 
-	/* Evict previous stack trace (might exist for mempool). */
+	/* Invalidate previous stack trace (might exist for mempool). */
 	release_free_meta(object, free_meta);
 
 	kasan_save_track(&free_meta->free_track, 0);
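
With eviction gone, recording an auxiliary stack reduces to two plain
stores: aux_stack[] simply holds the last two recorded call stacks, with
no aux_lock and no stack_depot_put().  A hypothetical call sequence (A
and B stand for the returned depot handles):

  kasan_record_aux_stack(obj);	/* saves stack A: aux_stack = { A, 0 } */
  kasan_record_aux_stack(obj);	/* saves stack B: aux_stack = { B, A } */

A racing pair of calls can still lose one of the stores, but with
persistent records a lost handle is harmless; the lock was only needed
while handles were refcounted and had to be put exactly once.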
10 changes: 0 additions & 10 deletions mm/kasan/kasan.h
@@ -6,7 +6,6 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
-#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
 struct kasan_alloc_meta {
 	struct kasan_track alloc_track;
 	/* Free track is stored in kasan_free_meta. */
-	/*
-	 * aux_lock protects aux_stack from accesses from concurrent
-	 * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
-	 * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
-	 * non-sleepable contexts.
-	 */
-	raw_spinlock_t aux_lock;
 	depot_stack_handle_t aux_stack[2];
 };

@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 						const void *object);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
 #else
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
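
After the two hunks above, struct kasan_alloc_meta reduces to the
following (reconstructed from the context lines of this diff):

  struct kasan_alloc_meta {
  	struct kasan_track alloc_track;
  	/* Free track is stored in kasan_free_meta. */
  	depot_stack_handle_t aux_stack[2];
  };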
5 changes: 4 additions & 1 deletion mm/kasan/quarantine.c
@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 	void *object = qlink_to_object(qlink, cache);
 	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
 
-	kasan_release_object_meta(cache, object);
+	/*
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
+	 */
 
 	/*
 	 * If init_on_free is enabled and KASAN's free metadata is stored in
