Skip to content

Commit

Permalink
btrfs: keep private struct on stack for sync reads in btrfs_encoded_read_regular_fill_pages()
Browse files Browse the repository at this point in the history

Only allocate the btrfs_encoded_read_private structure for asynchronous
(io_uring) mode.

There's no need to allocate an object from slab in the synchronous mode. In
such a case the stack can happily be used, as it was before commit 68d3b27
("btrfs: move priv off stack in btrfs_encoded_read_regular_fill_pages()"),
which was a preparation for the async mode.

While at it, fix the comment to reflect the atomic => refcount change in
d296626 ("btrfs: fix use-after-free waiting for encoded read endios").

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Daniel Vacek <neelx@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
  • Loading branch information
Daniel Vacek authored and kdave committed Jan 23, 2025
1 parent e909407 commit 0338954
Showing 1 changed file with 23 additions and 16 deletions.
39 changes: 23 additions & 16 deletions fs/btrfs/inode.c
Original file line number Diff line number Diff line change
Expand Up @@ -9141,7 +9141,7 @@ static ssize_t btrfs_encoded_read_inline(
}

struct btrfs_encoded_read_private {
struct completion done;
struct completion *sync_reads;
void *uring_ctx;
refcount_t pending_refs;
blk_status_t status;
Expand All @@ -9153,11 +9153,10 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)

if (bbio->bio.bi_status) {
/*
* The memory barrier implied by the atomic_dec_return() here
* pairs with the memory barrier implied by the
* atomic_dec_return() or io_wait_event() in
* btrfs_encoded_read_regular_fill_pages() to ensure that this
* write is observed before the load of status in
* The memory barrier implied by the refcount_dec_and_test() here
* pairs with the memory barrier implied by the refcount_dec_and_test()
* in btrfs_encoded_read_regular_fill_pages() to ensure that
* this write is observed before the load of status in
* btrfs_encoded_read_regular_fill_pages().
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
Expand All @@ -9169,7 +9168,7 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
btrfs_uring_read_extent_endio(priv->uring_ctx, err);
kfree(priv);
} else {
complete(&priv->done);
complete(priv->sync_reads);
}
}
bio_put(&bbio->bio);
Expand All @@ -9180,16 +9179,26 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
struct page **pages, void *uring_ctx)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_encoded_read_private *priv;
struct btrfs_encoded_read_private *priv, sync_priv;
struct completion sync_reads;
unsigned long i = 0;
struct btrfs_bio *bbio;
int ret;

priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
if (!priv)
return -ENOMEM;
/*
* Fast path for synchronous reads which completes in this call, io_uring
* needs longer time span.
*/
if (uring_ctx) {
priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
if (!priv)
return -ENOMEM;
} else {
priv = &sync_priv;
init_completion(&sync_reads);
priv->sync_reads = &sync_reads;
}

init_completion(&priv->done);
refcount_set(&priv->pending_refs, 1);
priv->status = 0;
priv->uring_ctx = uring_ctx;
Expand Down Expand Up @@ -9232,11 +9241,9 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
return -EIOCBQUEUED;
} else {
if (!refcount_dec_and_test(&priv->pending_refs))
wait_for_completion_io(&priv->done);
wait_for_completion_io(&sync_reads);
/* See btrfs_encoded_read_endio() for ordering. */
ret = blk_status_to_errno(READ_ONCE(priv->status));
kfree(priv);
return ret;
return blk_status_to_errno(READ_ONCE(priv->status));
}
}

Expand Down

0 comments on commit 0338954

Please sign in to comment.