Merge branch 'for-next-next-v6.4-20230529' into for-next-20230529

kdave committed May 29, 2023
2 parents 044c7e5 + 0a7e1e5 commit bebbb1f
Showing 60 changed files with 1,545 additions and 1,921 deletions.
44 changes: 39 additions & 5 deletions fs/btrfs/async-thread.c
@@ -71,6 +71,16 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
 	return atomic_read(&wq->pending) > wq->thresh * 2;
 }
 
+static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
+				 struct btrfs_fs_info *fs_info)
+{
+	wq->fs_info = fs_info;
+	atomic_set(&wq->pending, 0);
+	INIT_LIST_HEAD(&wq->ordered_list);
+	spin_lock_init(&wq->list_lock);
+	spin_lock_init(&wq->thres_lock);
+}
+
 struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 					      const char *name, unsigned int flags,
 					      int limit_active, int thresh)
@@ -80,9 +90,9 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 	if (!ret)
 		return NULL;
 
-	ret->fs_info = fs_info;
+	btrfs_init_workqueue(ret, fs_info);
+
 	ret->limit_active = limit_active;
-	atomic_set(&ret->pending, 0);
 	if (thresh == 0)
 		thresh = DFT_THRESHOLD;
 	/* For low threshold, disabling threshold is a better choice */
@@ -106,9 +116,33 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&ret->ordered_list);
-	spin_lock_init(&ret->list_lock);
-	spin_lock_init(&ret->thres_lock);
 	trace_btrfs_workqueue_alloc(ret, name);
 	return ret;
 }
+
+struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
+				struct btrfs_fs_info *fs_info, const char *name,
+				unsigned int flags)
+{
+	struct btrfs_workqueue *ret;
+
+	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	btrfs_init_workqueue(ret, fs_info);
+
+	/* Ordered workqueues don't allow @max_active adjustments. */
+	ret->limit_active = 1;
+	ret->current_active = 1;
+	ret->thresh = NO_THRESHOLD;
+
+	ret->normal_wq = alloc_ordered_workqueue("btrfs-%s", flags, name);
+	if (!ret->normal_wq) {
+		kfree(ret);
+		return NULL;
+	}
+
+	trace_btrfs_workqueue_alloc(ret, name);
+	return ret;
+}
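Note: the new allocator backs single-threaded users with the kernel's alloc_ordered_workqueue(), so queued items run one at a time in submission order and the thresholding machinery is disabled. A minimal caller sketch, assuming a former btrfs_alloc_workqueue() user with max_active == 1 is being converted; the "fixup" name is illustrative, not taken from this diff:

    /* Hypothetical conversion of a single-threaded workqueue user: */
    fs_info->fixup_workers = btrfs_alloc_ordered_workqueue(fs_info, "fixup", 0);
    if (!fs_info->fixup_workers)
            return -ENOMEM;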
3 changes: 3 additions & 0 deletions fs/btrfs/async-thread.h
@@ -31,6 +31,9 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 					      unsigned int flags,
 					      int limit_active,
 					      int thresh);
+struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
+				struct btrfs_fs_info *fs_info, const char *name,
+				unsigned int flags);
 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
 		     btrfs_func_t ordered_func, btrfs_func_t ordered_free);
 void btrfs_queue_work(struct btrfs_workqueue *wq,
32 changes: 11 additions & 21 deletions fs/btrfs/bio.c
@@ -574,27 +574,20 @@ static void run_one_async_free(struct btrfs_work *work)
 
 static bool should_async_write(struct btrfs_bio *bbio)
 {
-	/*
-	 * If the I/O is not issued by fsync and friends, (->sync_writers != 0),
-	 * then try to defer the submission to a workqueue to parallelize the
-	 * checksum calculation.
-	 */
-	if (atomic_read(&bbio->inode->sync_writers))
+	/* Submit synchronously if the checksum implementation is fast. */
+	if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
 		return false;
 
 	/*
-	 * Submit metadata writes synchronously if the checksum implementation
-	 * is fast, or we are on a zoned device that wants I/O to be submitted
-	 * in order.
+	 * Try to defer the submission to a workqueue to parallelize the
+	 * checksum calculation unless the I/O is issued synchronously.
 	 */
-	if (bbio->bio.bi_opf & REQ_META) {
-		struct btrfs_fs_info *fs_info = bbio->fs_info;
+	if (op_is_sync(bbio->bio.bi_opf))
+		return false;
 
-		if (btrfs_is_zoned(fs_info))
-			return false;
-		if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
-			return false;
-	}
+	/* Zoned devices require I/O to be submitted in order. */
+	if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
+		return false;
 
 	return true;
 }
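Note: op_is_sync() comes from the block layer; a simplified sketch of its behavior (paraphrased from the block layer headers, not part of this diff):

    /* Reads, and writes flagged REQ_SYNC/REQ_FUA/REQ_PREFLUSH, count as
     * synchronous, so they skip the checksum workqueue and are submitted
     * from the caller's context. */
    static inline bool op_is_sync(blk_opf_t op)
    {
            return (op & REQ_OP_MASK) == REQ_OP_READ ||
                   (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
    }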
@@ -622,10 +615,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
 
 	btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
 			run_one_async_free);
-	if (op_is_sync(bbio->bio.bi_opf))
-		btrfs_queue_work(fs_info->hipri_workers, &async->work);
-	else
-		btrfs_queue_work(fs_info->workers, &async->work);
+	btrfs_queue_work(fs_info->workers, &async->work);
 	return true;
 }
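Note: because should_async_write() now rejects synchronous bios up front, nothing that reaches btrfs_wq_submit_bio() needs priority treatment, and fs_info->hipri_workers is presumably dropped elsewhere in the full 60-file diff. The resulting flow, paraphrased rather than quoted (argument list elided):

    /* Paraphrased flow in the submission path: */
    if (should_async_write(bbio) && btrfs_wq_submit_bio(bbio, ...))
            return;         /* checksummed and submitted via fs_info->workers */
    /* otherwise checksum and submit in the caller's context */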

@@ -635,7 +625,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 	struct btrfs_fs_info *fs_info = bbio->fs_info;
 	struct btrfs_bio *orig_bbio = bbio;
 	struct bio *bio = &bbio->bio;
-	u64 logical = bio->bi_iter.bi_sector << 9;
+	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 length = bio->bi_iter.bi_size;
 	u64 map_length = length;
 	bool use_append = btrfs_use_zone_append(bbio);
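Note: the conversions from an open-coded shift to SECTOR_SHIFT (here and in check-integrity.c and compression.c below) are behavior-neutral, since the block layer defines sectors as 512 bytes and SECTOR_SHIFT as 9. A quick worked example with an assumed value:

    /* SECTOR_SHIFT == 9, i.e. 512-byte sectors: */
    u64 sector = bio->bi_iter.bi_sector;    /* e.g. 2048 */
    u64 bytes  = sector << SECTOR_SHIFT;    /* 2048 * 512 = 1048576 (1 MiB) */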
6 changes: 3 additions & 3 deletions fs/btrfs/block-group.c
@@ -3521,9 +3521,9 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 		spin_unlock(&cache->lock);
 		spin_unlock(&space_info->lock);
 
-		set_extent_dirty(&trans->transaction->pinned_extents,
-				 bytenr, bytenr + num_bytes - 1,
-				 GFP_NOFS | __GFP_NOFAIL);
+		set_extent_bit(&trans->transaction->pinned_extents,
+			       bytenr, bytenr + num_bytes - 1,
+			       EXTENT_DIRTY, NULL);
 	}
 
 	spin_lock(&trans->transaction->dirty_bgs_lock);
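Note: this spells out what the removed helper did: set EXTENT_DIRTY on the range with no cached-state pointer. Assuming set_extent_dirty() was the thin wrapper it appears to be (the GFP flags drop out because this extent-io-tree API no longer takes a mask), the old call was roughly equivalent to:

    /* Assumed shape of the removed wrapper, for comparison only: */
    static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
                                       u64 end, gfp_t mask)
    {
            return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL);
    }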
19 changes: 19 additions & 0 deletions fs/btrfs/block-rsv.c
@@ -541,3 +541,22 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
 
 	return ERR_PTR(ret);
 }
+
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *rsv)
+{
+	u64 needed_bytes;
+	int ret;
+
+	/* 1 for slack space, 1 for updating the inode */
+	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
+		       btrfs_calc_metadata_size(fs_info, 1);
+
+	spin_lock(&rsv->lock);
+	if (rsv->reserved < needed_bytes)
+		ret = -ENOSPC;
+	else
+		ret = 0;
+	spin_unlock(&rsv->lock);
+	return ret;
+}
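Note: a sketch of how a caller might gate a free-space-cache truncation on this check; the call site and the choice of rsv are assumptions for illustration, not shown in this hunk:

    /* Hypothetical caller: refuse to truncate the cache inode unless the
     * reservation covers one insert (slack) plus one inode update. */
    ret = btrfs_check_trunc_cache_free_space(fs_info, rsv);
    if (ret)        /* -ENOSPC */
            return ret;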
2 changes: 2 additions & 0 deletions fs/btrfs/block-rsv.h
@@ -82,6 +82,8 @@ void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
 struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
 					    struct btrfs_root *root,
 					    u32 blocksize);
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *rsv);
 static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
 					 struct btrfs_block_rsv *block_rsv,
 					 u32 blocksize)
19 changes: 0 additions & 19 deletions fs/btrfs/btrfs_inode.h
@@ -116,9 +116,6 @@ struct btrfs_inode {
 
 	unsigned long runtime_flags;
 
-	/* Keep track of who's O_SYNC/fsyncing currently */
-	atomic_t sync_writers;
-
 	/* full 64 bit generation number, struct vfs_inode doesn't have a big
 	 * enough field for this.
 	 */
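Note: the per-inode counter goes away because, after the bio.c change above, should_async_write() derives the same information from the bio's own flags. The pattern this obsoletes looked roughly like the following (paraphrased from the old comment, not verbatim from any file in this diff):

    /* Old pattern around O_SYNC/fsync writeback (paraphrased): */
    atomic_inc(&BTRFS_I(inode)->sync_writers);
    ret = btrfs_fdatawrite_range(inode, start, end);
    atomic_dec(&BTRFS_I(inode)->sync_writers);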
@@ -407,22 +404,6 @@ static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
 	return true;
 }
 
-/*
- * btrfs_inode_item stores flags in a u64, btrfs_inode stores them in two
- * separate u32s. These two functions convert between the two representations.
- */
-static inline u64 btrfs_inode_combine_flags(u32 flags, u32 ro_flags)
-{
-	return (flags | ((u64)ro_flags << 32));
-}
-
-static inline void btrfs_inode_split_flags(u64 inode_item_flags,
-					   u32 *flags, u32 *ro_flags)
-{
-	*flags = (u32)inode_item_flags;
-	*ro_flags = (u32)(inode_item_flags >> 32);
-}
-
 /* Array of bytes with variable length, hexadecimal format 0x1234 */
 #define CSUM_FMT		"0x%*phN"
 #define CSUM_FMT_VALUE(size, bytes)	size, bytes
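Note: the removed conversion helpers pack the two u32s as the low and high halves of the on-disk u64. A worked round trip with illustrative values:

    u64 packed = btrfs_inode_combine_flags(0x1, 0x2);   /* 0x0000000200000001 */
    u32 flags, ro_flags;
    btrfs_inode_split_flags(packed, &flags, &ro_flags); /* flags == 0x1, ro_flags == 0x2 */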
2 changes: 1 addition & 1 deletion fs/btrfs/check-integrity.c
@@ -1565,7 +1565,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 
 		bio = bio_alloc(block_ctx->dev->bdev, num_pages - i,
 				REQ_OP_READ, GFP_NOFS);
-		bio->bi_iter.bi_sector = dev_bytenr >> 9;
+		bio->bi_iter.bi_sector = dev_bytenr >> SECTOR_SHIFT;
 
 		for (j = i; j < num_pages; j++) {
 			ret = bio_add_page(bio, block_ctx->pagev[j],
6 changes: 3 additions & 3 deletions fs/btrfs/compression.c
@@ -421,7 +421,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	 */
 	if (!em || cur < em->start ||
 	    (cur + fs_info->sectorsize > extent_map_end(em)) ||
-	    (em->block_start >> 9) != orig_bio->bi_iter.bi_sector) {
+	    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
 		free_extent_map(em);
 		unlock_extent(tree, cur, page_end, NULL);
 		unlock_page(page);
@@ -472,7 +472,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
  */
-void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num)
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 {
 	struct btrfs_inode *inode = bbio->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
Expand Down Expand Up @@ -538,7 +538,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num)
if (memstall)
psi_memstall_leave(&pflags);

btrfs_submit_bio(&cb->bbio, mirror_num);
btrfs_submit_bio(&cb->bbio, 0);
return;

out_free_compressed_pages:
2 changes: 1 addition & 1 deletion fs/btrfs/compression.h
@@ -93,7 +93,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 				  unsigned int nr_pages,
 				  blk_opf_t write_flags,
 				  bool writeback);
-void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num);
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
 
 unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
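Note: with the parameter gone, compressed reads always enter the storage layer at mirror 0 (see btrfs_submit_bio(&cb->bbio, 0) in the compression.c hunk above); picking an alternate copy appears to be left to the submission machinery below this call. A hypothetical updated call site:

    btrfs_submit_compressed_read(bbio);     /* no mirror_num argument anymore */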
