
Commit 7e89540

naota authored and kdave committed
btrfs: factor out prepare_allocation() for extent allocation
This commit finally factors out prepare_allocation() from find_free_extent(). This function is called before the allocation loop, and an allocator-specific function such as prepare_allocation_clustered() should initialize its private information there and can set a proper hint_byte to indicate where the allocation should start.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
1 parent 45d8e03 commit 7e89540

File tree

1 file changed: +68 -42 lines changed

fs/btrfs/extent-tree.c

Lines changed: 68 additions & 42 deletions
@@ -3853,6 +3853,71 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 	return -ENOSPC;
 }
 
+static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+					struct find_free_extent_ctl *ffe_ctl,
+					struct btrfs_space_info *space_info,
+					struct btrfs_key *ins)
+{
+	/*
+	 * If our free space is heavily fragmented we may not be able to make
+	 * big contiguous allocations, so instead of doing the expensive search
+	 * for free space, simply return ENOSPC with our max_extent_size so we
+	 * can go ahead and search for a more manageable chunk.
+	 *
+	 * If our max_extent_size is large enough for our allocation simply
+	 * disable clustering since we will likely not be able to find enough
+	 * space to create a cluster and induce latency trying.
+	 */
+	if (space_info->max_extent_size) {
+		spin_lock(&space_info->lock);
+		if (space_info->max_extent_size &&
+		    ffe_ctl->num_bytes > space_info->max_extent_size) {
+			ins->offset = space_info->max_extent_size;
+			spin_unlock(&space_info->lock);
+			return -ENOSPC;
+		} else if (space_info->max_extent_size) {
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&space_info->lock);
+	}
+
+	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
+					       &ffe_ctl->empty_cluster);
+	if (ffe_ctl->last_ptr) {
+		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
+
+		spin_lock(&last_ptr->lock);
+		if (last_ptr->block_group)
+			ffe_ctl->hint_byte = last_ptr->window_start;
+		if (last_ptr->fragmented) {
+			/*
+			 * We still set window_start so we can keep track of the
+			 * last place we found an allocation to try and save
+			 * some time.
+			 */
+			ffe_ctl->hint_byte = last_ptr->window_start;
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&last_ptr->lock);
+	}
+
+	return 0;
+}
+
+static int prepare_allocation(struct btrfs_fs_info *fs_info,
+			      struct find_free_extent_ctl *ffe_ctl,
+			      struct btrfs_space_info *space_info,
+			      struct btrfs_key *ins)
+{
+	switch (ffe_ctl->policy) {
+	case BTRFS_EXTENT_ALLOC_CLUSTERED:
+		return prepare_allocation_clustered(fs_info, ffe_ctl,
+						    space_info, ins);
+	default:
+		BUG();
+	}
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -3922,48 +3987,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 		return -ENOSPC;
 	}
 
-	/*
-	 * If our free space is heavily fragmented we may not be able to make
-	 * big contiguous allocations, so instead of doing the expensive search
-	 * for free space, simply return ENOSPC with our max_extent_size so we
-	 * can go ahead and search for a more manageable chunk.
-	 *
-	 * If our max_extent_size is large enough for our allocation simply
-	 * disable clustering since we will likely not be able to find enough
-	 * space to create a cluster and induce latency trying.
-	 */
-	if (unlikely(space_info->max_extent_size)) {
-		spin_lock(&space_info->lock);
-		if (space_info->max_extent_size &&
-		    num_bytes > space_info->max_extent_size) {
-			ins->offset = space_info->max_extent_size;
-			spin_unlock(&space_info->lock);
-			return -ENOSPC;
-		} else if (space_info->max_extent_size) {
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&space_info->lock);
-	}
-
-	ffe_ctl.last_ptr = fetch_cluster_info(fs_info, space_info,
-					      &ffe_ctl.empty_cluster);
-	if (ffe_ctl.last_ptr) {
-		struct btrfs_free_cluster *last_ptr = ffe_ctl.last_ptr;
-
-		spin_lock(&last_ptr->lock);
-		if (last_ptr->block_group)
-			ffe_ctl.hint_byte = last_ptr->window_start;
-		if (last_ptr->fragmented) {
-			/*
-			 * We still set window_start so we can keep track of the
-			 * last place we found an allocation to try and save
-			 * some time.
-			 */
-			ffe_ctl.hint_byte = last_ptr->window_start;
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&last_ptr->lock);
-	}
+	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
+	if (ret < 0)
+		return ret;
 
 	ffe_ctl.search_start = max(ffe_ctl.search_start,
 				   first_logical_byte(fs_info, 0));
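
The switch in prepare_allocation() is the extension point this commit creates: each value of ffe_ctl->policy gets its own setup hook that runs once before the allocation loop. A minimal sketch of how a later policy could plug in is below; the BTRFS_EXTENT_ALLOC_ZONED value and prepare_allocation_zoned() helper are hypothetical illustrations (not part of this commit), and the snippet assumes the surrounding extent-tree.c context for the struct definitions.

/*
 * Illustrative sketch only: BTRFS_EXTENT_ALLOC_ZONED and
 * prepare_allocation_zoned() are hypothetical here, not part of
 * this commit.
 */
static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
				    struct find_free_extent_ctl *ffe_ctl)
{
	/*
	 * A policy that cannot benefit from clusters would disable
	 * clustering up front and could seed ffe_ctl->hint_byte with
	 * its own preferred starting offset.
	 */
	ffe_ctl->use_cluster = false;
	return 0;
}

static int prepare_allocation(struct btrfs_fs_info *fs_info,
			      struct find_free_extent_ctl *ffe_ctl,
			      struct btrfs_space_info *space_info,
			      struct btrfs_key *ins)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		return prepare_allocation_clustered(fs_info, ffe_ctl,
						    space_info, ins);
	case BTRFS_EXTENT_ALLOC_ZONED:	/* hypothetical */
		return prepare_allocation_zoned(fs_info, ffe_ctl);
	default:
		BUG();
	}
}

Because find_free_extent() only calls the prepare_allocation() wrapper, adding a policy this way needs no change in the caller; the BUG() in the default arm catches any policy value that lacks a matching hook.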
