dm vdo memory-alloc: change from uds_ to vdo_ namespace
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Mike Snitzer committed Mar 4, 2024
1 parent 6008d52 commit 0eea6b6
Showing 44 changed files with 453 additions and 455 deletions.
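
For orientation, the allocation idiom being renamed looks like the sketch below. This is an illustrative caller-side example, not code from the commit: struct example_widget and both helper functions are hypothetical, and the include names are assumptions about the dm-vdo headers. The calls themselves mirror the sites in this diff, where vdo_allocate()/vdo_allocate_memory() return VDO_SUCCESS on success and vdo_forget() passes a pointer along while clearing the variable that held it.

/*
 * Illustrative sketch only: the struct and helpers here are hypothetical.
 * It shows the caller-side pattern this commit moves from the uds_ to the
 * vdo_ namespace (uds_allocate, uds_allocate_memory, uds_free, uds_forget).
 */
#include "constants.h"		/* VDO_BLOCK_SIZE (assumed header) */
#include "memory-alloc.h"	/* vdo_allocate(), vdo_allocate_memory(), vdo_free(), vdo_forget() */
#include "status-codes.h"	/* VDO_SUCCESS (assumed header) */

struct example_widget {
	char *data;
};

static int make_example_widget(struct example_widget **widget_ptr)
{
	struct example_widget *widget;
	/* Formerly: uds_allocate(1, struct example_widget, __func__, &widget); */
	int result = vdo_allocate(1, struct example_widget, __func__, &widget);

	if (result != VDO_SUCCESS)
		return result;

	/* Formerly: uds_allocate_memory(...); */
	result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "example data", &widget->data);
	if (result != VDO_SUCCESS) {
		vdo_free(widget);
		return result;
	}

	*widget_ptr = widget;
	return VDO_SUCCESS;
}

static void free_example_widget(struct example_widget *widget)
{
	if (widget == NULL)
		return;

	/* vdo_forget() clears widget->data as the pointer is handed to vdo_free(). */
	vdo_free(vdo_forget(widget->data));
	vdo_free(widget);
}

The hunks below apply this rename mechanically at each call site.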

drivers/md/dm-vdo/action-manager.c: 2 changes (1 addition & 1 deletion)
@@ -107,7 +107,7 @@ int vdo_make_action_manager(zone_count_t zones,
struct action_manager **manager_ptr)
{
struct action_manager *manager;
- int result = uds_allocate(1, struct action_manager, __func__, &manager);
+ int result = vdo_allocate(1, struct action_manager, __func__, &manager);

if (result != VDO_SUCCESS)
return result;

drivers/md/dm-vdo/admin-state.c: 2 changes (1 addition & 1 deletion)
@@ -206,7 +206,7 @@ bool vdo_finish_operation(struct admin_state *state, int result)
if (!state->starting) {
vdo_set_admin_state_code(state, state->next_state);
if (state->waiter != NULL)
- vdo_launch_completion(uds_forget(state->waiter));
+ vdo_launch_completion(vdo_forget(state->waiter));
}

return true;

drivers/md/dm-vdo/block-map.c: 60 changes (30 additions & 30 deletions)
@@ -221,12 +221,12 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
u64 size = cache->page_count * (u64) VDO_BLOCK_SIZE;
int result;

- result = uds_allocate(cache->page_count, struct page_info, "page infos",
+ result = vdo_allocate(cache->page_count, struct page_info, "page infos",
&cache->infos);
if (result != UDS_SUCCESS)
return result;

- result = uds_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages);
+ result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages);
if (result != UDS_SUCCESS)
return result;

@@ -1341,7 +1341,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
}

/* Reset the page map by re-allocating it. */
- vdo_int_map_free(uds_forget(cache->page_map));
+ vdo_int_map_free(vdo_forget(cache->page_map));
return vdo_int_map_create(cache->page_count, &cache->page_map);
}

@@ -2346,17 +2346,17 @@ static int make_segment(struct forest *old_forest, block_count_t new_pages,

forest->segments = index + 1;

- result = uds_allocate(forest->segments, struct boundary,
+ result = vdo_allocate(forest->segments, struct boundary,
"forest boundary array", &forest->boundaries);
if (result != VDO_SUCCESS)
return result;

- result = uds_allocate(forest->segments, struct tree_page *,
+ result = vdo_allocate(forest->segments, struct tree_page *,
"forest page pointers", &forest->pages);
if (result != VDO_SUCCESS)
return result;

- result = uds_allocate(new_pages, struct tree_page,
+ result = vdo_allocate(new_pages, struct tree_page,
"new forest pages", &forest->pages[index]);
if (result != VDO_SUCCESS)
return result;
@@ -2382,7 +2382,7 @@ static int make_segment(struct forest *old_forest, block_count_t new_pages,
struct block_map_tree *tree = &(forest->trees[root]);
height_t height;

- int result = uds_allocate(forest->segments,
+ int result = vdo_allocate(forest->segments,
struct block_map_tree_segment,
"tree root segments", &tree->segments);
if (result != VDO_SUCCESS)
@@ -2424,15 +2424,15 @@ static void deforest(struct forest *forest, size_t first_page_segment)
size_t segment;

for (segment = first_page_segment; segment < forest->segments; segment++)
- uds_free(forest->pages[segment]);
- uds_free(forest->pages);
+ vdo_free(forest->pages[segment]);
+ vdo_free(forest->pages);
}

for (root = 0; root < forest->map->root_count; root++)
- uds_free(forest->trees[root].segments);
+ vdo_free(forest->trees[root].segments);

- uds_free(forest->boundaries);
- uds_free(forest);
+ vdo_free(forest->boundaries);
+ vdo_free(forest);
}

/**
@@ -2459,7 +2459,7 @@ static int make_forest(struct block_map *map, block_count_t entries)
return VDO_SUCCESS;
}

- result = uds_allocate_extended(struct forest, map->root_count,
+ result = vdo_allocate_extended(struct forest, map->root_count,
struct block_map_tree, __func__,
&forest);
if (result != VDO_SUCCESS)
@@ -2485,7 +2485,7 @@ static void replace_forest(struct block_map *map)
if (map->next_forest != NULL) {
if (map->forest != NULL)
deforest(map->forest, map->forest->segments);
- map->forest = uds_forget(map->next_forest);
+ map->forest = vdo_forget(map->next_forest);
}

map->entry_count = map->next_entry_count;
@@ -2501,11 +2501,11 @@ static void finish_cursor(struct cursor *cursor)
struct cursors *cursors = cursor->parent;
struct vdo_completion *completion = cursors->completion;

- return_vio_to_pool(cursors->pool, uds_forget(cursor->vio));
+ return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
if (--cursors->active_roots > 0)
return;

- uds_free(cursors);
+ vdo_free(cursors);

vdo_finish_completion(completion);
}
@@ -2681,7 +2681,7 @@ void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
struct cursors *cursors;
int result;

- result = uds_allocate_extended(struct cursors, map->root_count,
+ result = vdo_allocate_extended(struct cursors, map->root_count,
struct cursor, __func__, &cursors);
if (result != VDO_SUCCESS) {
vdo_fail_completion(completion, result);
@@ -2729,7 +2729,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map,
zone->thread_id = vdo->thread_config.logical_threads[zone_number];
zone->block_map = map;

- result = uds_allocate_extended(struct dirty_lists, maximum_age,
+ result = vdo_allocate_extended(struct dirty_lists, maximum_age,
dirty_era_t, __func__,
&zone->dirty_lists);
if (result != VDO_SUCCESS)
@@ -2822,19 +2822,19 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone)
{
struct vdo_page_cache *cache = &zone->page_cache;

- uds_free(uds_forget(zone->dirty_lists));
- free_vio_pool(uds_forget(zone->vio_pool));
- vdo_int_map_free(uds_forget(zone->loading_pages));
+ vdo_free(vdo_forget(zone->dirty_lists));
+ free_vio_pool(vdo_forget(zone->vio_pool));
+ vdo_int_map_free(vdo_forget(zone->loading_pages));
if (cache->infos != NULL) {
struct page_info *info;

for (info = cache->infos; info < cache->infos + cache->page_count; info++)
- free_vio(uds_forget(info->vio));
+ free_vio(vdo_forget(info->vio));
}

- vdo_int_map_free(uds_forget(cache->page_map));
- uds_free(uds_forget(cache->infos));
- uds_free(uds_forget(cache->pages));
+ vdo_int_map_free(vdo_forget(cache->page_map));
+ vdo_free(vdo_forget(cache->infos));
+ vdo_free(vdo_forget(cache->pages));
}

void vdo_free_block_map(struct block_map *map)
@@ -2849,9 +2849,9 @@ void vdo_free_block_map(struct block_map *map)

vdo_abandon_block_map_growth(map);
if (map->forest != NULL)
- deforest(uds_forget(map->forest), 0);
- uds_free(uds_forget(map->action_manager));
- uds_free(map);
+ deforest(vdo_forget(map->forest), 0);
+ vdo_free(vdo_forget(map->action_manager));
+ vdo_free(map);
}

/* @journal may be NULL. */
@@ -2871,7 +2871,7 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical
if (result != UDS_SUCCESS)
return result;

- result = uds_allocate_extended(struct block_map,
+ result = vdo_allocate_extended(struct block_map,
vdo->thread_config.logical_zone_count,
struct block_map_zone, __func__, &map);
if (result != UDS_SUCCESS)
@@ -3053,7 +3053,7 @@ void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent)

void vdo_abandon_block_map_growth(struct block_map *map)
{
- struct forest *forest = uds_forget(map->next_forest);
+ struct forest *forest = vdo_forget(map->next_forest);

if (forest != NULL)
deforest(forest, forest->segments - 1);

drivers/md/dm-vdo/data-vio.c: 24 changes (12 additions & 12 deletions)
@@ -789,20 +789,20 @@ static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
int result;

BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE);
result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
&data_vio->vio.data);
if (result != VDO_SUCCESS)
return uds_log_error_strerror(result,
"data_vio data allocation failure");

result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
&data_vio->compression.block);
if (result != VDO_SUCCESS) {
return uds_log_error_strerror(result,
"data_vio compressed block allocation failure");
}

result = uds_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
&data_vio->scratch_block);
if (result != VDO_SUCCESS)
return uds_log_error_strerror(result,
@@ -825,10 +825,10 @@ static void destroy_data_vio(struct data_vio *data_vio)
if (data_vio == NULL)
return;

- vdo_free_bio(uds_forget(data_vio->vio.bio));
- uds_free(uds_forget(data_vio->vio.data));
- uds_free(uds_forget(data_vio->compression.block));
- uds_free(uds_forget(data_vio->scratch_block));
+ vdo_free_bio(vdo_forget(data_vio->vio.bio));
+ vdo_free(vdo_forget(data_vio->vio.data));
+ vdo_free(vdo_forget(data_vio->compression.block));
+ vdo_free(vdo_forget(data_vio->scratch_block));
}

/**
@@ -845,7 +845,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
struct data_vio_pool *pool;
data_vio_count_t i;

- result = uds_allocate_extended(struct data_vio_pool, pool_size, struct data_vio,
+ result = vdo_allocate_extended(struct data_vio_pool, pool_size, struct data_vio,
__func__, &pool);
if (result != UDS_SUCCESS)
return result;
@@ -867,7 +867,7 @@

result = uds_make_funnel_queue(&pool->queue);
if (result != UDS_SUCCESS) {
- free_data_vio_pool(uds_forget(pool));
+ free_data_vio_pool(vdo_forget(pool));
return result;
}

@@ -924,8 +924,8 @@ void free_data_vio_pool(struct data_vio_pool *pool)
destroy_data_vio(data_vio);
}

- uds_free_funnel_queue(uds_forget(pool->queue));
- uds_free(pool);
+ uds_free_funnel_queue(vdo_forget(pool->queue));
+ vdo_free(pool);
}

static bool acquire_permit(struct limiter *limiter)
@@ -1431,7 +1431,7 @@ void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset)
allocation->pbn = VDO_ZERO_BLOCK;

vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn,
- uds_forget(allocation->lock));
+ vdo_forget(allocation->lock));
}

/**
[Diffs for the remaining 40 changed files are not shown in this view.]
