Skip to content

Commit

Permalink
Remove assertions from block allocator.
Browse files · Browse the repository at this point in the history
It has been extremely well-tested at this point, and is a very hot code path so
the performance gain is measurable (~1-2% on most captures I tried).

Change-Id: I2f5e03d2f348f56e740bf0dfbc83a4fd9cc8c5a9
Reviewed-on: https://code.wireshark.org/review/499
Reviewed-by: Anders Broman <a.broman58@gmail.com>
  • Loading branch information
eapache authored and AndersBroman committed Mar 4, 2014
1 parent 3a4b311 commit 01467c2
Showing 1 changed file with 0 additions and 30 deletions.
30 changes: 0 additions & 30 deletions epan/wmem/wmem_allocator_block.c
Original file line number · Diff line number · Diff line change
Expand Up @@ -407,8 +407,6 @@ wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
{
wmem_block_free_t *free_chunk;

g_assert(! chunk->used);

if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
return;
}
Expand Down Expand Up @@ -441,15 +439,10 @@ wmem_block_remove_from_recycler(wmem_block_allocator_t *allocator,
{
wmem_block_free_t *free_chunk;

g_assert (! chunk->used);

free_chunk = WMEM_GET_FREE(chunk);

g_assert(free_chunk->prev && free_chunk->next);

if (free_chunk->prev == chunk && free_chunk->next == chunk) {
/* Only one item in recycler, just empty it. */
g_assert(allocator->recycler_head == chunk);
allocator->recycler_head = NULL;
}
else {
Expand Down Expand Up @@ -512,8 +505,6 @@ wmem_block_merge_free(wmem_block_allocator_t *allocator,
wmem_block_chunk_t *left_free = NULL;
wmem_block_chunk_t *right_free = NULL;

g_assert(!chunk->used);

/* Check the chunk to our right. If it is free, merge it into our current
* chunk. If it is big enough to hold a free-header, save it for later (we
* need to know about the left chunk before we decide what goes where). */
Expand Down Expand Up @@ -592,9 +583,6 @@ wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
size_t aligned_size, available;
gboolean last;

g_assert(!chunk->used);
g_assert(WMEM_CHUNK_DATA_LEN(chunk) >= size);

aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
Expand Down Expand Up @@ -687,9 +675,6 @@ wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
size_t aligned_size, available;
gboolean last;

g_assert(chunk->used);
g_assert(WMEM_CHUNK_DATA_LEN(chunk) >= size);

aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
Expand Down Expand Up @@ -905,20 +890,9 @@ wmem_block_alloc(void *private_data, const size_t size)
chunk = allocator->master_head;
}

/* if our chunk is used, something is wrong */
g_assert(! chunk->used);
/* if we still don't have the space at this point, something is wrong */
g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));

/* Split our chunk into two to preserve any trailing free space */
wmem_block_split_free_chunk(allocator, chunk, size);

/* if our split reduced our size too much, something went wrong */
g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));
/* the resulting chunk should not be in either free list */
g_assert(chunk != allocator->master_head);
g_assert(chunk != allocator->recycler_head);

/* Now cycle the recycler */
wmem_block_cycle_recycler(allocator);

Expand All @@ -942,8 +916,6 @@ wmem_block_free(void *private_data, void *ptr)
return;
}

g_assert(chunk->used);

/* mark it as unused */
chunk->used = FALSE;

Expand All @@ -967,8 +939,6 @@ wmem_block_realloc(void *private_data, void *ptr, const size_t size)
return wmem_block_realloc_jumbo(allocator, chunk, size);
}

g_assert(chunk->used);

if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
/* grow */
wmem_block_chunk_t *tmp;
Expand Down

0 comments on commit 01467c2

Please sign in to comment.