lfs.c: 230 changes (154 additions & 76 deletions)
@@ -593,45 +593,52 @@ static int lfs_rawunmount(lfs_t *lfs);


/// Block allocator ///

// allocations should call this when all allocated blocks are committed to
// the filesystem
//
// after a checkpoint, the block allocator may realloc any untracked blocks
static void lfs_alloc_ckpoint(lfs_t *lfs) {
lfs->lookahead.ckpoint = lfs->block_count;
}

// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);
}
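
// Illustration (not from this patch): the checkpoint contract sketched
// as a caller. example_write_path and its commit step are hypothetical;
// the real call sites appear later in this diff (lfs_rawmkdir,
// lfs_file_outline, lfs_file_flushedwrite).
static int example_write_path(lfs_t *lfs) {
    // safe point: every previously allocated block is committed, so the
    // allocator may reuse anything a traversal doesn't see
    lfs_alloc_ckpoint(lfs);

    lfs_block_t block;
    int err = lfs_alloc(lfs, &block);
    if (err) {
        return err;
    }

    // ... program block, then commit it into the metadata tree ...
    //
    // until that commit lands, block is invisible to traversals; the
    // checkpoint budget is what keeps the allocator from wrapping
    // around and handing the same block out twice
    return 0;
}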

#ifndef LFS_READONLY
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
lfs_t *lfs = (lfs_t*)p;
lfs_block_t off = ((block - lfs->free.off)
lfs_block_t off = ((block - lfs->lookahead.start)
+ lfs->block_count) % lfs->block_count;

if (off < lfs->free.size) {
lfs->free.buffer[off / 32] |= 1U << (off % 32);
if (off < lfs->lookahead.size) {
lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
}

return 0;
}
#endif
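
// Illustration (not from this patch): the mark math above, standalone.
// In-use blocks are mapped into a circular window of `size` blocks
// starting at `start`, one bit per block. Indexing is now byte-granular
// (off / 8) where the old code used 32-bit words (off / 32), which is
// why lfs_init below can drop the 32-bit alignment assert on
// lookahead_buffer.
static void mark_block_sketch(uint8_t *bitmap,
        uint32_t start, uint32_t size, uint32_t block_count,
        uint32_t block) {
    // distance from the window start, wrapping around the disk
    uint32_t off = ((block - start) + block_count) % block_count;

    // only blocks inside the window are recorded
    if (off < size) {
        bitmap[off / 8] |= 1U << (off % 8);
    }
}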

// indicate allocated blocks have been committed into the filesystem, this
// is to prevent blocks from being garbage collected in the middle of a
// commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
lfs->free.ack = lfs->block_count;
}

// drop the lookahead buffer, this is done during mounting and failed
// traversals in order to avoid invalid lookahead state
static void lfs_alloc_drop(lfs_t *lfs) {
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
}

#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
// Move free offset at the first unused block (lfs->free.i)
// lfs->free.i is equal lfs->free.size when all blocks are used
lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
lfs->free.i = 0;
static int lfs_alloc_scan(lfs_t *lfs) {
// move lookahead buffer to the first unused block
//
// note we limit the lookahead buffer to at most the number of blocks
// checkpointed; this prevents the math in lfs_alloc from underflowing
lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;
lfs->lookahead.next = 0;
lfs->lookahead.size = lfs_min(
8*lfs->cfg->lookahead_size,
lfs->lookahead.ckpoint);

// find mask of free blocks from tree
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
if (err) {
lfs_alloc_drop(lfs);
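
// Illustration (not from this patch): the window advance above, with
// concrete numbers. Given block_count = 1000, lookahead_size = 16 bytes
// (128 bits), and ckpoint = 40, the next window covers only 40 blocks:
// the checkpoint budget, not the buffer, is the limit, so the per-block
// `ckpoint -= 1` in lfs_alloc can never underflow.
static void advance_window_sketch(uint32_t *start, uint32_t *next,
        uint32_t *size, uint32_t bitmap_bits, uint32_t ckpoint,
        uint32_t block_count) {
    *start = (*start + *next) % block_count; // skip already-scanned blocks
    *next = 0;
    *size = (bitmap_bits < ckpoint) ? bitmap_bits : ckpoint; // lfs_min
}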
@@ -645,36 +652,49 @@ static int lfs_fs_rawgc(lfs_t *lfs) {
#ifndef LFS_READONLY
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
while (true) {
while (lfs->free.i != lfs->free.size) {
lfs_block_t off = lfs->free.i;
lfs->free.i += 1;
lfs->free.ack -= 1;

if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
// scan our lookahead buffer for free blocks
while (lfs->lookahead.next < lfs->lookahead.size) {
if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
// found a free block
*block = (lfs->free.off + off) % lfs->block_count;

// eagerly find next off so an alloc ack can
// discredit old lookahead blocks
while (lfs->free.i != lfs->free.size &&
(lfs->free.buffer[lfs->free.i / 32]
& (1U << (lfs->free.i % 32)))) {
lfs->free.i += 1;
lfs->free.ack -= 1;
*block = (lfs->lookahead.start + lfs->lookahead.next)
% lfs->block_count;

// eagerly find next free block to maximize how many blocks
// lfs_alloc_ckpoint makes available for scanning
while (true) {
lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;

if (lfs->lookahead.next >= lfs->lookahead.size
|| !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
& (1U << (lfs->lookahead.next % 8)))) {
return 0;
}
}

return 0;
}

lfs->lookahead.next += 1;
lfs->lookahead.ckpoint -= 1;
}

// check if we have looked at all blocks since last ack
if (lfs->free.ack == 0) {
LFS_ERROR("No more free space %"PRIu32,
lfs->free.i + lfs->free.off);
// In order to keep our block allocator from spinning forever when our
// filesystem is full, we mark points where there are no in-flight
// allocations with a checkpoint before starting a set of allocations.
//
// If we've looked at all blocks since the last checkpoint, we report
// the filesystem as out of storage.
//
if (lfs->lookahead.ckpoint <= 0) {
LFS_ERROR("No more free space 0x%"PRIx32,
(lfs->lookahead.start + lfs->lookahead.next)
% lfs->cfg->block_count);
return LFS_ERR_NOSPC;
}

int err = lfs_fs_rawgc(lfs);
// No blocks in our lookahead buffer, we need to scan the filesystem for
// unused blocks in the next lookahead window.
int err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
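
// Illustration (not from this patch): the scan loop above, reduced to
// its core. A clear bit means free. `next` and `ckpoint` move together:
// one walks the window, the other burns the global budget refilled by
// lfs_alloc_ckpoint, so a full filesystem surfaces as a spent budget
// rather than an infinite loop. Simplified: the real lfs_alloc also
// eagerly advances past the block it returns.
static int find_free_sketch(const uint8_t *bitmap, uint32_t size,
        uint32_t *next, uint32_t *ckpoint, uint32_t *off) {
    while (*next < size) {
        if (!(bitmap[*next / 8] & (1U << (*next % 8)))) {
            *off = *next; // free block at this window offset
            return 0;
        }
        *next += 1;
        *ckpoint -= 1;
    }
    return -1; // window exhausted: rescan, or NOSPC if the budget is spent
}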
@@ -2586,7 +2606,7 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
}

// build up new directory
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
lfs_mdir_t dir;
err = lfs_dir_alloc(lfs, &dir);
if (err) {
@@ -3272,7 +3292,7 @@ static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
#ifndef LFS_READONLY
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
file->off = file->pos;
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
int err = lfs_file_relocate(lfs, file);
if (err) {
return err;
@@ -3535,7 +3555,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
}

// extend file with new blocks
lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
file->block, file->pos,
&file->block, &file->off);
@@ -3578,7 +3598,7 @@ static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
data += diff;
nsize -= diff;

lfs_alloc_ack(lfs);
lfs_alloc_ckpoint(lfs);
}

return size;
@@ -4153,6 +4173,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
// wear-leveling.
LFS_ASSERT(lfs->cfg->block_cycles != 0);

// check that compact_thresh makes sense
//
// metadata can't be compacted below block_size/2, and metadata can't
// exceed block_size
LFS_ASSERT(lfs->cfg->compact_thresh == 0
|| lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
|| lfs->cfg->compact_thresh <= lfs->cfg->block_size);
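
// Illustration (not from this patch): compact_thresh values these
// asserts accept, as a config sketch. The surrounding fields and the
// value 2048 are examples, not recommendations:
//
//     const struct lfs_config example_cfg = {
//         // ... read/prog/erase callbacks, cache sizes, etc. ...
//         .block_size = 4096,
//
//         // 0              -> default: compact when an mdir grows past
//         //                   block_size - block_size/8 (~7/8 full)
//         // block_size/2 .. block_size -> explicit threshold in bytes
//         // (lfs_size_t)-1 -> effectively never compact in lfs_fs_gc
//         .compact_thresh = 2048, // compact once an mdir is half full
//     };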

// setup read cache
if (lfs->cfg->read_buffer) {
@@ -4180,15 +4208,14 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
lfs_cache_zero(lfs, &lfs->rcache);
lfs_cache_zero(lfs, &lfs->pcache);

// setup lookahead, must be multiple of 64-bits, 32-bit aligned
// setup lookahead buffer, note mount finishes initializing this after
// we establish a decent pseudo-random seed
LFS_ASSERT(lfs->cfg->lookahead_size > 0);
LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
(uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
if (lfs->cfg->lookahead_buffer) {
lfs->free.buffer = lfs->cfg->lookahead_buffer;
lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
} else {
lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->free.buffer) {
lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
if (!lfs->lookahead.buffer) {
err = LFS_ERR_NOMEM;
goto cleanup;
}
@@ -4245,7 +4272,7 @@ static int lfs_deinit(lfs_t *lfs) {
}

if (!lfs->cfg->lookahead_buffer) {
lfs_free(lfs->free.buffer);
lfs_free(lfs->lookahead.buffer);
}

return 0;
@@ -4265,12 +4292,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
LFS_ASSERT(cfg->block_count != 0);

// create free lookahead
memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
lfs->free.off = 0;
lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
lfs->lookahead.start = 0;
lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
lfs->block_count);
lfs->free.i = 0;
lfs_alloc_ack(lfs);
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);

// create root dir
lfs_mdir_t root;
@@ -4478,7 +4505,7 @@ static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {

// setup free lookahead, to distribute allocations uniformly across
// boots, we start the allocator at a random location
lfs->free.off = lfs->seed % lfs->block_count;
lfs->lookahead.start = lfs->seed % lfs->block_count;
lfs_alloc_drop(lfs);

return 0;
@@ -5045,6 +5072,57 @@ static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
return size;
}

// explicit garbage collection
#ifndef LFS_READONLY
static int lfs_fs_rawgc(lfs_t *lfs) {
// force consistency, even if we're not necessarily going to write,
// because this function is supposed to take care of janitorial work,
// isn't it?
int err = lfs_fs_forceconsistency(lfs);
if (err) {
return err;
}

// try to compact metadata pairs, note we can't really accomplish
// anything if compact_thresh doesn't at least leave a prog_size
// available
if (lfs->cfg->compact_thresh
< lfs->cfg->block_size - lfs->cfg->prog_size) {
// iterate over all mdirs
lfs_mdir_t mdir = {.tail = {0, 1}};
while (!lfs_pair_isnull(mdir.tail)) {
err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
if (err) {
return err;
}

// not erased? exceeds our compaction threshold?
if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
: mdir.off > lfs->cfg->compact_thresh)) {
// the easiest way to trigger a compaction is to mark
// the mdir as unerased and add an empty commit
mdir.erased = false;
err = lfs_dir_commit(lfs, &mdir, NULL, 0);
if (err) {
return err;
}
}
}
}

// try to populate the lookahead buffer, unless it's already full
if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
err = lfs_alloc_scan(lfs);
if (err) {
return err;
}
}

return 0;
}
#endif
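
// Illustration (not from this patch): driving the new explicit GC from
// application idle time. on_idle is a hypothetical hook; lfs_fs_gc is
// the public wrapper for lfs_fs_rawgc, shown at the end of this diff.
static void on_idle(lfs_t *lfs) {
    // fixes pending moves/orphans, compacts mdirs past compact_thresh,
    // and repopulates the lookahead buffer so later writes stall less
    int err = lfs_fs_gc(lfs);
    if (err) {
        // gc is best-effort janitorial work; callers may log and move on
    }
}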

#ifndef LFS_READONLY
static int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
// shrinking is not supported
@@ -5451,10 +5529,10 @@ static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
lfs->lfs1->root[1] = LFS_BLOCK_NULL;

// setup free lookahead
lfs->free.off = 0;
lfs->free.size = 0;
lfs->free.i = 0;
lfs_alloc_ack(lfs);
lfs->lookahead.start = 0;
lfs->lookahead.size = 0;
lfs->lookahead.next = 0;
lfs_alloc_ckpoint(lfs);

// load superblock
lfs1_dir_t dir;
@@ -6251,32 +6329,32 @@ int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
}

#ifndef LFS_READONLY
int lfs_fs_gc(lfs_t *lfs) {
int lfs_fs_mkconsistent(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);

err = lfs_fs_rawgc(lfs);
err = lfs_fs_rawmkconsistent(lfs);

LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}
#endif

#ifndef LFS_READONLY
int lfs_fs_mkconsistent(lfs_t *lfs) {
int lfs_fs_gc(lfs_t *lfs) {
int err = LFS_LOCK(lfs->cfg);
if (err) {
return err;
}
LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);

err = lfs_fs_rawmkconsistent(lfs);
err = lfs_fs_rawgc(lfs);

LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
LFS_TRACE("lfs_fs_gc -> %d", err);
LFS_UNLOCK(lfs->cfg);
return err;
}