Merge tag '5.17-rc1-4.19' of https://kernel.googlesource.com/pub/scm/linux/kernel/git/jaegeuk/f2fs-stable into HEAD

* tag '5.17-rc1-4.19' of https://kernel.googlesource.com/pub/scm/linux/kernel/git/jaegeuk/f2fs-stable:
  f2fs: do not allow partial truncation on pinned file
  f2fs: remove redunant invalidate compress pages
  f2fs: Simplify bool conversion
  f2fs: don't drop compressed page cache in .{invalidate,release}page
  f2fs: fix to reserve space for IO align feature
  f2fs: fix to check available space of CP area correctly in update_ckpt_flags()
  f2fs: support fault injection to f2fs_trylock_op()
  f2fs: clean up __find_inline_xattr() with __find_xattr()
  f2fs: fix to do sanity check on last xattr entry in __f2fs_setxattr()
  f2fs: do not bother checkpoint by f2fs_get_node_info
  f2fs: avoid down_write on nat_tree_lock during checkpoint
  f2fs: compress: fix potential deadlock of compress file
  f2fs: avoid EINVAL by SBI_NEED_FSCK when pinning a file
  f2fs: add gc_urgent_high_remaining sysfs node
  f2fs: fix to do sanity check in is_alive()
  f2fs: fix to avoid panic in is_alive() if metadata is inconsistent
  f2fs: fix to do sanity check on inode type during garbage collection
  f2fs: avoid duplicate call of mark_inode_dirty
  f2fs: fix remove page failed in invalidate compress pages
  f2fs: fix the f2fs_file_write_iter tracepoint
  f2fs: do not expose unwritten blocks to user by DIO
  f2fs: reduce indentation in f2fs_file_write_iter()
  f2fs: rework write preallocations
  f2fs: compress: reduce one page array alloc and free when write compressed page
  f2fs: show number of pending discard commands
  f2fs: check nr_pages for readahead

Change-Id: Ic70305109978c74a5ffe79aca610647a52f5c695
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>

Conflicts:
	fs/f2fs/gc.c
	fs/f2fs/xattr.c
UtsavBalar1231 committed Jul 3, 2022
2 parents aca4f15 + 80a2667 commit b5e72d3
Showing 18 changed files with 365 additions and 238 deletions.
12 changes: 12 additions & 0 deletions Documentation/ABI/testing/sysfs-fs-f2fs
@@ -102,6 +102,11 @@ Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Set timeout to issue discard commands during umount.
Default: 5 secs

What: /sys/fs/f2fs/<disk>/pending_discard
Date: November 2021
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Shows the number of pending discard commands in the queue.

What: /sys/fs/f2fs/<disk>/max_victim_search
Date: January 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -502,3 +507,10 @@ Description: With "mode=fragment:block" mount options, we can scatter block allo
f2fs will allocate 1..<max_fragment_chunk> blocks in a chunk and make a hole
in the length of 1..<max_fragment_hole> by turns. This value can be set
between 1..512 and the default value is 4.

What: /sys/fs/f2fs/<disk>/gc_urgent_high_remaining
Date: December 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
Description: You can set the trial count limit for GC urgent high mode with this value.
If GC thread gets to the limit, the mode will turn back to GC normal mode.
By default, the value is zero, which means there is no limit like before.
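
Both nodes added above live under /sys/fs/f2fs/<disk>/ and expose simple numeric values. A minimal userspace sketch of how they might be exercised follows; the device name "sda1" and the limit of 64 rounds are placeholder values, not part of this patch.

/* Userspace sketch only, not part of this patch.  "sda1" is a
 * placeholder for the actual f2fs volume name under /sys/fs/f2fs/. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f;

	/* pending_discard: read-only count of queued discard commands */
	f = fopen("/sys/fs/f2fs/sda1/pending_discard", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("pending discard commands: %s", buf);
		fclose(f);
	}

	/* gc_urgent_high_remaining: 0 means no limit; a non-zero value
	 * caps how many rounds GC urgent high mode runs before falling
	 * back to normal mode (64 is an arbitrary example) */
	f = fopen("/sys/fs/f2fs/sda1/gc_urgent_high_remaining", "w");
	if (f) {
		fprintf(f, "64\n");
		fclose(f);
	}
	return 0;
}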
1 change: 1 addition & 0 deletions Documentation/filesystems/f2fs.rst
@@ -198,6 +198,7 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
FAULT_LOCK_OP 0x000020000
=================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
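
FAULT_LOCK_OP above extends the fault_type bitmask documented in this table. For illustration only, a small sketch of combining the new bit with an existing one; the particular combination and the mount line in the comment are arbitrary examples, not taken from this patch.

/* Illustration only: fault_type is a bitmask built from the FAULT_*
 * values listed in the table above; this program merely combines two
 * of them and prints the result. */
#include <stdio.h>

#define FAULT_SLAB_ALLOC	0x000008000UL
#define FAULT_LOCK_OP		0x000020000UL	/* added by this merge */

int main(void)
{
	unsigned long fault_type = FAULT_SLAB_ALLOC | FAULT_LOCK_OP;

	/* e.g. mount -o fault_injection=<rate>,fault_type=<value> */
	printf("fault_type = %lu (0x%lx)\n", fault_type, fault_type);
	return 0;
}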
6 changes: 3 additions & 3 deletions fs/f2fs/checkpoint.c
@@ -664,7 +664,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);

err = f2fs_get_node_info(sbi, ino, &ni);
err = f2fs_get_node_info(sbi, ino, &ni, false);
if (err)
goto err_out;

@@ -1300,8 +1300,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long flags;

if (cpc->reason & CP_UMOUNT) {
if (le32_to_cpu(ckpt->cp_pack_total_block_count) >
sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) {
if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) {
clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
f2fs_notice(sbi, "Disable nat_bits due to no space");
} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
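
The old and new checks in update_ckpt_flags() above differ arithmetically only when the unsigned subtraction in the old form would wrap, i.e. when nat_bits_blocks exceeds blocks_per_seg. A standalone sketch with made-up values, assuming unsigned fields as in f2fs_sb_info:

/* Standalone sketch with made-up values; shows the one case where the
 * old and new comparisons disagree under unsigned arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned int total = 100;		/* cp_pack_total_block_count */
	unsigned int blocks_per_seg = 512;
	unsigned int nat_bits_blocks = 600;	/* larger than a segment */

	/* old form: the subtraction wraps to a huge value, so the check
	 * stays false even though there is clearly no room */
	printf("old: %d\n", total > blocks_per_seg - nat_bits_blocks);

	/* new form: no wrap, correctly reports that space is short */
	printf("new: %d\n", total + nat_bits_blocks > blocks_per_seg);
	return 0;
}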
84 changes: 33 additions & 51 deletions fs/f2fs/compress.c
@@ -154,6 +154,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
cc->valid_nr_cpages = 0;
if (!reuse)
cc->cluster_idx = NULL_CLUSTER;
}
@@ -590,7 +591,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, new_nr_cpages;
struct page **new_cpages;
u32 chksum = 0;
int i, ret;

@@ -605,6 +605,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)

max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
cc->valid_nr_cpages = cc->nr_cpages;

cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
@@ -655,13 +656,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)

new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

/* Now we're going to cut unnecessary tail pages */
new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
if (!new_cpages) {
ret = -ENOMEM;
goto out_vunmap_cbuf;
}

/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
(new_nr_cpages * PAGE_SIZE) -
@@ -671,20 +665,16 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
vm_unmap_ram(cc->rbuf, cc->cluster_size);

for (i = 0; i < cc->nr_cpages; i++) {
if (i < new_nr_cpages) {
new_cpages[i] = cc->cpages[i];
if (i < new_nr_cpages)
continue;
}
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}

if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);

page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = new_cpages;
cc->nr_cpages = new_nr_cpages;
cc->valid_nr_cpages = new_nr_cpages;

trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -1266,7 +1256,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
goto out_put_dnode;

@@ -1278,14 +1268,14 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;

cic->nr_rpages = cc->cluster_size;

for (i = 0; i < cc->nr_cpages; i++) {
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index, cic);
fio.compressed_page = cc->cpages[i];
@@ -1331,7 +1321,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
fio.compr_blocks++;

if (i > cc->nr_cpages) {
if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -1356,8 +1346,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

if (fio.compr_blocks)
f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
add_compr_block_stat(inode, cc->nr_cpages);
f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
add_compr_block_stat(inode, cc->valid_nr_cpages);

set_inode_flag(cc->inode, FI_APPEND_WRITE);
if (cc->cluster_idx == 0)
@@ -1395,9 +1385,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
else
f2fs_unlock_op(sbi);
out_free:
for (i = 0; i < cc->nr_cpages; i++) {
if (!cc->cpages[i])
continue;
for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -1439,25 +1427,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
int _submitted, compr_blocks, ret;
int i = -1, err = 0;
int _submitted, compr_blocks, ret, i;

compr_blocks = f2fs_compressed_blocks(cc);
if (compr_blocks < 0) {
err = compr_blocks;
goto out_err;

for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;

redirty_page_for_writepage(wbc, cc->rpages[i]);
unlock_page(cc->rpages[i]);
}

if (compr_blocks < 0)
return compr_blocks;

for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
retry_write:
lock_page(cc->rpages[i]);

if (cc->rpages[i]->mapping != mapping) {
continue_unlock:
unlock_page(cc->rpages[i]);
continue;
}

BUG_ON(!PageLocked(cc->rpages[i]));
if (!PageDirty(cc->rpages[i]))
goto continue_unlock;

if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;

ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
NULL, NULL, wbc, io_type,
@@ -1472,26 +1473,15 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
if (IS_NOQUOTA(cc->inode)) {
err = 0;
goto out_err;
}
if (IS_NOQUOTA(cc->inode))
return 0;
ret = 0;
cond_resched();
congestion_wait(BLK_RW_ASYNC,
DEFAULT_IO_TIMEOUT);
lock_page(cc->rpages[i]);

if (!PageDirty(cc->rpages[i])) {
unlock_page(cc->rpages[i]);
continue;
}

clear_page_dirty_for_io(cc->rpages[i]);
goto retry_write;
}
err = ret;
goto out_err;
return ret;
}

*submitted += _submitted;
@@ -1500,14 +1490,6 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
f2fs_balance_fs(F2FS_M_SB(mapping), true);

return 0;
out_err:
for (++i; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
redirty_page_for_writepage(wbc, cc->rpages[i]);
unlock_page(cc->rpages[i]);
}
return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
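
The compress.c hunks above remove the second page_array_alloc()/copy in f2fs_compress_pages() and instead free only the unused tail entries of cc->cpages, recording the live count in the new valid_nr_cpages field, which the write path then uses in place of nr_cpages. A simplified, self-contained sketch of that pattern; the struct and helper below are invented for illustration and are not the kernel code.

/* Simplified illustration of "trim the tail, keep the array": instead
 * of allocating a smaller array and copying live entries into it, the
 * tail entries are freed in place and a separate valid count is kept. */
#include <stdlib.h>

struct page_vec {
	void **slots;		/* allocated once, nr entries long */
	int nr;			/* allocation size (like nr_cpages) */
	int valid_nr;		/* entries still in use (like valid_nr_cpages) */
};

static void trim_tail(struct page_vec *v, int new_valid)
{
	int i;

	/* free only the entries past the new valid count */
	for (i = new_valid; i < v->nr; i++) {
		free(v->slots[i]);
		v->slots[i] = NULL;
	}
	v->valid_nr = new_valid;
}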