Skip to content

Commit f783e35

Browse files
chaseyu authored and gregkh committed
f2fs: compress: fix deadloop in f2fs_write_cache_pages()
[ Upstream commit c5d3f9b ] With below mount option and testcase, it hangs kernel. 1. mount -t f2fs -o compress_log_size=5 /dev/vdb /mnt/f2fs 2. touch /mnt/f2fs/file 3. chattr +c /mnt/f2fs/file 4. dd if=/dev/zero of=/mnt/f2fs/file bs=1MB count=1 5. sync 6. dd if=/dev/zero of=/mnt/f2fs/file bs=111 count=11 conv=notrunc 7. sync INFO: task sync:4788 blocked for more than 120 seconds. Not tainted 6.5.0-rc1+ torvalds#322 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:sync state:D stack:0 pid:4788 ppid:509 flags:0x00000002 Call Trace: <TASK> __schedule+0x335/0xf80 schedule+0x6f/0xf0 wb_wait_for_completion+0x5e/0x90 sync_inodes_sb+0xd8/0x2a0 sync_inodes_one_sb+0x1d/0x30 iterate_supers+0x99/0xf0 ksys_sync+0x46/0xb0 __do_sys_sync+0x12/0x20 do_syscall_64+0x3f/0x90 entry_SYSCALL_64_after_hwframe+0x6e/0xd8 The reason is f2fs_all_cluster_page_ready() assumes that pages array should cover at least one cluster, otherwise, it will always return false, result in deadloop. By default, pages array size is 16, and it can cover the case cluster_size is equal or less than 16, for the case cluster_size is larger than 16, let's allocate memory of pages array dynamically. Fixes: 4c8ff70 ("f2fs: support data compression") Signed-off-by: Chao Yu <chao@kernel.org> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 0383267 commit f783e35

File tree

1 file changed

+18
-2
lines changed

1 file changed

+18
-2
lines changed

fs/f2fs/data.c

Lines changed: 18 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -3020,7 +3020,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
30203020
{
30213021
int ret = 0;
30223022
int done = 0, retry = 0;
3023-
struct page *pages[F2FS_ONSTACK_PAGES];
3023+
struct page *pages_local[F2FS_ONSTACK_PAGES];
3024+
struct page **pages = pages_local;
30243025
struct folio_batch fbatch;
30253026
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
30263027
struct bio *bio = NULL;
@@ -3044,6 +3045,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
30443045
#endif
30453046
int nr_folios, p, idx;
30463047
int nr_pages;
3048+
unsigned int max_pages = F2FS_ONSTACK_PAGES;
30473049
pgoff_t index;
30483050
pgoff_t end; /* Inclusive */
30493051
pgoff_t done_index;
@@ -3053,6 +3055,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
30533055
int submitted = 0;
30543056
int i;
30553057

3058+
#ifdef CONFIG_F2FS_FS_COMPRESSION
3059+
if (f2fs_compressed_file(inode) &&
3060+
1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
3061+
pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3062+
cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
3063+
max_pages = 1 << cc.log_cluster_size;
3064+
}
3065+
#endif
3066+
30563067
folio_batch_init(&fbatch);
30573068

30583069
if (get_dirty_pages(mapping->host) <=
@@ -3098,7 +3109,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
30983109
add_more:
30993110
pages[nr_pages] = folio_page(folio, idx);
31003111
folio_get(folio);
3101-
if (++nr_pages == F2FS_ONSTACK_PAGES) {
3112+
if (++nr_pages == max_pages) {
31023113
index = folio->index + idx + 1;
31033114
folio_batch_release(&fbatch);
31043115
goto write;
@@ -3281,6 +3292,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
32813292
if (bio)
32823293
f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
32833294

3295+
#ifdef CONFIG_F2FS_FS_COMPRESSION
3296+
if (pages != pages_local)
3297+
kfree(pages);
3298+
#endif
3299+
32843300
return ret;
32853301
}
32863302

0 commit comments

Comments
 (0)