@@ -1767,8 +1767,6 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
 		struct page *page = bvec->bv_page;
 		u32 len = bvec->bv_len;
 
-		atomic_dec(&eb->io_pages);
-
 		if (!uptodate) {
 			btrfs_page_clear_uptodate(fs_info, page, start, len);
 			btrfs_page_set_error(fs_info, page, start, len);
@@ -1791,7 +1789,6 @@ static void prepare_eb_write(struct extent_buffer *eb)
 	unsigned long end;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
-	atomic_set(&eb->io_pages, num_extent_pages(eb));
 
 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
 	nritems = btrfs_header_nritems(eb);
@@ -3235,8 +3232,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
 
 static int extent_buffer_under_io(const struct extent_buffer *eb)
 {
-	return (atomic_read(&eb->io_pages) ||
-		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 }
 
@@ -3372,7 +3368,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 
 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
-	atomic_set(&eb->io_pages, 0);
 
 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
@@ -3489,9 +3484,9 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * adequately protected by the refcount, but the TREE_REF bit and
 	 * its corresponding reference are not. To protect against this
 	 * class of races, we call check_buffer_tree_ref from the codepaths
-	 * which trigger io after they set eb->io_pages. Note that once io is
-	 * initiated, TREE_REF can no longer be cleared, so that is the
-	 * moment at which any such race is best fixed.
+	 * which trigger io. Note that once io is initiated, TREE_REF can no
+	 * longer be cleared, so that is the moment at which any such race is
+	 * best fixed.
 	 */
 	refs = atomic_read(&eb->refs);
 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -4062,7 +4057,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	struct bio_vec *bvec;
 	u32 bio_offset = 0;
 
-	atomic_inc(&eb->refs);
 	eb->read_mirror = bbio->mirror_num;
 
 	if (uptodate &&
@@ -4077,7 +4071,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	}
 
 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
-		atomic_dec(&eb->io_pages);
 		end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset,
 			      bvec->bv_len);
 		bio_offset += bvec->bv_len;
@@ -4100,8 +4093,8 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
 
 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 	eb->read_mirror = 0;
-	atomic_set(&eb->io_pages, num_pages);
 	check_buffer_tree_ref(eb);
+	atomic_inc(&eb->refs);
 
 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
 			       REQ_OP_READ | REQ_META, eb->fs_info,
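
The hunks above drop the per-page io_pages countdown: whether a buffer is under I/O is now answered by the WRITEBACK/DIRTY state bits alone, and the read path takes its extra reference before the single bio is submitted rather than in the end_io handler. Below is a minimal standalone sketch of that pattern, not the btrfs code itself; the toy_eb structure and toy_* helpers are made-up names for illustration only.

/*
 * Toy sketch: "under I/O" derived from state bits instead of a per-page
 * counter, and one reference held across the whole bio, taken at
 * submission time and dropped by the single completion handler.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_eb {
	atomic_int refs;
	atomic_bool writeback;	/* stand-in for EXTENT_BUFFER_WRITEBACK */
	atomic_bool dirty;	/* stand-in for EXTENT_BUFFER_DIRTY */
};

/* No io_pages counter: the state bits alone answer "is I/O pending?". */
static bool toy_eb_under_io(struct toy_eb *eb)
{
	return atomic_load(&eb->writeback) || atomic_load(&eb->dirty);
}

/* Pin the buffer once for the whole bio before handing it to the block layer. */
static void toy_submit_read(struct toy_eb *eb)
{
	atomic_fetch_add(&eb->refs, 1);
	/* ...build and submit a single bio covering every page here... */
}

/* Completion runs once per bio, not once per page. */
static void toy_read_end_io(struct toy_eb *eb, bool uptodate)
{
	(void)uptodate;		/* mark pages uptodate or in error here */
	atomic_fetch_sub(&eb->refs, 1);	/* drop the submission reference */
}

int main(void)
{
	struct toy_eb eb = { .refs = 1 };

	toy_submit_read(&eb);
	toy_read_end_io(&eb, true);
	printf("under io: %d, refs: %d\n",
	       toy_eb_under_io(&eb), atomic_load(&eb.refs));
	return 0;
}

Taking the reference at submission time means the buffer cannot go away between bio submission and completion, which is roughly the window the per-page counter used to cover indirectly.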