From f7d4b4924d226a9007c3a3cb14551b28a47f8767 Mon Sep 17 00:00:00 2001
From: Filipe Manana
Date: Wed, 23 Oct 2024 14:14:11 +0100
Subject: [PATCH] btrfs: remove num_entries atomic counter from delayed ref root

The atomic counter 'num_entries' is not used anymore: we increment and
decrement it, but we never read it for any logic. Its last use was
removed with commit 61a56a992fcf ("btrfs: delayed refs pre-flushing
should only run the heads we have"). So remove it.

Reviewed-by: Boris Burkov
Reviewed-by: Qu Wenruo
Signed-off-by: Filipe Manana
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/delayed-ref.c | 4 ----
 fs/btrfs/delayed-ref.h | 5 -----
 fs/btrfs/extent-tree.c | 1 -
 fs/btrfs/transaction.c | 1 -
 4 files changed, 11 deletions(-)

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index dc3a29f3c3577a..f7c7d1249f04f8 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -463,7 +463,6 @@ static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
 	if (!list_empty(&ref->add_list))
 		list_del(&ref->add_list);
 	btrfs_put_delayed_ref(ref);
-	atomic_dec(&delayed_refs->num_entries);
 	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
 }
 
@@ -604,7 +603,6 @@ void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 
 	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
 	RB_CLEAR_NODE(&head->href_node);
-	atomic_dec(&delayed_refs->num_entries);
 	delayed_refs->num_heads--;
 	if (!head->processing)
 		delayed_refs->num_heads_ready--;
@@ -630,7 +628,6 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
 	if (!exist) {
 		if (ref->action == BTRFS_ADD_DELAYED_REF)
 			list_add_tail(&ref->add_list, &href->ref_add_list);
-		atomic_inc(&root->num_entries);
 		spin_unlock(&href->lock);
 		trans->delayed_ref_updates++;
 		return false;
@@ -901,7 +898,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 		}
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
-		atomic_inc(&delayed_refs->num_entries);
 	}
 	if (qrecord_inserted_ret)
 		*qrecord_inserted_ret = qrecord_inserted;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index cc78395f2fcd0c..a97c9df19ea071 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -216,11 +216,6 @@ struct btrfs_delayed_ref_root {
 	/* this spin lock protects the rbtree and the entries inside */
 	spinlock_t lock;
 
-	/* how many delayed ref updates we've queued, used by the
-	 * throttling code
-	 */
-	atomic_t num_entries;
-
 	/* total number of head nodes in tree */
 	unsigned long num_heads;
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 594d18ed908c47..adff2b6fb62940 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2029,7 +2029,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
 		default:
 			WARN_ON(1);
 		}
-		atomic_dec(&delayed_refs->num_entries);
 
 		/*
 		 * Record the must_insert_reserved flag before we drop the
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e580c566f03320..9ccf68ab53f9b4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -351,7 +351,6 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
 
 	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
 	xa_init(&cur_trans->delayed_refs.dirty_extents);
-	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 
 	/*
 	 * although the tree mod log is per file system and not per transaction,