mm: remove references to pagevec
Most of these should just refer to the LRU cache rather than the data
structure used to implement the LRU cache.

Link: https://lkml.kernel.org/r/20230621164557.3510324-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
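
For context on the rename: the "LRU cache" here is a small per-CPU batch of
recently added folios (implemented in the kernel by struct folio_batch,
historically struct pagevec) that pins each entry with an extra reference
until the batch is drained onto the LRU lists, e.g. by lru_add_drain(). The
sketch below is a minimal, self-contained userspace model of that behaviour,
not kernel code; the type names, batch size, and helpers are illustrative
assumptions.

#include <stdio.h>

/*
 * Simplified model: a fixed-size batch that caches recently added pages,
 * each pinned by an extra reference, until the batch is drained.
 * Names and the batch size are made up for illustration.
 */
#define BATCH_SIZE 15

struct page_ref { int refcount; };

struct lru_batch {
	struct page_ref *slots[BATCH_SIZE];
	int nr;
};

/* Adding to the cache takes an extra reference (cf. folio_add_lru()). */
static void lru_batch_add(struct lru_batch *b, struct page_ref *p)
{
	p->refcount++;
	b->slots[b->nr++] = p;
}

/* Draining drops the cached references (cf. lru_add_drain()). */
static void lru_batch_drain(struct lru_batch *b)
{
	for (int i = 0; i < b->nr; i++)
		b->slots[i]->refcount--;
	b->nr = 0;
}

int main(void)
{
	struct lru_batch batch = { .nr = 0 };
	struct page_ref page = { .refcount = 1 };

	lru_batch_add(&batch, &page);
	printf("before drain: refcount = %d\n", page.refcount); /* 2: extra pin */
	lru_batch_drain(&batch);
	printf("after drain:  refcount = %d\n", page.refcount); /* 1 */
	return 0;
}

That extra reference is why several of the touched comments drain the LRU
cache before trusting folio_ref_count(): a folio still sitting in the batch
carries a pin that would otherwise look like a foreign reference.
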
Matthew Wilcox (Oracle) authored and akpm00 committed Jun 23, 2023
1 parent 1a0fc81 commit 1fec689
Showing 7 changed files with 13 additions and 13 deletions.
2 changes: 1 addition & 1 deletion mm/huge_memory.c
@@ -1344,7 +1344,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
/*
* See do_wp_page(): we can only reuse the folio exclusively if
* there are no additional references. Note that we always drain
- * the LRU pagevecs immediately after adding a THP.
+ * the LRU cache immediately after adding a THP.
*/
if (folio_ref_count(folio) >
1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
6 changes: 3 additions & 3 deletions mm/khugepaged.c
@@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
if (pte)
pte_unmap(pte);

- /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
+ /* Drain LRU cache to remove extra pin on the swapped in pages */
if (swapped_in)
lru_add_drain();

@@ -1972,7 +1972,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
result = SCAN_FAIL;
goto xa_unlocked;
}
- /* drain pagevecs to help isolate_lru_page() */
+ /* drain lru cache to help isolate_lru_page() */
lru_add_drain();
page = folio_file_page(folio, index);
} else if (trylock_page(page)) {
@@ -1988,7 +1988,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
- /* drain pagevecs to help isolate_lru_page() */
+ /* drain lru cache to help isolate_lru_page() */
lru_add_drain();
page = find_lock_page(mapping, index);
if (unlikely(page == NULL)) {
6 changes: 3 additions & 3 deletions mm/ksm.c
@@ -932,7 +932,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
* The stable node did not yet appear stale to get_ksm_page(),
* since that allows for an unmapped ksm page to be recognized
* right up until it is freed; but the node is safe to remove.
- * This page might be in a pagevec waiting to be freed,
+ * This page might be in an LRU cache waiting to be freed,
* or it might be PageSwapCache (perhaps under writeback),
* or it might have been removed from swapcache a moment ago.
*/
@@ -2303,8 +2303,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);

/*
- * A number of pages can hang around indefinitely on per-cpu
- * pagevecs, raised page count preventing write_protect_page
+ * A number of pages can hang around indefinitely in per-cpu
+ * LRU cache, raised page count preventing write_protect_page
* from merging them. Though it doesn't really matter much,
* it is puzzling to see some stuck in pages_volatile until
* other activity jostles them out, and they also prevented
6 changes: 3 additions & 3 deletions mm/memory.c
@@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
goto copy;
if (!folio_test_lru(folio))
/*
- * Note: We cannot easily detect+handle references from
- * remote LRU pagevecs or references to LRU folios.
+ * We cannot easily detect+handle references from
+ * remote LRU caches or references to LRU folios.
*/
lru_add_drain();
if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
@@ -3880,7 +3880,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* If we want to map a page that's in the swapcache writable, we
* have to detect via the refcount if we're really the exclusive
* owner. Try removing the extra reference from the local LRU
- * pagevecs if required.
+ * caches if required.
*/
if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
!folio_test_ksm(folio) && !folio_test_lru(folio))
2 changes: 1 addition & 1 deletion mm/migrate_device.c
@@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
/* ZONE_DEVICE pages are not on LRU */
if (!is_zone_device_page(page)) {
if (!PageLRU(page) && allow_drain) {
- /* Drain CPU's pagevec */
+ /* Drain CPU's lru cache */
lru_add_drain_all();
allow_drain = false;
}
2 changes: 1 addition & 1 deletion mm/swap.c
@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {

/*
* This path almost never happens for VM activity - pages are normally freed
- * via pagevecs. But it gets used by networking - and for compound pages.
+ * in batches. But it gets used by networking - and for compound pages.
*/
static void __page_cache_release(struct folio *folio)
{
2 changes: 1 addition & 1 deletion mm/truncate.c
@@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
* refcount. We do this because invalidate_inode_pages2() needs stronger
* invalidation guarantees, and cannot afford to leave pages behind because
* shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the folio_add_lru() pagevecs.
+ * sitting in the folio_add_lru() caches.
*/
static int invalidate_complete_folio2(struct address_space *mapping,
struct folio *folio)
