Skip to content

Commit 0a2c1e8

Browse files
VMoola authored and akpm00 committed
mm/mempolicy: convert queue_pages_hugetlb() to queue_folios_hugetlb()
This change is in preparation for the conversion of queue_pages_required() to queue_folio_required() and migrate_page_add() to migrate_folio_add(). Link: https://lkml.kernel.org/r/20230130201833.27042-5-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Cc: David Hildenbrand <david@redhat.com> Cc: Jane Chu <jane.chu@oracle.com> Cc: "Yin, Fengwei" <fengwei.yin@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 3dae02b commit 0a2c1e8

File tree

1 file changed

+18
-11
lines changed

1 file changed

+18
-11
lines changed

mm/mempolicy.c

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -558,29 +558,29 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
558558
return addr != end ? -EIO : 0;
559559
}
560560

561-
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
561+
static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
562562
unsigned long addr, unsigned long end,
563563
struct mm_walk *walk)
564564
{
565565
int ret = 0;
566566
#ifdef CONFIG_HUGETLB_PAGE
567567
struct queue_pages *qp = walk->private;
568568
unsigned long flags = (qp->flags & MPOL_MF_VALID);
569-
struct page *page;
569+
struct folio *folio;
570570
spinlock_t *ptl;
571571
pte_t entry;
572572

573573
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
574574
entry = huge_ptep_get(pte);
575575
if (!pte_present(entry))
576576
goto unlock;
577-
page = pte_page(entry);
578-
if (!queue_pages_required(page, qp))
577+
folio = pfn_folio(pte_pfn(entry));
578+
if (!queue_pages_required(&folio->page, qp))
579579
goto unlock;
580580

581581
if (flags == MPOL_MF_STRICT) {
582582
/*
583-
* STRICT alone means only detecting misplaced page and no
583+
* STRICT alone means only detecting misplaced folio and no
584584
* need to further check other vma.
585585
*/
586586
ret = -EIO;
@@ -591,21 +591,28 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
591591
/*
592592
* Must be STRICT with MOVE*, otherwise .test_walk() have
593593
* stopped walking current vma.
594-
* Detecting misplaced page but allow migrating pages which
594+
* Detecting misplaced folio but allow migrating folios which
595595
* have been queued.
596596
*/
597597
ret = 1;
598598
goto unlock;
599599
}
600600

601-
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
601+
/*
602+
* With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
603+
* is shared it is likely not worth migrating.
604+
*
605+
* To check if the folio is shared, ideally we want to make sure
606+
* every page is mapped to the same process. Doing that is very
607+
* expensive, so check the estimated mapcount of the folio instead.
608+
*/
602609
if (flags & (MPOL_MF_MOVE_ALL) ||
603-
(flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
610+
(flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
604611
!hugetlb_pmd_shared(pte))) {
605-
if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
612+
if (isolate_hugetlb(folio, qp->pagelist) &&
606613
(flags & MPOL_MF_STRICT))
607614
/*
608-
* Failed to isolate page but allow migrating pages
615+
* Failed to isolate folio but allow migrating pages
609616
* which have been queued.
610617
*/
611618
ret = 1;
@@ -703,7 +710,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
703710
}
704711

705712
static const struct mm_walk_ops queue_pages_walk_ops = {
706-
.hugetlb_entry = queue_pages_hugetlb,
713+
.hugetlb_entry = queue_folios_hugetlb,
707714
.pmd_entry = queue_folios_pte_range,
708715
.test_walk = queue_pages_test_walk,
709716
};

0 commit comments

Comments (0)