@@ -1616,9 +1616,10 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
 static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 		free_page_t put_new_page, unsigned long private,
 		enum migrate_mode mode, int reason, struct list_head *ret_folios,
-		struct migrate_pages_stats *stats)
+		struct list_head *split_folios, struct migrate_pages_stats *stats,
+		int nr_pass)
 {
-	int retry;
+	int retry = 1;
 	int large_retry = 1;
 	int thp_retry = 1;
 	int nr_failed = 0;
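For orientation, here is the resulting prototype with the two new parameters annotated; the signature is taken from this diff, while the comments are editorial glosses. (Note that retry is now initialized at its declaration because the retry: label that used to reset it is deleted further down.)

static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios,
		/* New: caller-owned list collecting folios produced by
		 * splitting large folios that failed to migrate; this was a
		 * local LIST_HEAD(split_folios) before this patch. */
		struct list_head *split_folios,
		struct migrate_pages_stats *stats,
		/* New: retry-pass budget; the caller passes
		 * NR_MAX_MIGRATE_PAGES_RETRY for the main attempt and 1 for
		 * the best-effort pass over split folios. */
		int nr_pass);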
@@ -1628,21 +1629,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 	bool is_large = false;
 	bool is_thp = false;
 	struct folio *folio, *folio2, *dst = NULL, *dst2;
-	int rc, rc_saved, nr_pages;
-	LIST_HEAD(split_folios);
+	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
 	bool nosplit = (reason == MR_NUMA_MISPLACED);
-	bool no_split_folio_counting = false;
 
 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
 			!list_empty(from) && !list_is_singular(from));
-retry:
-	rc_saved = 0;
-	retry = 1;
-	for (pass = 0;
-	     pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-	     pass++) {
+
+	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
 		retry = 0;
 		large_retry = 0;
 		thp_retry = 0;
@@ -1673,7 +1668,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 			if (!thp_migration_supported() && is_thp) {
 				nr_large_failed++;
 				stats->nr_thp_failed++;
-				if (!try_split_folio(folio, &split_folios)) {
+				if (!try_split_folio(folio, split_folios)) {
 					stats->nr_thp_split++;
 					continue;
 				}
@@ -1705,7 +1700,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 				stats->nr_thp_failed += is_thp;
 				/* Large folio NUMA faulting doesn't split to retry. */
 				if (!nosplit) {
-					int ret = try_split_folio(folio, &split_folios);
+					int ret = try_split_folio(folio, split_folios);
 
 					if (!ret) {
 						stats->nr_thp_split += is_thp;
@@ -1722,18 +1717,11 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 							break;
 						}
 					}
-				} else if (!no_split_folio_counting) {
+				} else {
 					nr_failed++;
 				}
 
 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
-				/*
-				 * There might be some split folios of fail-to-migrate large
-				 * folios left in split_folios list. Move them to ret_folios
-				 * list so that they could be put back to the right list by
-				 * the caller otherwise the folio refcnt will be leaked.
-				 */
-				list_splice_init(&split_folios, ret_folios);
 				/* nr_failed isn't updated for not used */
 				nr_large_failed += large_retry;
 				stats->nr_thp_failed += thp_retry;
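The splice deleted here does not simply disappear; it moves up a level. As a cross-reference, a condensed sketch of the caller-side handling added by the migrate_pages() hunk at the end of this patch, which still covers the refcount-leak concern the removed comment described (the comment below is editorial):

	if (rc < 0) {
		rc_gather = rc;
		/* Hard failure: move any folios left on split_folios to
		 * ret_folios so the caller puts them back on the right list
		 * instead of leaking their references. */
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}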
@@ -1746,7 +1734,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 				if (is_large) {
 					large_retry++;
 					thp_retry += is_thp;
-				} else if (!no_split_folio_counting) {
+				} else {
 					retry++;
 				}
 				nr_retry_pages += nr_pages;
@@ -1769,7 +1757,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 				if (is_large) {
 					nr_large_failed++;
 					stats->nr_thp_failed += is_thp;
-				} else if (!no_split_folio_counting) {
+				} else {
 					nr_failed++;
 				}
 
@@ -1787,9 +1775,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 	try_to_unmap_flush();
 
 	retry = 1;
-	for (pass = 0;
-	     pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-	     pass++) {
+	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
 		retry = 0;
 		large_retry = 0;
 		thp_retry = 0;
@@ -1818,7 +1804,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 				if (is_large) {
 					large_retry++;
 					thp_retry += is_thp;
-				} else if (!no_split_folio_counting) {
+				} else {
 					retry++;
 				}
 				nr_retry_pages += nr_pages;
@@ -1831,7 +1817,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 				if (is_large) {
 					nr_large_failed++;
 					stats->nr_thp_failed += is_thp;
-				} else if (!no_split_folio_counting) {
+				} else {
 					nr_failed++;
 				}
 
@@ -1868,27 +1854,6 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 		dst2 = list_next_entry(dst, lru);
 	}
 
-	/*
-	 * Try to migrate split folios of fail-to-migrate large folios, no
-	 * nr_failed counting in this round, since all split folios of a
-	 * large folio is counted as 1 failure in the first round.
-	 */
-	if (rc >= 0 && !list_empty(&split_folios)) {
-		/*
-		 * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
-		 * retries) to ret_folios to avoid migrating them again.
-		 */
-		list_splice_init(from, ret_folios);
-		list_splice_init(&split_folios, from);
-		/*
-		 * Force async mode to avoid to wait lock or bit when we have
-		 * locked more than one folios.
-		 */
-		mode = MIGRATE_ASYNC;
-		no_split_folio_counting = true;
-		goto retry;
-	}
-
 	return rc;
 }
 
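Two details replace the block deleted above. The forced MIGRATE_ASYNC mode and the single extra attempt now live in migrate_pages(), which calls migrate_pages_batch() a second time with nr_pass set to 1. The no_split_folio_counting flag becomes unnecessary because the caller discards the return value of that second call, so failures of individual split folios never reach rc_gather; the originating large folio was already counted as one failure. Passing NULL for split_folios in that call also looks safe as far as this diff shows: the only sites that dereference it are the two try_split_folio() calls, both of which sit on large-folio paths, while everything on the split list is the small-folio result of an earlier split.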
@@ -1927,6 +1892,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	struct folio *folio, *folio2;
 	LIST_HEAD(folios);
 	LIST_HEAD(ret_folios);
+	LIST_HEAD(split_folios);
 	struct migrate_pages_stats stats;
 
 	trace_mm_migrate_pages_start(mode, reason);
@@ -1960,12 +1926,24 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	else
 		list_splice_init(from, &folios);
 	rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
-				 mode, reason, &ret_folios, &stats);
+				 mode, reason, &ret_folios, &split_folios, &stats,
+				 NR_MAX_MIGRATE_PAGES_RETRY);
 	list_splice_tail_init(&folios, &ret_folios);
 	if (rc < 0) {
 		rc_gather = rc;
+		list_splice_tail(&split_folios, &ret_folios);
 		goto out;
 	}
+	if (!list_empty(&split_folios)) {
+		/*
+		 * Failure isn't counted since all split folios of a large folio
+		 * is counted as 1 failure already. And, we only try to migrate
+		 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
+		 */
+		migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
+				    MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
+		list_splice_tail_init(&split_folios, &ret_folios);
+	}
 	rc_gather += rc;
 	if (!list_empty(from))
 		goto again;
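To summarize the new division of labor, a condensed sketch of the resulting flow in migrate_pages(), abridged to the lines this patch touches; the error handling between the two calls is as shown in the hunk above, and the comments here are editorial:

	/* Phase 1: full effort, up to NR_MAX_MIGRATE_PAGES_RETRY passes;
	 * folios produced by splitting accumulate on split_folios. */
	rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
				 mode, reason, &ret_folios, &split_folios,
				 &stats, NR_MAX_MIGRATE_PAGES_RETRY);

	/* Phase 2: best effort only. Async mode, a single pass, no further
	 * splitting (split_folios is NULL), and the return value is
	 * deliberately ignored so these failures are not counted again. */
	if (!list_empty(&split_folios)) {
		migrate_pages_batch(&split_folios, get_new_page, put_new_page,
				    private, MIGRATE_ASYNC, reason, &ret_folios,
				    NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}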