Commit ff6a6da

walken-google authored and torvalds committed
mm: accelerate munlock() treatment of THP pages
munlock_vma_pages_range() was always incrementing addresses by PAGE_SIZE at a time. When munlocking THP pages (or the huge zero page), this resulted in taking the mm->page_table_lock 512 times in a row.

We can do better by making use of the page_mask returned by follow_page_mask (for the huge zero page case), or the size of the page munlock_vma_page() operated on (for the true THP page case).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent c5a5105 commit ff6a6da
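
As a rough, userspace-only illustration of the arithmetic the new loop relies on (the 2MB THP placement, the PAGE_SHIFT value, and the sample address below are assumptions for the example, not part of the commit), a mask of nr_pages - 1 lets a single iteration jump from anywhere inside a huge page to its end:

/* Illustrative sketch only: how page_increm skips the remainder of a
 * 2MB THP in one step instead of up to 512 PAGE_SIZE-sized steps.
 * All of the constants here are assumed for the demonstration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	/* munlock starts 5 base pages into a THP spanning 0x200000-0x400000 */
	unsigned long start = 0x200000 + 5 * PAGE_SIZE;
	unsigned int page_mask = 511;		/* 2MB THP: 512 base pages - 1 */
	unsigned int page_increm;

	/* Same expression as the new munlock_vma_pages_range() loop:
	 * number of base pages from start to the end of the current page.
	 */
	page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
	start += page_increm * PAGE_SIZE;

	printf("advanced %u base pages, next address 0x%lx\n",
	       page_increm, start);		/* 507 pages, 0x400000 */
	return 0;
}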

2 files changed, 24 insertions(+), 12 deletions(-)

mm/internal.h

Lines changed: 1 addition & 1 deletion
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where

mm/mlock.c

Lines changed: 23 additions & 11 deletions
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
  * can't isolate the page, we leave it for putback_lru_page() and vmscan
  * [page_referenced()/try_to_unmap()] to deal with.
  */
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
 {
+	unsigned int page_mask = 0;
+
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    -hpage_nr_pages(page));
+		unsigned int nr_pages = hpage_nr_pages(page);
+		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+		page_mask = nr_pages - 1;
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
 			count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 		}
 	}
+
+	return page_mask;
 }
 
 /**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
-	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
 
@@ -226,27 +230,35 @@ static int __mlock_posix_error_return(long retval)
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	unsigned long addr;
-
-	lru_add_drain();
 	vma->vm_flags &= ~VM_LOCKED;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	while (start < end) {
 		struct page *page;
+		unsigned int page_mask, page_increm;
+
 		/*
 		 * Although FOLL_DUMP is intended for get_dump_page(),
 		 * it just so happens that its special treatment of the
 		 * ZERO_PAGE (returning an error instead of doing get_page)
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
		 */
-		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+					&page_mask);
 		if (page && !IS_ERR(page)) {
 			lock_page(page);
-			munlock_vma_page(page);
+			lru_add_drain();
+			/*
+			 * Any THP page found by follow_page_mask() may have
+			 * gotten split before reaching munlock_vma_page(),
+			 * so we need to recompute the page_mask here.
+			 */
+			page_mask = munlock_vma_page(page);
 			unlock_page(page);
 			put_page(page);
 		}
+		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		start += page_increm * PAGE_SIZE;
 		cond_resched();
 	}
 }
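
Two points follow from the diff above. For an ordinary 4KB page, munlock_vma_page() returns a mask of 0 (nr_pages - 1 with nr_pages == 1), so page_increm is 1 and the loop steps exactly as the old PAGE_SIZE loop did. For the huge zero page, FOLL_DUMP makes follow_page_mask() return an error rather than a page, so munlock_vma_page() is never called and the mask that follow_page_mask() filled in is what drives the skip. A minimal sketch of the degenerate single-page case, with values assumed for illustration:

/* Degenerate (non-THP) case: a mask of 0 reproduces the old behaviour
 * of advancing one PAGE_SIZE per iteration.  Values are assumed.
 */
#include <assert.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long start = 0x205000;	/* any page-aligned address */
	unsigned int page_mask = 0;	/* nr_pages - 1 for a single 4KB page */
	unsigned int page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

	assert(page_increm == 1);	/* one base page, same step as before */
	return 0;
}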
