FROMLIST: mm: fix use-after-free of page_ext after race with memory-offline

Below is one path where a race between page_ext access and offlining of
the respective memory block causes a use-after-free on access of the
page_ext structure.

process1                                process2
---------                               ---------
a) doing /proc/page_owner               doing memory offline
                                        through offline_pages.

b) PageBuddy check fails, thus
proceed to get the page_owner
information through page_ext access:
page_ext = lookup_page_ext(page);

                                        migrate_pages();
                                        .................
                                        Since all pages are successfully
                                        migrated as part of the offline
                                        operation, send the MEM_OFFLINE
                                        notification, where for page_ext
                                        it calls:
                                        offline_page_ext() -->
                                          __free_page_ext() -->
                                            free_page_ext() -->
                                              vfree(ms->page_ext)
                                          mem_section->page_ext = NULL

c) Check for the PAGE_EXT flags in
the page_ext->flags access; this
results in a use-after-free (leading
to translation faults).

As mentioned above, there is no synchronization between page_ext access
and its freeing during memory offline.

The (rough) memory offline steps on a memory block are:
1) Isolate all the pages.
2) while(1): try to free the pages to buddy (->free_list[MIGRATE_ISOLATE]).
3) Delete the pages from this buddy list.
4) Then free page_ext. (Note: the struct page is still alive, as it is
freed only during hot remove of the memory, which frees the memmap, a
step the user might not perform.)

This design leads to a state where the struct page is alive but the
struct page_ext is freed, even though the latter is conceptually part of
the former, just representing extended page flags (see [3] for why this
design was chosen).

The above-mentioned race is just one example, but the problem persists
in other paths involving page_ext->flags access too (e.g.
page_is_idle()).
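
For illustration, this is the pre-fix reader shape, condensed from the
page_idle helpers changed below (a sketch of the old code, not code
added by this patch):

static inline bool page_is_idle(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        if (unlikely(!page_ext))
                return false;

        /* If offline_page_ext() freed ms->page_ext after the lookup
         * above, this dereference is the use-after-free. */
        return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}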

Fix all the paths where offline races with page_ext access by
maintaining synchronization with the RCU lock; this is achieved in 3
steps (see the condensed pairing sketched after this list):
1) Invalidate all the page_ext's of the sections of a memory block by
storing a flag in the LSB of mem_section->page_ext.

2) Wait until all the existing readers finish working with the
->page_ext's, using synchronize_rcu(). Any parallel process that starts
after this call will not get a page_ext, through lookup_page_ext(), for
the block on which the offline operation is being performed.

3) Now safely free all sections' ->page_ext's of the block on which the
offline operation is being performed.
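
Condensed from the hunks in mm/page_ext.c below, the offline side and
the reader side pair up as:

/* Offline side (offline_page_ext()), per memory block: */
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
        __invalidate_page_ext(pfn);     /* 1) tag LSB with PAGE_EXT_INVALID */

synchronize_rcu();                      /* 2) wait out pre-existing readers */

for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
        __free_page_ext(pfn);           /* 3) now safe to free */

/* Reader side: page_ext stays valid between get and put. */
static bool reader_example(struct page *page)   /* illustrative name */
{
        struct page_ext *page_ext = page_ext_get(page); /* rcu_read_lock() + lookup */
        bool ret = false;

        if (page_ext) {
                ret = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
                page_ext_put(page_ext);         /* rcu_read_unlock() */
        }
        return ret;
}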

Note: if synchronize_rcu() takes time, this path can be optimized
through call_rcu() [2], as sketched below.
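
As a rough illustration only (not part of this patch; the container
type and callback names are hypothetical), such a call_rcu() variant
could queue the free instead of blocking:

struct page_ext_free_work {
        struct rcu_head rcu;
        void *base;             /* the section's page_ext allocation */
};

static void page_ext_free_cb(struct rcu_head *head)
{
        struct page_ext_free_work *work =
                container_of(head, struct page_ext_free_work, rcu);

        /* Runs in softirq context, so the vmalloc'ed case would need
         * vfree_atomic() rather than plain vfree(). */
        free_page_ext(work->base);
        kfree(work);
}

The offline path would then call call_rcu(&work->rcu, page_ext_free_cb)
per section instead of waiting in synchronize_rcu().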

Thanks to David Hildenbrand for his views/suggestions on the initial
discussion [1] and to Pavan Kondeti for various inputs on this patch.

[1] https://lore.kernel.org/linux-mm/59edde13-4167-8550-86f0-11fc67882107@quicinc.com/
[2] https://lore.kernel.org/all/a26ce299-aed1-b8ad-711e-a49e82bdd180@quicinc.com/T/#u
[3] https://lore.kernel.org/all/6fa6b7aa-731e-891c-3efb-a03d6a700efa@redhat.com/

Bug: 236222283
Link: https://lore.kernel.org/all/1661496993-11473-1-git-send-email-quic_charante@quicinc.com/
Change-Id: Ib439ae19c61a557a5c70ea90e3c4b35a5583ba0d
Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
(fixed merge conflicts and still exported lookup_page_ext)
Authored by Charan Teja Kalla; committed by surenbaghdasaryan on Aug 30, 2022 (commit 2b3f9b8, parent dec2f52).
4 changed files with 182 additions and 39 deletions.
15 changes: 10 additions & 5 deletions include/linux/page_ext.h
@@ -56,8 +56,9 @@ static inline void page_ext_init(void)
 {
 }
 #endif

-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);

 static inline struct page_ext *page_ext_next(struct page_ext *curr)
 {
@@ -73,16 +74,20 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }

-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void page_ext_init(void)
 {
-        return NULL;
 }

-static inline void page_ext_init(void)
+static inline void page_ext_init_flatmem(void)
 {
 }

-static inline void page_ext_init_flatmem(void)
+static inline struct page_ext *page_ext_get(struct page *page)
 {
+        return NULL;
 }

+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
 #endif /* CONFIG_PAGE_EXTENSION */
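
A minimal sketch of the new caller contract (my_flag_test() is a
hypothetical caller, not part of this patch; assumes
CONFIG_PAGE_EXTENSION=y and uses page_owner's PAGE_EXT_OWNER flag for
illustration):

#include <linux/page_ext.h>

static bool my_flag_test(struct page *page)
{
        struct page_ext *page_ext = page_ext_get(page); /* takes rcu_read_lock() */
        bool ret;

        if (unlikely(!page_ext))
                return false;

        ret = test_bit(PAGE_EXT_OWNER, &page_ext->flags);
        page_ext_put(page_ext); /* drops rcu_read_lock(); no sleeping before this */

        return ret;
}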
33 changes: 24 additions & 9 deletions include/linux/page_idle.h
@@ -47,62 +47,77 @@ extern struct page_ext_operations page_idle_ops;

 static inline bool page_is_young(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);
+        bool page_young;

         if (unlikely(!page_ext))
                 return false;

-        return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+        page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+        page_ext_put(page_ext);
+
+        return page_young;
 }

 static inline void set_page_young(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);

         if (unlikely(!page_ext))
                 return;

         set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+        page_ext_put(page_ext);
 }

 static inline bool test_and_clear_page_young(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);
+        bool page_young;

         if (unlikely(!page_ext))
                 return false;

-        return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+        page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+        page_ext_put(page_ext);
+
+        return page_young;
 }

 static inline bool page_is_idle(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);
+        bool page_idle;

         if (unlikely(!page_ext))
                 return false;

-        return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+        page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+        page_ext_put(page_ext);
+
+        return page_idle;
 }

 static inline void set_page_idle(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);

         if (unlikely(!page_ext))
                 return;

         set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+        page_ext_put(page_ext);
 }

 static inline void clear_page_idle(struct page *page)
 {
-        struct page_ext *page_ext = lookup_page_ext(page);
+        struct page_ext *page_ext = page_ext_get(page);

         if (unlikely(!page_ext))
                 return;

         clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+        page_ext_put(page_ext);
 }
 #endif /* CONFIG_64BIT */
100 changes: 95 additions & 5 deletions mm/page_ext.c
@@ -8,7 +8,7 @@
 #include <linux/kmemleak.h>
 #include <linux/page_owner.h>
 #include <linux/page_idle.h>
-
+#include <linux/rcupdate.h>
 /*
  * struct page extension
  *
@@ -58,6 +58,10 @@
  * can utilize this callback to initialize the state of it correctly.
  */

+#ifdef CONFIG_SPARSEMEM
+#define PAGE_EXT_INVALID        (0x1)
+#endif
+
 #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
 static bool need_page_idle(void)
 {
@@ -117,6 +121,49 @@ static inline struct page_ext *get_entry(void *base, unsigned long index)
         return base + page_ext_size * index;
 }

+/**
+ * page_ext_get() - Get the extended information for a page.
+ * @page: The page we're interested in.
+ *
+ * Ensures that the page_ext will remain valid until page_ext_put()
+ * is called.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ * Context: Any context. Caller may not sleep until they have called
+ * page_ext_put().
+ */
+struct page_ext *page_ext_get(struct page *page)
+{
+        struct page_ext *page_ext;
+
+        rcu_read_lock();
+        page_ext = lookup_page_ext(page);
+        if (!page_ext) {
+                rcu_read_unlock();
+                return NULL;
+        }
+
+        return page_ext;
+}
+
+/**
+ * page_ext_put() - Working with page extended information is done.
+ * @page_ext: Page extended information received from page_ext_get().
+ *
+ * The page extended information of the page may not be valid after this
+ * function is called.
+ *
+ * Return: None.
+ * Context: Any context where a corresponding page_ext_get() was called.
+ */
+void page_ext_put(struct page_ext *page_ext)
+{
+        if (unlikely(!page_ext))
+                return;
+
+        rcu_read_unlock();
+}
+
 #if !defined(CONFIG_SPARSEMEM)

@@ -131,6 +178,7 @@ struct page_ext *lookup_page_ext(const struct page *page)
         unsigned long index;
         struct page_ext *base;

+        WARN_ON_ONCE(!rcu_read_lock_held());
         base = NODE_DATA(page_to_nid(page))->node_page_ext;
         /*
          * The sanity checks the page allocator does upon freeing a
@@ -200,20 +248,27 @@
 }

 #else /* CONFIG_FLAT_NODE_MEM_MAP */
+static bool page_ext_invalid(struct page_ext *page_ext)
+{
+        return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
+}
+
 struct page_ext *lookup_page_ext(const struct page *page)
 {
         unsigned long pfn = page_to_pfn(page);
         struct mem_section *section = __pfn_to_section(pfn);
+        struct page_ext *page_ext = READ_ONCE(section->page_ext);

+        WARN_ON_ONCE(!rcu_read_lock_held());
         /*
          * The sanity checks the page allocator does upon freeing a
          * page can reach here before the page_ext arrays are
          * allocated when feeding a range of pages to the allocator
          * for the first time during bootup or memory hotplug.
          */
-        if (!section->page_ext)
+        if (page_ext_invalid(page_ext))
                 return NULL;
-        return get_entry(section->page_ext, pfn);
+        return get_entry(page_ext, pfn);
 }
 EXPORT_SYMBOL_GPL(lookup_page_ext);

@@ -293,9 +348,30 @@ static void __free_page_ext(unsigned long pfn)
         ms = __pfn_to_section(pfn);
         if (!ms || !ms->page_ext)
                 return;
-        base = get_entry(ms->page_ext, pfn);
+
+        base = READ_ONCE(ms->page_ext);
+        /*
+         * page_ext here can be valid while doing the roll back
+         * operation in online_page_ext().
+         */
+        if (page_ext_invalid(base))
+                base = (void *)base - PAGE_EXT_INVALID;
+        WRITE_ONCE(ms->page_ext, NULL);
+
+        base = get_entry(base, pfn);
         free_page_ext(base);
-        ms->page_ext = NULL;
 }
+
+static void __invalidate_page_ext(unsigned long pfn)
+{
+        struct mem_section *ms;
+        void *val;
+
+        ms = __pfn_to_section(pfn);
+        if (!ms || !ms->page_ext)
+                return;
+        val = (void *)ms->page_ext + PAGE_EXT_INVALID;
+        WRITE_ONCE(ms->page_ext, val);
+}

static int __meminit online_page_ext(unsigned long start_pfn,
Expand Down Expand Up @@ -338,6 +414,20 @@ static int __meminit offline_page_ext(unsigned long start_pfn,
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);

/*
* Freeing of page_ext is done in 3 steps to avoid
* use-after-free of it:
* 1) Traverse all the sections and mark their page_ext
* as invalid.
* 2) Wait for all the existing users of page_ext who
* started before invalidation to finish.
* 3) Free the page_ext.
*/
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__invalidate_page_ext(pfn);

synchronize_rcu();

for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_ext(pfn);
return 0;