Merge branch 'for-5.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu

Pull percpu fix from Dennis Zhou:
 "This is just a single change to fix percpu depopulation. The code
  relied on depopulation code written specifically for the free path and
  relied on vmalloc to do the tlb flush lazily. As we're modifying the
  backing pages during the lifetime of a chunk, we need to also flush
  the tlb accordingly.

  Guenter Roeck reported this issue on mips in [1]. I believe we have
  simply been lucky so far on x86, given its much larger chunk sizes
  and the consequently lower churn of this memory"

Link: https://lore.kernel.org/lkml/20210702191140.GA3166599@roeck-us.net/ [1]

* 'for-5.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  percpu: flush tlb in pcpu_reclaim_populated()
torvalds committed Jul 10, 2021
2 parents 50be941 + 93274f1 commit 20d5e57
Showing 3 changed files with 35 additions and 8 deletions.
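
In short, the fix tracks the lowest and highest page indices whose mappings are torn down while a chunk is being reclaimed, and then issues a single TLB flush over that whole range per chunk instead of leaning on vmalloc's lazy flush. As a rough illustration of that batching idea only (this is not the kernel code; struct demo_chunk, demo_unmap(), demo_tlb_flush() and demo_reclaim() are invented stand-ins), a userspace C sketch might look like:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_NR_PAGES 8

/* Simplified stand-in for a percpu chunk: one flag per backing page. */
struct demo_chunk {
	bool populated[DEMO_NR_PAGES];
};

/* Pretend to unmap one page of the chunk's backing memory. */
static void demo_unmap(struct demo_chunk *c, int page)
{
	c->populated[page] = false;
	printf("unmap page %d\n", page);
}

/* Pretend to flush the TLB for pages [start, end) of the chunk. */
static void demo_tlb_flush(int start, int end)
{
	printf("tlb flush [%d, %d)\n", start, end);
}

/*
 * Tear down the mapping of every still-populated page, tracking the
 * overall freed range, then flush the TLB once for the whole chunk.
 */
static void demo_reclaim(struct demo_chunk *c)
{
	int freed_start = DEMO_NR_PAGES;	/* lowest page index freed */
	int freed_end = 0;			/* one past the highest page freed */

	for (int i = DEMO_NR_PAGES - 1; i >= 0; i--) {
		if (!c->populated[i])
			continue;
		demo_unmap(c, i);
		if (i < freed_start)
			freed_start = i;
		if (i + 1 > freed_end)
			freed_end = i + 1;
	}

	/*
	 * One batched flush; without it, stale TLB entries could still
	 * reference pages that are about to be reused.
	 */
	if (freed_start < freed_end)
		demo_tlb_flush(freed_start, freed_end);
}

int main(void)
{
	struct demo_chunk c = { .populated = { [2] = true, [3] = true, [6] = true } };

	demo_reclaim(&c);
	return 0;
}

The actual patch below does the equivalent inside pcpu_reclaim_populated(), dropping pcpu_lock around the flush, and gives the kmalloc-backed percpu-km variant a no-op pcpu_post_unmap_tlb_flush().
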
6 changes: 6 additions & 0 deletions mm/percpu-km.c
@@ -32,6 +32,12 @@
 
 #include <linux/log2.h>
 
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+				      int page_start, int page_end)
+{
+	/* nothing */
+}
+
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 			       int page_start, int page_end, gfp_t gfp)
 {

5 changes: 3 additions & 2 deletions mm/percpu-vm.c
@@ -303,6 +303,9 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.
  *
+ * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the
+ * region back to vmalloc() which will lazily flush the tlb.
+ *
  * CONTEXT:
  * pcpu_alloc_mutex.
  */
@@ -324,8 +327,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 
 	pcpu_unmap_pages(chunk, pages, page_start, page_end);
 
-	/* no need to flush tlb, vmalloc will handle it lazily */
-
 	pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 

32 changes: 26 additions & 6 deletions mm/percpu.c
@@ -1572,6 +1572,7 @@ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
  *
  * pcpu_populate_chunk - populate the specified range of a chunk
  * pcpu_depopulate_chunk - depopulate the specified range of a chunk
+ * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
  * pcpu_create_chunk - create a new chunk
  * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
  * pcpu_addr_to_page - translate address to physical address
@@ -1581,6 +1582,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 			       int page_start, int page_end, gfp_t gfp);
 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 				  int page_start, int page_end);
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+				      int page_start, int page_end);
 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 static struct page *pcpu_addr_to_page(void *addr);
@@ -2137,11 +2140,12 @@ static void pcpu_reclaim_populated(void)
 {
 	struct pcpu_chunk *chunk;
 	struct pcpu_block_md *block;
+	int freed_page_start, freed_page_end;
 	int i, end;
+	bool reintegrate;
 
 	lockdep_assert_held(&pcpu_lock);
 
-restart:
 	/*
 	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
 	 * longer discoverable to allocations whom may populate pages. The only
@@ -2157,15 +2161,18 @@ static void pcpu_reclaim_populated(void)
 		 * Scan chunk's pages in the reverse order to keep populated
 		 * pages close to the beginning of the chunk.
 		 */
+		freed_page_start = chunk->nr_pages;
+		freed_page_end = 0;
+		reintegrate = false;
 		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
 			/* no more work to do */
 			if (chunk->nr_empty_pop_pages == 0)
 				break;
 
 			/* reintegrate chunk to prevent atomic alloc failures */
 			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
-				pcpu_reintegrate_chunk(chunk);
-				goto restart;
+				reintegrate = true;
+				goto end_chunk;
 			}
 
 			/*
@@ -2194,16 +2201,29 @@ static void pcpu_reclaim_populated(void)
 			spin_lock_irq(&pcpu_lock);
 
 			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
+			freed_page_start = min(freed_page_start, i + 1);
+			freed_page_end = max(freed_page_end, end + 1);
 
 			/* reset the range and continue */
 			end = -1;
 		}
 
-		if (chunk->free_bytes == pcpu_unit_size)
+end_chunk:
+		/* batch tlb flush per chunk to amortize cost */
+		if (freed_page_start < freed_page_end) {
+			spin_unlock_irq(&pcpu_lock);
+			pcpu_post_unmap_tlb_flush(chunk,
+						  freed_page_start,
+						  freed_page_end);
+			cond_resched();
+			spin_lock_irq(&pcpu_lock);
+		}
+
+		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
 			pcpu_reintegrate_chunk(chunk);
 		else
-			list_move(&chunk->list,
-				  &pcpu_chunk_lists[pcpu_sidelined_slot]);
+			list_move_tail(&chunk->list,
+				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
 	}
 }
 
