Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "The post-linux-next material.

  7 patches.

  Subsystems affected by this patch series (all mm): debug,
  slab-generic, migration, memcg, and kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: add kasan mode messages when kasan init
  mm: unexport {,un}lock_page_memcg
  mm: unexport folio_memcg_{,un}lock
  mm/migrate.c: remove MIGRATE_PFN_LOCKED
  mm: migrate: simplify the file-backed pages validation when migrating its mapping
  mm: allow only SLUB on PREEMPT_RT
  mm/page_owner.c: modify the type of argument "order" in some functions
torvalds committed Nov 11, 2021
2 parents 6d76f6e + b873e98 commit dbf4989
Showing 14 changed files with 61 additions and 150 deletions.
2 changes: 1 addition & 1 deletion Documentation/vm/hmm.rst
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::
 
-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));
 
  Now that the driver knows that this page is being migrated, it can
  invalidate device private MMU mappings and copy device private memory
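This pattern recurs throughout the series: drivers drop MIGRATE_PFN_LOCKED and the core migration code takes over source-page locking, so the driver only allocates and locks the destination page. A minimal sketch of the dst-array fill loop the documentation describes, assuming a hypothetical driver-specific alloc_dst_page() allocator and the usual struct migrate_vma *args:

    unsigned long i;

    for (i = 0; i < args->npages; i++) {
            struct page *dpage;

            /* Entries the core could not prepare for migration are skipped. */
            if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
                    continue;

            dpage = alloc_dst_page();       /* hypothetical driver allocator */
            if (!dpage) {
                    args->dst[i] = 0;       /* no destination: leave unmigrated */
                    continue;
            }

            lock_page(dpage);               /* destination page stays locked */
            args->dst[i] = migrate_pfn(page_to_pfn(dpage));
            if (args->src[i] & MIGRATE_PFN_WRITE)
                    args->dst[i] |= MIGRATE_PFN_WRITE;
    }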
2 changes: 1 addition & 1 deletion arch/arm64/mm/kasan_init.c
@@ -310,7 +310,7 @@ void __init kasan_init(void)
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
 	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 			  gpa, 0, page_shift);
 
 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}
 
-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);
2 changes: 0 additions & 2 deletions drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
 				      DMA_TO_DEVICE);
 		r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			 dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		j++;
 	}
 
4 changes: 2 additions & 2 deletions drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 		goto error_dma_unmap;
 	mutex_unlock(&svmm->mutex);
 
-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
 	return 0;
 
 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));
 
 out_dma_unmap:
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
1 change: 0 additions & 1 deletion include/linux/migrate.h
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID	(1UL << 0)
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
-#define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
 #define MIGRATE_PFN_SHIFT	6
 
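For context, migrate_pfn() packs the pfn above these flag bits, which is why MIGRATE_PFN_SHIFT sits above the highest flag. A sketch of the encode/decode helpers, written to match the definitions above (my reading of the upstream helpers; treat as illustrative):

    static inline unsigned long migrate_pfn(unsigned long pfn)
    {
            return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
    }

    static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
    {
            if (!(mpfn & MIGRATE_PFN_VALID))
                    return NULL;
            return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
    }

Note that bit 2 is simply left vacant rather than renumbering MIGRATE_PFN_WRITE, presumably to keep the remaining flag values stable.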
12 changes: 6 additions & 6 deletions include/linux/page_owner.h
@@ -8,24 +8,24 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
 
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask);
+			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);
 
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__reset_page_owner(page, order);
 }
 
 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__set_page_owner(page, order, gfp_mask);
@@ -52,15 +52,15 @@ static inline void dump_page_owner(const struct page *page)
 	__dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned int order)
+			unsigned short order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
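Narrowing order from unsigned int to unsigned short is safe because an allocation order is bounded by MAX_ORDER (typically 11), far below USHRT_MAX, and it matches the unsigned short order member already used by struct page_owner. A standalone illustration of the bound, with the MAX_ORDER value assumed here:

    #include <limits.h>

    #define MAX_ORDER 11    /* assumed typical value; kernel configs vary */

    /* The largest valid order, MAX_ORDER - 1, fits easily in unsigned short. */
    _Static_assert(MAX_ORDER - 1 <= USHRT_MAX, "order fits in unsigned short");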
2 changes: 2 additions & 0 deletions init/Kconfig
@@ -1896,6 +1896,7 @@ choice
 
 config SLAB
 	bool "SLAB"
+	depends on !PREEMPT_RT
 	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT
 	help
 	  SLOB replaces the stock allocator with a drastically simpler
 	  allocator. SLOB is generally more space efficient but
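The net effect of the two new dependencies: with CONFIG_PREEMPT_RT=y, SLAB and SLOB disappear from the allocator choice and SLUB becomes the only selectable slab allocator, per the patch title "mm: allow only SLUB on PREEMPT_RT".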
5 changes: 2 additions & 3 deletions lib/test_hmm.c
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 		 */
 		rpage->zone_device_data = dmirror;
 
-		*dst = migrate_pfn(page_to_pfn(dpage)) |
-			MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if ((*src & MIGRATE_PFN_WRITE) ||
 		    (!spage && args->vma->vm_flags & VM_WRITE))
 			*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	lock_page(dpage);
 	xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 	copy_highpage(dpage, spage);
-	*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*dst = migrate_pfn(page_to_pfn(dpage));
 	if (*src & MIGRATE_PFN_WRITE)
 		*dst |= MIGRATE_PFN_WRITE;
 }
14 changes: 13 additions & 1 deletion mm/kasan/hw_tags.c
@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
 
+static inline const char *kasan_mode_info(void)
+{
+	if (kasan_mode == KASAN_MODE_ASYNC)
+		return "async";
+	else if (kasan_mode == KASAN_MODE_ASYMM)
+		return "asymm";
+	else
+		return "sync";
+}
+
 /* kasan_init_hw_tags_cpu() is called for each CPU. */
 void kasan_init_hw_tags_cpu(void)
 {
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
 		break;
 	}
 
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+		kasan_mode_info(),
+		kasan_stack_collection_enabled() ? "on" : "off");
 }
 
 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
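With this format string, a hardware tag-based kernel booted with defaults would log something like the following (illustrative; the exact mode and stacktrace values depend on configuration and the kasan.mode/kasan.stacktrace parameters):

    KernelAddressSanitizer initialized (hw-tags, mode=sync, stacktrace=on)

Together with the "(generic)" and "(sw-tags)" variants, the active KASAN mode is now identifiable straight from dmesg.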
2 changes: 1 addition & 1 deletion mm/kasan/sw_tags.c
@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(prng_state, cpu) = (u32)get_cycles();
 
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
 }
 
 /*
4 changes: 0 additions & 4 deletions mm/memcontrol.c
@@ -2058,13 +2058,11 @@ void folio_memcg_lock(struct folio *folio)
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 }
-EXPORT_SYMBOL(folio_memcg_lock);
 
 void lock_page_memcg(struct page *page)
 {
 	folio_memcg_lock(page_folio(page));
 }
-EXPORT_SYMBOL(lock_page_memcg);
 
 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
 {
 	__folio_memcg_unlock(folio_memcg(folio));
 }
-EXPORT_SYMBOL(folio_memcg_unlock);
 
 void unlock_page_memcg(struct page *page)
 {
 	folio_memcg_unlock(page_folio(page));
 }
-EXPORT_SYMBOL(unlock_page_memcg);
 
 struct obj_stock {
 #ifdef CONFIG_MEMCG_KMEM
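Dropping these EXPORT_SYMBOL() lines makes lock_page_memcg(), unlock_page_memcg(), folio_memcg_lock(), and folio_memcg_unlock() unavailable to loadable modules; presumably no modular users remain, leaving the exports as dead weight.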
(diffs for the remaining 2 changed files not loaded)
