Merge tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Dave Hansen:
 "New Feature:

   - Randomize the per-cpu entry areas

  Cleanups:

   - Have CR3_ADDR_MASK use PHYSICAL_PAGE_MASK instead of open coding it

   - Move to "native" set_memory_rox() helper

   - Clean up pmd_get_atomic() and i386-PAE

   - Remove some unused page table size macros"

* tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  x86/mm: Ensure forced page table splitting
  x86/kasan: Populate shadow for shared chunk of the CPU entry area
  x86/kasan: Add helpers to align shadow addresses up and down
  x86/kasan: Rename local CPU_ENTRY_AREA variables to shorten names
  x86/mm: Populate KASAN shadow for entire per-CPU range of CPU entry area
  x86/mm: Recompute physical address for every page of per-CPU CEA mapping
  x86/mm: Rename __change_page_attr_set_clr(.checkalias)
  x86/mm: Inhibit _PAGE_NX changes from cpa_process_alias()
  x86/mm: Untangle __change_page_attr_set_clr(.checkalias)
  x86/mm: Add a few comments
  x86/mm: Fix CR3_ADDR_MASK
  x86/mm: Remove P*D_PAGE_MASK and P*D_PAGE_SIZE macros
  mm: Convert __HAVE_ARCH_P..P_GET to the new style
  mm: Remove pointless barrier() after pmdp_get_lockless()
  x86/mm/pae: Get rid of set_64bit()
  x86_64: Remove pointless set_64bit() usage
  x86/mm/pae: Be consistent with pXXp_get_and_clear()
  x86/mm/pae: Use WRITE_ONCE()
  x86/mm/pae: Don't (ab)use atomic64
  mm/gup: Fix the lockless PMD access
  ...
torvalds committed Dec 17, 2022
2 parents 03d84bd + 3e844d8 commit 4f292c4
Showing 55 changed files with 356 additions and 395 deletions.
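
Several of the diffs below collapse the set_memory_ro() + set_memory_x() pair into a single set_memory_rox() call, per the 'Move to "native" set_memory_rox() helper' item above. For reference, a sketch of the generic fallback this series introduces (modeled on include/linux/set_memory.h; architectures such as x86 override it with a single pagetable walk, so treat this as illustrative rather than the exact source):

#ifndef set_memory_rox
static inline int set_memory_rox(unsigned long addr, int numpages)
{
	/* Generic fallback: two passes, read-only then executable. */
	int ret = set_memory_ro(addr, numpages);

	if (ret)
		return ret;
	return set_memory_x(addr, numpages);
}
#endif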
8 changes: 3 additions & 5 deletions arch/arm/mach-omap1/sram-init.c
@@ -10,11 +10,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/set_memory.h>
 
 #include <asm/fncpy.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
-#include <asm/set_memory.h>
 
 #include <asm/mach/map.h>
 
@@ -74,8 +74,7 @@ void *omap_sram_push(void *funcp, unsigned long size)
 
 	dst = fncpy(sram, funcp, size);
 
-	set_memory_ro(base, pages);
-	set_memory_x(base, pages);
+	set_memory_rox(base, pages);
 
 	return dst;
 }
@@ -126,8 +125,7 @@ static void __init omap_detect_and_map_sram(void)
 	base = (unsigned long)omap_sram_base;
 	pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
 
-	set_memory_ro(base, pages);
-	set_memory_x(base, pages);
+	set_memory_rox(base, pages);
 }
 
 static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
8 changes: 3 additions & 5 deletions arch/arm/mach-omap2/sram.c
@@ -14,11 +14,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/set_memory.h>
 
 #include <asm/fncpy.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
-#include <asm/set_memory.h>
 
 #include <asm/mach/map.h>
 
@@ -96,8 +96,7 @@ void *omap_sram_push(void *funcp, unsigned long size)
 
 	dst = fncpy(sram, funcp, size);
 
-	set_memory_ro(base, pages);
-	set_memory_x(base, pages);
+	set_memory_rox(base, pages);
 
 	return dst;
 }
@@ -217,8 +216,7 @@ static void __init omap2_map_sram(void)
 	base = (unsigned long)omap_sram_base;
 	pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
 
-	set_memory_ro(base, pages);
-	set_memory_x(base, pages);
+	set_memory_rox(base, pages);
 }
 
 static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
2 changes: 1 addition & 1 deletion arch/mips/Kconfig
@@ -46,7 +46,7 @@ config MIPS
 	select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
-	select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
+	select GUP_GET_PXX_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
 	select HAVE_ARCH_COMPILER_H
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -263,7 +263,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
 }
 
 #ifdef CONFIG_PPC_16K_PAGES
-#define __HAVE_ARCH_PTEP_GET
+#define ptep_get ptep_get
 static inline pte_t ptep_get(pte_t *ptep)
 {
 	pte_basic_t val = READ_ONCE(ptep->pte);
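
The nohash/32 hunk above is part of the "mm: Convert __HAVE_ARCH_P..P_GET to the new style" cleanup: instead of advertising an override through a separate __HAVE_ARCH_PTEP_GET symbol, an architecture now defines the function name as a macro naming itself, which the generic header can test directly. The generic side of the convention is the standard #ifndef pattern from include/linux/pgtable.h:

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	/* Compiled only when no arch did "#define ptep_get ptep_get". */
	return READ_ONCE(*ptep);
}
#endif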
9 changes: 4 additions & 5 deletions arch/powerpc/kernel/kprobes.c
@@ -20,12 +20,12 @@
 #include <linux/kdebug.h>
 #include <linux/slab.h>
 #include <linux/moduleloader.h>
+#include <linux/set_memory.h>
 #include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
 #include <asm/sections.h>
 #include <asm/inst.h>
-#include <asm/set_memory.h>
 #include <linux/uaccess.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -134,10 +134,9 @@ void *alloc_insn_page(void)
 	if (!page)
 		return NULL;
 
-	if (strict_module_rwx_enabled()) {
-		set_memory_ro((unsigned long)page, 1);
-		set_memory_x((unsigned long)page, 1);
-	}
+	if (strict_module_rwx_enabled())
+		set_memory_rox((unsigned long)page, 1);
+
 	return page;
 }
 
2 changes: 1 addition & 1 deletion arch/sh/Kconfig
@@ -24,7 +24,7 @@ config SUPERH
 	select GENERIC_PCI_IOMAP if PCI
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
-	select GUP_GET_PTE_LOW_HIGH if X2TLB
+	select GUP_GET_PXX_LOW_HIGH if X2TLB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
10 changes: 8 additions & 2 deletions arch/sh/include/asm/pgtable-3level.h
@@ -28,9 +28,15 @@
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
-typedef struct { unsigned long long pmd; } pmd_t;
+typedef union {
+	struct {
+		unsigned long pmd_low;
+		unsigned long pmd_high;
+	};
+	unsigned long long pmd;
+} pmd_t;
 #define pmd_val(x)	((x).pmd)
-#define __pmd(x)	((pmd_t) { (x) } )
+#define __pmd(x)	((pmd_t) { .pmd = (x) } )
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
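
Note that pmd_t must be a union here, not a struct: pmd_val() and __pmd() operate on the 64-bit .pmd member, which has to alias the pmd_low/pmd_high halves. Those halves exist for the GUP_GET_PXX_LOW_HIGH configurations selected in the Kconfig hunks above: a lockless walker on a 32-bit kernel cannot load a 64-bit entry atomically, so it reads the two halves with a retry loop. A sketch of that pattern, modeled on the generic ptep_get_lockless() (illustrative, not the exact kernel source):

static inline pmd_t pmdp_get_lockless_sketch(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = READ_ONCE(pmdp->pmd_low);
		smp_rmb();
		pmd.pmd_high = READ_ONCE(pmdp->pmd_high);
		smp_rmb();
		/* Retry if the low half changed while we read the high half. */
	} while (unlikely(pmd.pmd_low != READ_ONCE(pmdp->pmd_low)));

	return pmd;
}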
8 changes: 0 additions & 8 deletions arch/um/include/asm/pgtable-3level.h
@@ -58,11 +58,7 @@
 #define pud_populate(mm, pud, pmd) \
 	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
 
-#ifdef CONFIG_64BIT
-#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-#else
 #define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
-#endif
 
 static inline int pgd_newpage(pgd_t pgd)
 {
@@ -71,11 +67,7 @@ static inline int pgd_newpage(pgd_t pgd)
 
 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
 
-#ifdef CONFIG_64BIT
-#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
-#else
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#endif
 
 static inline void pud_clear (pud_t *pud)
 {
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
@@ -159,7 +159,7 @@ config X86
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_GETTIMEOFDAY
 	select GENERIC_VDSO_TIME_NS
-	select GUP_GET_PTE_LOW_HIGH if X86_PAE
+	select GUP_GET_PXX_LOW_HIGH if X86_PAE
 	select HARDIRQS_SW_RESEND
 	select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
 	select HAVE_ACPI_APEI if ACPI
28 changes: 0 additions & 28 deletions arch/x86/include/asm/cmpxchg_32.h
@@ -7,34 +7,6 @@
  * you need to test for the feature in boot_cpu_data.
  */
 
-/*
- * CMPXCHG8B only writes to the target if we had the previous
- * value in registers, otherwise it acts as a read and gives us the
- * "new previous" value. That is why there is a loop. Preloading
- * EDX:EAX is a performance optimization: in the common case it means
- * we need only one locked operation.
- *
- * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
- * least an FPU save and/or %cr0.ts manipulation.
- *
- * cmpxchg8b must be used with the lock prefix here to allow the
- * instruction to be executed atomically. We need to have the reader
- * side to see the coherent 64bit value.
- */
-static inline void set_64bit(volatile u64 *ptr, u64 value)
-{
-	u32 low = value;
-	u32 high = value >> 32;
-	u64 prev = *ptr;
-
-	asm volatile("\n1:\t"
-		     LOCK_PREFIX "cmpxchg8b %0\n\t"
-		     "jnz 1b"
-		     : "=m" (*ptr), "+A" (prev)
-		     : "b" (low), "c" (high)
-		     : "memory");
-}
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define arch_cmpxchg64(ptr, o, n)					\
 	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
5 changes: 0 additions & 5 deletions arch/x86/include/asm/cmpxchg_64.h
@@ -2,11 +2,6 @@
 #ifndef _ASM_X86_CMPXCHG_64_H
 #define _ASM_X86_CMPXCHG_64_H
 
-static inline void set_64bit(volatile u64 *ptr, u64 val)
-{
-	*ptr = val;
-}
-
 #define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
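
Both set_64bit() removals follow from the PAE rework in the commit list: on 64-bit, a plain aligned store is already a single atomic write (so the cmpxchg_64.h and UML variants were pointless wrappers), and 32-bit PAE pagetable code now stores the two halves with WRITE_ONCE() in a deliberate order instead of a locked cmpxchg8b loop (see "x86/mm/pae: Use WRITE_ONCE()"). A sketch of that ordering discipline, with illustrative type and function names mirroring the x86 PAE layout rather than the exact kernel source:

typedef union {
	struct {
		unsigned long pte_low;	/* carries the present bit */
		unsigned long pte_high;
	};
	unsigned long long pte;
} pae_pte_t;

static inline void pae_set_pte_sketch(pae_pte_t *ptep, pae_pte_t pte)
{
	/* High half first: a lockless reader may see the old or the new
	 * entry, but never a present entry with a torn high half. */
	WRITE_ONCE(ptep->pte_high, pte.pte_high);
	smp_wmb();
	WRITE_ONCE(ptep->pte_low, pte.pte_low);
}

static inline void pae_pte_clear_sketch(pae_pte_t *ptep)
{
	/* Clearing goes the other way: kill the present bit first. */
	WRITE_ONCE(ptep->pte_low, 0);
	smp_wmb();
	WRITE_ONCE(ptep->pte_high, 0);
}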
4 changes: 0 additions & 4 deletions arch/x86/include/asm/cpu_entry_area.h
@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
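
These fixed-layout macros go away because, with the "Randomize the per-cpu entry areas" feature, a CPU's entry-area slot is no longer simply its CPU number: each CPU gets a random, unique slot index inside the fixed CPU_ENTRY_AREA map window. A simplified sketch of the idea, loosely following the 6.2 init_cea_offsets() logic (helper and variable names here are illustrative):

/* Assign each possible CPU a random slot in [0, max_cea),
 * rejecting duplicates so no two CPUs share an entry area. */
static __init void init_cea_offsets_sketch(void)
{
	unsigned int max_cea, i, j;

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	for_each_possible_cpu(i) {
		unsigned int cea;
again:
		cea = prandom_u32_max(max_cea);
		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;	/* slot taken, retry */
			if (i == j)
				break;
		}
		per_cpu(_cea_offset, i) = cea;
	}
}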
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kasan.h
@@ -28,9 +28,12 @@
 #ifdef CONFIG_KASAN
 void __init kasan_early_init(void);
 void __init kasan_init(void);
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
+static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
+						   int nid) { }
 #endif
 
 #endif
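
Background for the KASAN hunks in this series: generic KASAN maps every 8-byte granule of address space to one shadow byte, so any virtual range the kernel touches, including the CPU entry area, must have its shadow populated before use. The address translation is the standard generic-KASAN formula from include/linux/kasan.h:

/* One shadow byte covers 8 (1 << KASAN_SHADOW_SCALE_SHIFT) bytes:
 * shadow_addr = (addr >> 3) + KASAN_SHADOW_OFFSET */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}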
12 changes: 3 additions & 9 deletions arch/x86/include/asm/page_types.h
@@ -11,20 +11,14 @@
 #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 
-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
-
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
-/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast P*D_MASK to a signed type so that it is sign-extended if
    virtual addresses are 32-bits but physical addresses are larger
    (ie, 32-bit PAE). */
 #define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
-#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_MASK) & __PHYSICAL_MASK)
 
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
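
The (signed long) cast these masks rely on matters on 32-bit PAE, where virtual addresses are 32-bit but physical addresses are wider: sign-extending the all-ones top bits of the 32-bit mask keeps the high physical bits set when the mask is widened. A standalone demonstration (userspace C, with a hypothetical 40-bit physical mask standing in for __PHYSICAL_MASK):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pmd_mask  = ~((UINT32_C(1) << 21) - 1); /* 2MiB PMD_MASK: 0xffe00000 */
	uint64_t phys_mask = (UINT64_C(1) << 40) - 1;    /* say, 40-bit __PHYSICAL_MASK */

	/* Zero-extension loses bits 32-39 of the mask: prints 0xffe00000 */
	printf("unsigned: %#llx\n",
	       (unsigned long long)((uint64_t)pmd_mask & phys_mask));
	/* Sign-extension keeps them, as PHYSICAL_PMD_PAGE_MASK requires:
	 * prints 0xffffffe00000 */
	printf("signed:   %#llx\n",
	       (unsigned long long)((int64_t)(int32_t)pmd_mask & phys_mask));
	return 0;
}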
