Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "There's a reasonable amount here and the juicy details are all below.

  It's worth noting that the MTE/KASAN changes strayed outside of our
  usual directories due to core mm changes and some associated changes
  to some other architectures; Andrew asked for us to carry these [1]
  rather than take them via the -mm tree.

  Summary:

   - Optimise SVE switching for CPUs with 128-bit implementations.

   - Fix output format from SVE selftest.

   - Add support for versions 1.2 and 1.3 of the SMC calling
     convention.

   - Allow Pointer Authentication to be configured independently for
     kernel and userspace.

   - PMU driver cleanups for managing IRQ affinity and exposing event
     attributes via sysfs.

   - KASAN optimisations for both hardware tagging (MTE) and out-of-line
     software tagging implementations.

   - Relax frame record alignment requirements to facilitate 8-byte
     alignment with KASAN and Clang.

   - Cleanup of page-table definitions and removal of unused memory
     types.

   - Reduction of ARCH_DMA_MINALIGN back to 64 bytes.

   - Refactoring of our instruction decoding routines and addition of
     some missing encodings.

   - Entry code moved into C and hardened against harmful compiler
     instrumentation.

   - Update booting requirements for the FEAT_HCX feature, added to v8.7
     of the architecture.

   - Fix resume from idle when pNMI is being used.

   - Additional CPU sanity checks for MTE and preparatory changes for
     systems where not all of the CPUs support 32-bit EL0.

   - Update our kernel string routines to the latest Cortex Strings
     implementation.

   - Big cleanup of our cache maintenance routines, which were
     confusingly named and inconsistent in their implementations.

   - Tweak linker flags so that GDB can understand vmlinux when using
     RELR relocations.

   - Boot path cleanups to enable early initialisation of per-cpu
     operations needed by KCSAN.

   - Non-critical fixes and miscellaneous cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits)
  arm64: tlb: fix the TTL value of tlb_get_level
  arm64: Restrict undef hook for cpufeature registers
  arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
  arm64: insn: avoid circular include dependency
  arm64: smp: Bump debugging information print down to KERN_DEBUG
  drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
  perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
  arm64: suspend: Use cpuidle context helpers in cpu_suspend()
  PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
  arm64: Convert cpu_do_idle() to using cpuidle context helpers
  arm64: Add cpuidle context save/restore helpers
  arm64: head: fix code comments in set_cpu_boot_mode_flag
  arm64: mm: drop unused __pa(__idmap_text_start)
  arm64: mm: fix the count comments in compute_indices
  arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
  arm64: mm: Pass original fault address to handle_mm_fault()
  arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
  arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
  arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
  arm64: Conditionally configure PTR_AUTH key of the kernel.
  ...
torvalds committed Jun 28, 2021
2 parents 17cfb9d + 3d1bf78 commit 9840cfc
Showing 158 changed files with 3,370 additions and 2,563 deletions.
6 changes: 6 additions & 0 deletions Documentation/arm64/booting.rst
@@ -277,6 +277,12 @@ Before jumping into the kernel, the following conditions must be met:
 
     - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
 
+  For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
+
+  - If EL3 is present and the kernel is entered at EL2:
+
+    - SCR_EL3.HXEn (bit 38) must be initialised to 0b1.
+
   For CPUs with Advanced SIMD and floating point support:
 
   - If EL3 is present:
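
The requirement above falls on EL3 firmware rather than on the kernel itself. As a minimal sketch of how firmware running at EL3 could satisfy it before entering the kernel at EL2 (only SCR_EL3.HXEn, bit 38, comes from the text above; the helper name and code are a hypothetical illustration, not part of this patch):

/*
 * Hypothetical EL3 firmware sketch: grant EL2 access to HCRX_EL2 (FEAT_HCX)
 * before dropping to the kernel at EL2.
 */
#define SCR_EL3_HXEn    (1UL << 38)

static inline void scr_el3_enable_hcrx(void)
{
        unsigned long scr;

        asm volatile("mrs %0, scr_el3" : "=r" (scr));
        scr |= SCR_EL3_HXEn;
        asm volatile("msr scr_el3, %0" : : "r" (scr));
        asm volatile("isb");
}
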
2 changes: 1 addition & 1 deletion Makefile
@@ -1039,7 +1039,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
 endif
 
 ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
 endif
 
 # We never want expected sections to be placed heuristically by the
6 changes: 3 additions & 3 deletions arch/alpha/include/asm/page.h
@@ -17,9 +17,9 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)        clear_page(page)
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+        alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)      copy_page(to, from)
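
For context, the macro renamed above is used by the core mm fault path. A rough caller-side sketch (paraphrased; the function name is hypothetical and not taken from this diff):

/*
 * Rough caller-side sketch: the anonymous-fault path allocates a zeroed,
 * movable high page through the macro defined above.
 */
static struct page *anon_fault_alloc_example(struct vm_area_struct *vma,
                                             struct vm_fault *vmf)
{
        struct page *page;

        page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
        if (!page)
                return NULL;    /* caller would report VM_FAULT_OOM */
        return page;
}
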
5 changes: 5 additions & 0 deletions arch/arm/include/asm/cpuidle.h
@@ -50,4 +50,9 @@ extern int arm_cpuidle_suspend(int index);
 
 extern int arm_cpuidle_init(int cpu);
 
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c)         (void)c
+#define arm_cpuidle_restore_irq_context(c)      (void)c
+
 #endif
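
These 32-bit stubs exist so that code shared with arm64 can use one calling convention; the arm64 versions added later in this series actually save and restore interrupt-masking state (see the "Add cpuidle context save/restore helpers" commit in the list above). A hedged sketch of the intended call pattern in an idle/suspend path (the enclosing function is illustrative only):

/*
 * Illustrative only: bracket the firmware call with the new helpers. On
 * 32-bit ARM they compile away; on arm64 they preserve pNMI/priority state.
 */
static int example_enter_idle(int index)
{
        struct arm_cpuidle_irq_context context;
        int ret;

        arm_cpuidle_save_irq_context(&context);
        ret = arm_cpuidle_suspend(index);       /* platform/PSCI firmware entry */
        arm_cpuidle_restore_irq_context(&context);

        return ret;
}
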
4 changes: 2 additions & 2 deletions arch/arm/kernel/perf_event_v7.c
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
-               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
        } else {
                armv7_pmnc_select_counter(idx);
-               asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+               asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
        }
 }
 
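
The added cast is not cosmetic: `value` is a u64, and a 64-bit asm operand on 32-bit ARM occupies a register pair, with `%0` naming only one register of that pair, so which half reaches the mcr can depend on the configuration. Truncating to u32 first makes the single-register transfer explicit. A minimal illustration with a hypothetical helper (not from the patch):

/*
 * Minimal illustration: write only the low 32 bits of a 64-bit perf counter
 * value to the ARMv7 cycle counter via mcr.
 */
static inline void pmccntr_write_example(u64 value)
{
        u32 lo = value;         /* explicit truncation: mcr transfers one 32-bit GPR */

        asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (lo));
}
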
33 changes: 19 additions & 14 deletions arch/arm64/Kconfig
@@ -1481,12 +1481,6 @@ menu "ARMv8.3 architectural features"
 config ARM64_PTR_AUTH
        bool "Enable support for pointer authentication"
        default y
-       depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-       # Modern compilers insert a .note.gnu.property section note for PAC
-       # which is only understood by binutils starting with version 2.33.1.
-       depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
-       depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
-       depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
          Pointer authentication (part of the ARMv8.3 Extensions) provides
          instructions for signing and authenticating pointers against secret
@@ -1498,13 +1492,6 @@ config ARM64_PTR_AUTH
          for each process at exec() time, with these keys being
          context-switched along with the process.
 
-         If the compiler supports the -mbranch-protection or
-         -msign-return-address flag (e.g. GCC 7 or later), then this option
-         will also cause the kernel itself to be compiled with return address
-         protection. In this case, and if the target hardware is known to
-         support pointer authentication, then CONFIG_STACKPROTECTOR can be
-         disabled with minimal loss of protection.
-
          The feature is detected at runtime. If the feature is not present in
          hardware it will not be advertised to userspace/KVM guest nor will it
          be enabled.
@@ -1515,6 +1502,24 @@
          but with the feature disabled. On such a system, this option should
          not be selected.
 
+config ARM64_PTR_AUTH_KERNEL
+       bool "Use pointer authentication for kernel"
+       default y
+       depends on ARM64_PTR_AUTH
+       depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+       # Modern compilers insert a .note.gnu.property section note for PAC
+       # which is only understood by binutils starting with version 2.33.1.
+       depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+       depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+       depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+       help
+         If the compiler supports the -mbranch-protection or
+         -msign-return-address flag (e.g. GCC 7 or later), then this option
+         will cause the kernel itself to be compiled with return address
+         protection. In this case, and if the target hardware is known to
+         support pointer authentication, then CONFIG_STACKPROTECTOR can be
+         disabled with minimal loss of protection.
+
          This feature works with FUNCTION_GRAPH_TRACER option only if
          DYNAMIC_FTRACE_WITH_REGS is enabled.
 
@@ -1606,7 +1611,7 @@ config ARM64_BTI_KERNEL
        bool "Use Branch Target Identification for kernel"
        default y
        depends on ARM64_BTI
-       depends on ARM64_PTR_AUTH
+       depends on ARM64_PTR_AUTH_KERNEL
        depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
        depends on !CC_IS_GCC || GCC_VERSION >= 100100
2 changes: 1 addition & 1 deletion arch/arm64/Makefile
@@ -70,7 +70,7 @@ endif
 # off, this will be overridden if we are using branch protection.
 branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
 
-ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
 branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
 # We enable additional protection for leaf functions as there is some
 # narrow potential for ROP protection benefits and no substantial
9 changes: 1 addition & 8 deletions arch/arm64/include/asm/alternative-macros.h
@@ -3,12 +3,10 @@
 #define __ASM_ALTERNATIVE_MACROS_H
 
 #include <asm/cpucaps.h>
+#include <asm/insn-def.h>
 
 #define ARM64_CB_PATCH ARM64_NCAPS
 
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE       4
-
 #ifndef __ASSEMBLY__
 
 #include <linux/stringify.h>
@@ -197,11 +195,6 @@ alternative_endif
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)   \
        alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
-.macro user_alt, label, oldinstr, newinstr, cond
-9999:  alternative_insn "\oldinstr", "\newinstr", \cond
-       _asm_extable 9999b, \label
-.endm
-
 #endif /* __ASSEMBLY__ */
 
 /*
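
AARCH64_INSN_SIZE is not dropped; it moves into the new <asm/insn-def.h> header created by the insn refactoring in this pull, which this file now includes. Roughly, the new header provides the following (paraphrased, not the verbatim file):

/* Approximate contents of the new arch/arm64/include/asm/insn-def.h */
#ifndef __ASM_INSN_DEF_H
#define __ASM_INSN_DEF_H

/* A64 instructions are always 32 bits. */
#define AARCH64_INSN_SIZE       4

#endif /* __ASM_INSN_DEF_H */
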
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/arch_gicv3.h
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
 #define gic_read_lpir(c)               readq_relaxed(c)
 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
-#define gic_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l)   \
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
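
This hunk is typical of the cache-maintenance rename mentioned in the summary: __flush_dcache_area(addr, size) becomes dcache_clean_inval_poc(start, end), which takes an explicit virtual address range. A sketch of the conversion pattern; the prototype is paraphrased from asm/cacheflush.h in this series and the example function is hypothetical:

/* New-style interface: clean and invalidate [start, end) to the Point of Coherency. */
void dcache_clean_inval_poc(unsigned long start, unsigned long end);

static void flush_buffer_example(void *buf, unsigned long len)
{
        /* old: __flush_dcache_area(buf, len); */
        dcache_clean_inval_poc((unsigned long)buf, (unsigned long)buf + len);
}
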
6 changes: 6 additions & 0 deletions arch/arm64/include/asm/asm-prototypes.h
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
 long long __ashrti3(long long a, int b);
 long long __lshrti3(long long a, int b);
 
+/*
+ * This function uses a custom calling convention and cannot be called from C so
+ * this prototype is not entirely accurate.
+ */
+void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
+
 #endif /* __ASM_PROTOTYPES_H */
49 changes: 29 additions & 20 deletions arch/arm64/include/asm/asm_pointer_auth.h
@@ -7,19 +7,7 @@
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
 
-#ifdef CONFIG_ARM64_PTR_AUTH
-/*
- * thread.keys_user.ap* as offset exceeds the #imm offset range
- * so use the base value of ldp as thread.keys_user and offset as
- * thread.keys_user.ap*.
- */
-.macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
-       mov     \tmp1, #THREAD_KEYS_USER
-       add     \tmp1, \tsk, \tmp1
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
-       msr_s   SYS_APIAKEYLO_EL1, \tmp2
-       msr_s   SYS_APIAKEYHI_EL1, \tmp3
-.endm
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
 
 .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
        mov     \tmp1, #THREAD_KEYS_KERNEL
@@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
 .endm
 
+#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+.endm
+
+.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+.endm
+
+.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+.endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * thread.keys_user.ap* as offset exceeds the #imm offset range
+ * so use the base value of ldp as thread.keys_user and offset as
+ * thread.keys_user.ap*.
+ */
+.macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+       mov     \tmp1, #THREAD_KEYS_USER
+       add     \tmp1, \tsk, \tmp1
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+       msr_s   SYS_APIAKEYLO_EL1, \tmp2
+       msr_s   SYS_APIAKEYHI_EL1, \tmp3
+.endm
+
 .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
        mrs     \tmp1, id_aa64isar1_el1
        ubfx    \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -64,17 +79,11 @@ alternative_else_nop_endif
 .Lno_addr_auth\@:
 .endm
 
-#else /* CONFIG_ARM64_PTR_AUTH */
+#else /* !CONFIG_ARM64_PTR_AUTH */
 
 .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
 .endm
 
-.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
-.endm
-
-.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
-.endm
-
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
 #endif /* __ASM_ASM_POINTER_AUTH_H */
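
The user-key macro above indexes the per-task keys through the asm-offsets constants THREAD_KEYS_USER and PTRAUTH_USER_KEY_APIA. For orientation, the C-side layout those constants are generated from looks approximately like this (paraphrased from asm/pointer_auth.h, not part of the diff):

/* Approximate C-side layout behind THREAD_KEYS_USER / PTRAUTH_USER_KEY_APIA. */
struct ptrauth_key {
        unsigned long lo, hi;
};

struct ptrauth_keys_user {
        struct ptrauth_key apia;
        struct ptrauth_key apib;
        struct ptrauth_key apda;
        struct ptrauth_key apdb;
        struct ptrauth_key apga;
};
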
