Skip to content

Commit 9840cfc

Browse files
committed
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon: "There's a reasonable amount here and the juicy details are all below. It's worth noting that the MTE/KASAN changes strayed outside of our usual directories due to core mm changes and some associated changes to some other architectures; Andrew asked for us to carry these [1] rather than take them via the -mm tree. Summary: - Optimise SVE switching for CPUs with 128-bit implementations. - Fix output format from SVE selftest. - Add support for versions v1.2 and 1.3 of the SMC calling convention. - Allow Pointer Authentication to be configured independently for kernel and userspace. - PMU driver cleanups for managing IRQ affinity and exposing event attributes via sysfs. - KASAN optimisations for both hardware tagging (MTE) and out-of-line software tagging implementations. - Relax frame record alignment requirements to facilitate 8-byte alignment with KASAN and Clang. - Cleanup of page-table definitions and removal of unused memory types. - Reduction of ARCH_DMA_MINALIGN back to 64 bytes. - Refactoring of our instruction decoding routines and addition of some missing encodings. - Move entry code into C and harden it against harmful compiler instrumentation. - Update booting requirements for the FEAT_HCX feature, added to v8.7 of the architecture. - Fix resume from idle when pNMI is being used. - Additional CPU sanity checks for MTE and preparatory changes for systems where not all of the CPUs support 32-bit EL0. - Update our kernel string routines to the latest Cortex Strings implementation. - Big cleanup of our cache maintenance routines, which were confusingly named and inconsistent in their implementations. - Tweak linker flags so that GDB can understand vmlinux when using RELR relocations. - Boot path cleanups to enable early initialisation of per-cpu operations needed by KCSAN. 
- Non-critical fixes and miscellaneous cleanup" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits) arm64: tlb: fix the TTL value of tlb_get_level arm64: Restrict undef hook for cpufeature registers arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS arm64: insn: avoid circular include dependency arm64: smp: Bump debugging information print down to KERN_DEBUG drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe() perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number arm64: suspend: Use cpuidle context helpers in cpu_suspend() PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter() arm64: Convert cpu_do_idle() to using cpuidle context helpers arm64: Add cpuidle context save/restore helpers arm64: head: fix code comments in set_cpu_boot_mode_flag arm64: mm: drop unused __pa(__idmap_text_start) arm64: mm: fix the count comments in compute_indices arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan arm64: mm: Pass original fault address to handle_mm_fault() arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK] arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT arm64/mm: Drop SWAPPER_INIT_MAP_SIZE arm64: Conditionally configure PTR_AUTH key of the kernel. ...
2 parents 17cfb9d + 3d1bf78 commit 9840cfc

File tree

158 files changed

+3370
-2563
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

158 files changed

+3370
-2563
lines changed

Documentation/arm64/booting.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -277,6 +277,12 @@ Before jumping into the kernel, the following conditions must be met:
277277

278278
- SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
279279

280+
For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
281+
282+
- If EL3 is present and the kernel is entered at EL2:
283+
284+
- SCR_EL3.HXEn (bit 38) must be initialised to 0b1.
285+
280286
For CPUs with Advanced SIMD and floating point support:
281287

282288
- If EL3 is present:

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1039,7 +1039,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
10391039
endif
10401040

10411041
ifeq ($(CONFIG_RELR),y)
1042-
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
1042+
LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
10431043
endif
10441044

10451045
# We never want expected sections to be placed heuristically by the

arch/alpha/include/asm/page.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,9 @@
1717
extern void clear_page(void *page);
1818
#define clear_user_page(page, vaddr, pg) clear_page(page)
1919

20-
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
21-
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
22-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
20+
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
21+
alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
22+
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
2323

2424
extern void copy_page(void * _to, void * _from);
2525
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

arch/arm/include/asm/cpuidle.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,4 +50,9 @@ extern int arm_cpuidle_suspend(int index);
5050

5151
extern int arm_cpuidle_init(int cpu);
5252

53+
struct arm_cpuidle_irq_context { };
54+
55+
#define arm_cpuidle_save_irq_context(c) (void)c
56+
#define arm_cpuidle_restore_irq_context(c) (void)c
57+
5358
#endif

arch/arm/kernel/perf_event_v7.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
773773
pr_err("CPU%u writing wrong counter %d\n",
774774
smp_processor_id(), idx);
775775
} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
776-
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
776+
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
777777
} else {
778778
armv7_pmnc_select_counter(idx);
779-
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
779+
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
780780
}
781781
}
782782

arch/arm64/Kconfig

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1481,12 +1481,6 @@ menu "ARMv8.3 architectural features"
14811481
config ARM64_PTR_AUTH
14821482
bool "Enable support for pointer authentication"
14831483
default y
1484-
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
1485-
# Modern compilers insert a .note.gnu.property section note for PAC
1486-
# which is only understood by binutils starting with version 2.33.1.
1487-
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
1488-
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
1489-
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
14901484
help
14911485
Pointer authentication (part of the ARMv8.3 Extensions) provides
14921486
instructions for signing and authenticating pointers against secret
@@ -1498,13 +1492,6 @@ config ARM64_PTR_AUTH
14981492
for each process at exec() time, with these keys being
14991493
context-switched along with the process.
15001494

1501-
If the compiler supports the -mbranch-protection or
1502-
-msign-return-address flag (e.g. GCC 7 or later), then this option
1503-
will also cause the kernel itself to be compiled with return address
1504-
protection. In this case, and if the target hardware is known to
1505-
support pointer authentication, then CONFIG_STACKPROTECTOR can be
1506-
disabled with minimal loss of protection.
1507-
15081495
The feature is detected at runtime. If the feature is not present in
15091496
hardware it will not be advertised to userspace/KVM guest nor will it
15101497
be enabled.
@@ -1515,6 +1502,24 @@ config ARM64_PTR_AUTH
15151502
but with the feature disabled. On such a system, this option should
15161503
not be selected.
15171504

1505+
config ARM64_PTR_AUTH_KERNEL
1506+
bool "Use pointer authentication for kernel"
1507+
default y
1508+
depends on ARM64_PTR_AUTH
1509+
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
1510+
# Modern compilers insert a .note.gnu.property section note for PAC
1511+
# which is only understood by binutils starting with version 2.33.1.
1512+
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
1513+
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
1514+
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
1515+
help
1516+
If the compiler supports the -mbranch-protection or
1517+
-msign-return-address flag (e.g. GCC 7 or later), then this option
1518+
will cause the kernel itself to be compiled with return address
1519+
protection. In this case, and if the target hardware is known to
1520+
support pointer authentication, then CONFIG_STACKPROTECTOR can be
1521+
disabled with minimal loss of protection.
1522+
15181523
This feature works with FUNCTION_GRAPH_TRACER option only if
15191524
DYNAMIC_FTRACE_WITH_REGS is enabled.
15201525

@@ -1606,7 +1611,7 @@ config ARM64_BTI_KERNEL
16061611
bool "Use Branch Target Identification for kernel"
16071612
default y
16081613
depends on ARM64_BTI
1609-
depends on ARM64_PTR_AUTH
1614+
depends on ARM64_PTR_AUTH_KERNEL
16101615
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
16111616
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
16121617
depends on !CC_IS_GCC || GCC_VERSION >= 100100

arch/arm64/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ endif
7070
# off, this will be overridden if we are using branch protection.
7171
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
7272

73-
ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
73+
ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
7474
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
7575
# We enable additional protection for leaf functions as there is some
7676
# narrow potential for ROP protection benefits and no substantial

arch/arm64/include/asm/alternative-macros.h

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,10 @@
33
#define __ASM_ALTERNATIVE_MACROS_H
44

55
#include <asm/cpucaps.h>
6+
#include <asm/insn-def.h>
67

78
#define ARM64_CB_PATCH ARM64_NCAPS
89

9-
/* A64 instructions are always 32 bits. */
10-
#define AARCH64_INSN_SIZE 4
11-
1210
#ifndef __ASSEMBLY__
1311

1412
#include <linux/stringify.h>
@@ -197,11 +195,6 @@ alternative_endif
197195
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
198196
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
199197

200-
.macro user_alt, label, oldinstr, newinstr, cond
201-
9999: alternative_insn "\oldinstr", "\newinstr", \cond
202-
_asm_extable 9999b, \label
203-
.endm
204-
205198
#endif /* __ASSEMBLY__ */
206199

207200
/*

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
124124
#define gic_read_lpir(c) readq_relaxed(c)
125125
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
126126

127-
#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
127+
#define gic_flush_dcache_to_poc(a,l) \
128+
dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
128129

129130
#define gits_read_baser(c) readq_relaxed(c)
130131
#define gits_write_baser(v, c) writeq_relaxed(v, c)

arch/arm64/include/asm/asm-prototypes.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
2323
long long __ashrti3(long long a, int b);
2424
long long __lshrti3(long long a, int b);
2525

26+
/*
27+
* This function uses a custom calling convention and cannot be called from C so
28+
* this prototype is not entirely accurate.
29+
*/
30+
void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
31+
2632
#endif /* __ASM_PROTOTYPES_H */

arch/arm64/include/asm/asm_pointer_auth.h

Lines changed: 29 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -7,19 +7,7 @@
77
#include <asm/cpufeature.h>
88
#include <asm/sysreg.h>
99

10-
#ifdef CONFIG_ARM64_PTR_AUTH
11-
/*
12-
* thread.keys_user.ap* as offset exceeds the #imm offset range
13-
* so use the base value of ldp as thread.keys_user and offset as
14-
* thread.keys_user.ap*.
15-
*/
16-
.macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
17-
mov \tmp1, #THREAD_KEYS_USER
18-
add \tmp1, \tsk, \tmp1
19-
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
20-
msr_s SYS_APIAKEYLO_EL1, \tmp2
21-
msr_s SYS_APIAKEYHI_EL1, \tmp3
22-
.endm
10+
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
2311

2412
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
2513
mov \tmp1, #THREAD_KEYS_KERNEL
@@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
4230
alternative_else_nop_endif
4331
.endm
4432

33+
#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
34+
35+
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
36+
.endm
37+
38+
.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
39+
.endm
40+
41+
.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
42+
.endm
43+
44+
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
45+
46+
#ifdef CONFIG_ARM64_PTR_AUTH
47+
/*
48+
* thread.keys_user.ap* as offset exceeds the #imm offset range
49+
* so use the base value of ldp as thread.keys_user and offset as
50+
* thread.keys_user.ap*.
51+
*/
52+
.macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
53+
mov \tmp1, #THREAD_KEYS_USER
54+
add \tmp1, \tsk, \tmp1
55+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
56+
msr_s SYS_APIAKEYLO_EL1, \tmp2
57+
msr_s SYS_APIAKEYHI_EL1, \tmp3
58+
.endm
59+
4560
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
4661
mrs \tmp1, id_aa64isar1_el1
4762
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -64,17 +79,11 @@ alternative_else_nop_endif
6479
.Lno_addr_auth\@:
6580
.endm
6681

67-
#else /* CONFIG_ARM64_PTR_AUTH */
82+
#else /* !CONFIG_ARM64_PTR_AUTH */
6883

6984
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
7085
.endm
7186

72-
.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
73-
.endm
74-
75-
.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
76-
.endm
77-
7887
#endif /* CONFIG_ARM64_PTR_AUTH */
7988

8089
#endif /* __ASM_ASM_POINTER_AUTH_H */

0 commit comments

Comments
 (0)