
Commit 88f0912 (merge of 2 parents: 0c5ade7 + 27f2b9f)

Merge branch 'for-next/stage1-lpa2' into for-next/core

* for-next/stage1-lpa2: (48 commits)
  : Add support for LPA2 and WXN and stage 1
  arm64/mm: Avoid ID mapping of kpti flag if it is no longer needed
  arm64/mm: Use generic __pud_free() helper in pud_free() implementation
  arm64: gitignore: ignore relacheck
  arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and VARange
  arm64: mm: Make PUD folding check in set_pud() a runtime check
  arm64: mm: add support for WXN memory translation attribute
  mm: add arch hook to validate mmap() prot flags
  arm64: defconfig: Enable LPA2 support
  arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs
  arm64: kvm: avoid CONFIG_PGTABLE_LEVELS for runtime levels
  arm64: ptdump: Deal with translation levels folded at runtime
  arm64: ptdump: Disregard unaddressable VA space
  arm64: mm: Add support for folding PUDs at runtime
  arm64: kasan: Reduce minimum shadow alignment and enable 5 level paging
  arm64: mm: Add 5 level paging support to fixmap and swapper handling
  arm64: Enable LPA2 at boot if supported by the system
  arm64: mm: add LPA2 and 5 level paging support to G-to-nG conversion
  arm64: mm: Add definitions to support 5 levels of paging
  arm64: mm: Add LPA2 support to phys<->pte conversion routines
  arm64: mm: Wire up TCR.DS bit to PTE shareability fields
  ...


55 files changed: +1949 / -1124 lines

arch/arm64/Kconfig

Lines changed: 27 additions & 11 deletions
@@ -165,7 +165,7 @@ config ARM64
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
-	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+	select HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
@@ -370,7 +370,9 @@ config PGTABLE_LEVELS
 	default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
 	default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
+	default 4 if ARM64_16K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
+	default 5 if ARM64_4K_PAGES && ARM64_VA_BITS_52

 config ARCH_SUPPORTS_UPROBES
 	def_bool y
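Note: the new defaults fall out of the translation-table arithmetic. Each level resolves PAGE_SHIFT - 3 bits of virtual address, since a page holds 2^(PAGE_SHIFT - 3) eight-byte descriptors. A standalone sketch (not kernel code, but equivalent to what the kernel's ARM64_HW_PGTABLE_LEVELS() macro computes):

#include <stdio.h>

static int pgtable_levels(int va_bits, int page_shift)
{
	int bits_per_level = page_shift - 3;

	/* round up the VA bits left over after the page offset */
	return (va_bits - page_shift + bits_per_level - 1) / bits_per_level;
}

int main(void)
{
	printf("4k/52-bit:  %d levels\n", pgtable_levels(52, 12));	/* 5 */
	printf("16k/52-bit: %d levels\n", pgtable_levels(52, 14));	/* 4 */
	printf("64k/52-bit: %d levels\n", pgtable_levels(52, 16));	/* 3 */
	return 0;
}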
@@ -398,13 +400,13 @@ config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN_GENERIC || KASAN_SW_TAGS
-	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
-	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && !KASAN_SW_TAGS
+	default 0xdfffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && !KASAN_SW_TAGS
 	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
 	default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
 	default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
-	default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
-	default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+	default 0xefff800000000000 if (ARM64_VA_BITS_48 || (ARM64_VA_BITS_52 && !ARM64_16K_PAGES)) && KASAN_SW_TAGS
+	default 0xefffc00000000000 if (ARM64_VA_BITS_47 || ARM64_VA_BITS_52) && ARM64_16K_PAGES && KASAN_SW_TAGS
 	default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
 	default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
 	default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
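Note: the 52-bit rows share the offset of the smaller VA size for the same granule (48-bit for 4k, 47-bit for 16k), consistent with the fallback VA sizes a 52-bit kernel uses when the hardware lacks LVA/LPA2 (see the offset_ttbr1 comment later in this commit). A standalone sketch of the underlying relation:

/*
 * Sketch only (not kernel code): generic KASAN maps each 8 bytes of
 * address space to one shadow byte,
 *   shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * With the 48-bit/!KASAN_SW_TAGS offset above, the shadow of the start
 * of the 48-bit kernel VA space lands at 0xffff600000000000.
 */
#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3	/* generic KASAN; SW_TAGS uses 4 */

static uint64_t kasan_mem_to_shadow(uint64_t addr, uint64_t offset)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + offset;
}

int main(void)
{
	uint64_t va_start = 0xffff000000000000ULL;	/* 48-bit VA space start */
	uint64_t offset = 0xdfff800000000000ULL;	/* default from the hunk above */

	printf("shadow(va_start) = %#llx\n",
	       (unsigned long long)kasan_mem_to_shadow(va_start, offset));
	return 0;
}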
@@ -1280,9 +1282,7 @@ endchoice

 choice
 	prompt "Virtual address space size"
-	default ARM64_VA_BITS_39 if ARM64_4K_PAGES
-	default ARM64_VA_BITS_47 if ARM64_16K_PAGES
-	default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+	default ARM64_VA_BITS_52
 	help
 	  Allows choosing one of multiple possible virtual address
 	  space sizes. The level of translation table is determined by

@@ -1309,7 +1309,7 @@ config ARM64_VA_BITS_48

 config ARM64_VA_BITS_52
 	bool "52-bit"
-	depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
 	help
 	  Enable 52-bit virtual addressing for userspace when explicitly
 	  requested via a hint to mmap(). The kernel will also use 52-bit

@@ -1356,10 +1356,11 @@ choice

 config ARM64_PA_BITS_48
 	bool "48-bit"
+	depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52

 config ARM64_PA_BITS_52
-	bool "52-bit (ARMv8.2)"
-	depends on ARM64_64K_PAGES
+	bool "52-bit"
+	depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
 	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
 	help
 	  Enable support for a 52-bit physical address space, introduced as

@@ -1376,6 +1377,10 @@ config ARM64_PA_BITS
 	default 48 if ARM64_PA_BITS_48
 	default 52 if ARM64_PA_BITS_52

+config ARM64_LPA2
+	def_bool y
+	depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+
 choice
 	prompt "Endianness"
 	default CPU_LITTLE_ENDIAN
@@ -1602,6 +1607,17 @@ config RODATA_FULL_DEFAULT_ENABLED
 	  This requires the linear region to be mapped down to pages,
 	  which may adversely affect performance in some cases.

+config ARM64_WXN
+	bool "Enable WXN attribute so all writable mappings are non-exec"
+	help
+	  Set the WXN bit in the SCTLR system register so that all writable
+	  mappings are treated as if the PXN/UXN bit is set as well.
+	  If this is set to Y, it can still be disabled at runtime by
+	  passing 'arm64.nowxn' on the kernel command line.
+
+	  This should only be set if no software needs to be supported that
+	  relies on being able to execute from writable mappings.
+
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	help
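Note: WXN (SCTLR_ELx.WXN) makes every writable mapping behave as non-executable, which is why the series also carries "mm: add arch hook to validate mmap() prot flags" (see the commit list above): a PROT_WRITE|PROT_EXEC request can never be honoured under WXN, so it is better refused up front than silently degraded by the hardware. A runnable userspace sketch of that policy check; the in-kernel hook differs in name and plumbing:

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* reject writable+executable requests when WXN is in force */
static bool wxn_prot_valid(unsigned long prot, bool wxn_enabled)
{
	if (!wxn_enabled)
		return true;

	return (prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC);
}

int main(void)
{
	printf("rw-: %s\n", wxn_prot_valid(PROT_READ | PROT_WRITE, true) ?
	       "ok" : "rejected");
	printf("rwx: %s\n", wxn_prot_valid(PROT_READ | PROT_WRITE | PROT_EXEC,
					   true) ? "ok" : "rejected");
	return 0;
}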

arch/arm64/configs/defconfig

Lines changed: 0 additions & 1 deletion
@@ -76,7 +76,6 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VISCONTI=y
 CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZYNQMP=y
-CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_SCHED_SMT=y
 CONFIG_NUMA=y

arch/arm64/include/asm/archrandom.h

Lines changed: 0 additions & 2 deletions
@@ -129,6 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
 	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
 }

-u64 kaslr_early_init(void *fdt);
-
 #endif /* _ASM_ARCHRANDOM_H */

arch/arm64/include/asm/assembler.h

Lines changed: 19 additions & 36 deletions
@@ -341,20 +341,6 @@ alternative_cb_end
 	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
 	.endm

-/*
- * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
- *
- * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
- * this number conveniently equals the number of leading zeroes in
- * the physical address of _end.
- */
-	.macro	idmap_get_t0sz, reg
-	adrp	\reg, _end
-	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
-	clz	\reg, \reg
-	.endm
-
 /*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  * ID_AA64MMFR0_EL1.PARange value
@@ -586,18 +572,27 @@ alternative_endif
 	.endm

 /*
- * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
+ * If the kernel is built for 52-bit virtual addressing but the hardware only
+ * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
+ * but we have to add an offset so that the TTBR1 address corresponds with the
+ * pgdir entry that covers the lowest 48-bit addressable VA.
+ *
+ * Note that this trick is only used for LVA/64k pages - LPA2/4k pages uses an
+ * additional paging level, and on LPA2/16k pages, we would end up with a root
+ * level table with only 2 entries, which is suboptimal in terms of TLB
+ * utilization, so there we fall back to 47 bits of translation if LPA2 is not
+ * supported.
+ *
  * orr is used as it can cover the immediate value (and is idempotent).
- * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
  * ttbr: Value of ttbr to set, modified.
  */
 	.macro	offset_ttbr1, ttbr, tmp
-#ifdef CONFIG_ARM64_VA_BITS_52
-	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
-	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
-	cbnz	\tmp, .Lskipoffs_\@
-	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-.Lskipoffs_\@ :
+#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
+	mrs	\tmp, tcr_el1
+	and	\tmp, \tmp, #TCR_T1SZ_MASK
+	cmp	\tmp, #TCR_T1SZ(VA_BITS_MIN)
+	orr	\tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
+	csel	\ttbr, \tmp, \ttbr, eq
 #endif
 	.endm

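Note: the decision now keys off TCR_EL1.T1SZ rather than ID_AA64MMFR2_EL1.VARange, since with LPA2 the VA size is chosen at boot. In C terms the rewritten macro does the following (sketch only, reusing the macro's own symbols; the real logic must stay in assembly because it runs in early boot and resume paths):

static inline u64 offset_ttbr1(u64 ttbr, u64 tcr)
{
	/*
	 * If a 52-bit-VA kernel (LVA/64k, i.e. not LPA2) finds TCR_EL1.T1SZ
	 * still programmed for VA_BITS_MIN (48 bits), the hardware lacks
	 * 52-bit VAs: offset the TTBR1 base to the pgd entry covering the
	 * lowest 48-bit addressable VA.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) &&
	    !IS_ENABLED(CONFIG_ARM64_LPA2) &&
	    (tcr & TCR_T1SZ_MASK) == TCR_T1SZ(VA_BITS_MIN))
		ttbr |= TTBR1_BADDR_4852_OFFSET;

	return ttbr;
}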
@@ -619,25 +614,13 @@ alternative_endif

 	.macro	phys_to_pte, pte, phys
 #ifdef CONFIG_ARM64_PA_BITS_52
-	/*
-	 * We assume \phys is 64K aligned and this is guaranteed by only
-	 * supporting this configuration with 64K pages.
-	 */
-	orr	\pte, \phys, \phys, lsr #36
-	and	\pte, \pte, #PTE_ADDR_MASK
+	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
+	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
 #else
 	mov	\pte, \phys
 #endif
 	.endm

-	.macro	pte_to_phys, phys, pte
-	and	\phys, \pte, #PTE_ADDR_MASK
-#ifdef CONFIG_ARM64_PA_BITS_52
-	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
-	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
-#endif
-	.endm
-
 /*
  * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
  */
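Note: the hardcoded "lsr #36" becomes PTE_ADDR_HIGH_SHIFT so the same macro serves both 52-bit layouts. A runnable sketch of the packing for the pre-LPA2 64k/LVA layout, where PA bits [51:48] are stored in PTE bits [15:12]; the constants are written out here for illustration rather than taken from the headers, and the combined low/high mask corresponds to PHYS_TO_PTE_ADDR_MASK in the hunk (under LPA2, PA[51:50] live in PTE[9:8] instead, with a different shift):

#include <stdint.h>
#include <stdio.h>

/* 64k-page/LVA layout: PA[47:16] in PTE[47:16], PA[51:48] in PTE[15:12] */
#define PTE_ADDR_LOW		0x0000ffffffff0000ULL
#define PTE_ADDR_HIGH		0x000000000000f000ULL
#define PTE_ADDR_HIGH_SHIFT	36

static uint64_t phys_to_pte(uint64_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) &
	       (PTE_ADDR_LOW | PTE_ADDR_HIGH);
}

static uint64_t pte_to_phys(uint64_t pte)
{
	return (pte & PTE_ADDR_LOW) |
	       ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}

int main(void)
{
	uint64_t pa = 0x000fcafe00010000ULL;	/* a 52-bit, 64k-aligned PA */
	uint64_t pte = phys_to_pte(pa);

	printf("pte  = %#llx\n", (unsigned long long)pte);
	printf("phys = %#llx\n", (unsigned long long)pte_to_phys(pte));
	return 0;
}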

arch/arm64/include/asm/cpufeature.h

Lines changed: 116 additions & 0 deletions
@@ -17,6 +17,8 @@

 #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
 #define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
+#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
+#define ARM64_SW_FEATURE_OVERRIDE_NOWXN		12

 #ifndef __ASSEMBLY__

@@ -910,7 +912,9 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

+extern struct arm64_ftr_override id_aa64mmfr0_override;
 extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64mmfr2_override;
 extern struct arm64_ftr_override id_aa64pfr0_override;
 extern struct arm64_ftr_override id_aa64pfr1_override;
 extern struct arm64_ftr_override id_aa64zfr0_override;
@@ -920,9 +924,121 @@ extern struct arm64_ftr_override id_aa64isar2_override;

 extern struct arm64_ftr_override arm64_sw_feature_override;

+static inline
+u64 arm64_apply_feature_override(u64 val, int feat, int width,
+				 const struct arm64_ftr_override *override)
+{
+	u64 oval = override->val;
+
+	/*
+	 * When it encounters an invalid override (e.g., an override that
+	 * cannot be honoured due to a missing CPU feature), the early idreg
+	 * override code will set the mask to 0x0 and the value to non-zero for
+	 * the field in question. In order to determine whether the override is
+	 * valid or not for the field we are interested in, we first need to
+	 * disregard bits belonging to other fields.
+	 */
+	oval &= GENMASK_ULL(feat + width - 1, feat);
+
+	/*
+	 * The override is valid if all value bits are accounted for in the
+	 * mask. If so, replace the masked bits with the override value.
+	 */
+	if (oval == (oval & override->mask)) {
+		val &= ~override->mask;
+		val |= oval;
+	}
+
+	/* Extract the field from the updated value */
+	return cpuid_feature_extract_unsigned_field(val, feat);
+}
+
+static inline bool arm64_test_sw_feature_override(int feat)
+{
+	/*
+	 * Software features are pseudo CPU features that have no underlying
+	 * CPUID system register value to apply the override to.
+	 */
+	return arm64_apply_feature_override(0, feat, 4,
+					    &arm64_sw_feature_override);
+}
+
+static inline bool kaslr_disabled_cmdline(void)
+{
+	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
+}
+
+static inline bool arm64_wxn_enabled(void)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_WXN))
+		return false;
+	return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN);
+}
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);

+static inline bool cpu_has_bti(void)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_BTI))
+		return false;
+
+	return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
+					    ID_AA64PFR1_EL1_BT_SHIFT, 4,
+					    &id_aa64pfr1_override);
+}
+
+static inline bool cpu_has_pac(void)
+{
+	u64 isar1, isar2;
+
+	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+		return false;
+
+	isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+	isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+
+	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
+					 &id_aa64isar1_override))
+		return true;
+
+	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
+					 &id_aa64isar1_override))
+		return true;
+
+	return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
+					    &id_aa64isar2_override);
+}
+
+static inline bool cpu_has_lva(void)
+{
+	u64 mmfr2;
+
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	mmfr2 &= ~id_aa64mmfr2_override.mask;
+	mmfr2 |= id_aa64mmfr2_override.val;
+	return cpuid_feature_extract_unsigned_field(mmfr2,
+						    ID_AA64MMFR2_EL1_VARange_SHIFT);
+}
+
+static inline bool cpu_has_lpa2(void)
+{
+#ifdef CONFIG_ARM64_LPA2
+	u64 mmfr0;
+	int feat;
+
+	mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+	mmfr0 &= ~id_aa64mmfr0_override.mask;
+	mmfr0 |= id_aa64mmfr0_override.val;
+	feat = cpuid_feature_extract_signed_field(mmfr0,
+						  ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+
+	return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
+#else
+	return false;
+#endif
+}
+
 #endif /* __ASSEMBLY__ */

 #endif
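Note: the validity rule in arm64_apply_feature_override() (an override applies only when every value bit is covered by the mask) can be exercised in isolation. A standalone mock in plain C, with the kernel types and helpers replaced, showing a valid override versus one the early idreg code has invalidated:

#include <stdint.h>
#include <stdio.h>

struct ftr_override {
	uint64_t val;
	uint64_t mask;
};

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t apply_override(uint64_t val, int feat, int width,
			       const struct ftr_override *ovr)
{
	uint64_t oval = ovr->val & GENMASK_ULL(feat + width - 1, feat);

	/* valid only if every value bit is covered by the mask */
	if (oval == (oval & ovr->mask)) {
		val &= ~ovr->mask;
		val |= oval;
	}
	return (val >> feat) & ((1ULL << width) - 1);
}

int main(void)
{
	/* force the 4-bit field at bit 4 to 0 (a VARange-style override) */
	struct ftr_override valid   = { .val = 0x00, .mask = 0xf0 };
	/* invalidated override: mask cleared, value left non-zero */
	struct ftr_override invalid = { .val = 0x10, .mask = 0x00 };

	printf("%llu\n", (unsigned long long)
	       apply_override(0x20, 4, 4, &valid));	/* 0: override wins */
	printf("%llu\n", (unsigned long long)
	       apply_override(0x20, 4, 4, &invalid));	/* 2: register value kept */
	return 0;
}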

arch/arm64/include/asm/esr.h

Lines changed: 5 additions & 8 deletions
@@ -117,15 +117,9 @@
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
 #define ESR_ELx_FSC_PERM	(0x0C)
-#define ESR_ELx_FSC_SEA_TTW0	(0x14)
-#define ESR_ELx_FSC_SEA_TTW1	(0x15)
-#define ESR_ELx_FSC_SEA_TTW2	(0x16)
-#define ESR_ELx_FSC_SEA_TTW3	(0x17)
+#define ESR_ELx_FSC_SEA_TTW(n)	(0x14 + (n))
 #define ESR_ELx_FSC_SECC	(0x18)
-#define ESR_ELx_FSC_SECC_TTW0	(0x1c)
-#define ESR_ELx_FSC_SECC_TTW1	(0x1d)
-#define ESR_ELx_FSC_SECC_TTW2	(0x1e)
-#define ESR_ELx_FSC_SECC_TTW3	(0x1f)
+#define ESR_ELx_FSC_SECC_TTW(n)	(0x1c + (n))

 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT	(24)
@@ -394,6 +388,9 @@ static inline bool esr_is_data_abort(unsigned long esr)

 static inline bool esr_fsc_is_translation_fault(unsigned long esr)
 {
+	/* Translation fault, level -1 */
+	if ((esr & ESR_ELx_FSC) == 0b101011)
+		return true;
 	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
 }

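Two notes on the esr.h changes: 0b101011 is the architected fault status code for a translation fault at level -1, the extra root level that LPA2 5-level paging introduces; and the parameterised TTW macros rely on the SEA/SECC walk-fault codes being contiguous (0x14-0x17 and 0x1c-0x1f). A sketch of how a caller can consume the new macro form, using case ranges (a GNU C extension the kernel already relies on); the function name here is illustrative:

static bool esr_fsc_is_sea_ttw(unsigned long esr)
{
	/* external abort on translation table walk, any level 0..3 */
	switch (esr & ESR_ELx_FSC) {
	case ESR_ELx_FSC_SEA_TTW(0) ... ESR_ELx_FSC_SEA_TTW(3):
		return true;
	default:
		return false;
	}
}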
arch/arm64/include/asm/fixmap.h

Lines changed: 1 addition & 1 deletion
@@ -87,6 +87,7 @@ enum fixed_addresses {
 	FIX_PTE,
 	FIX_PMD,
 	FIX_PUD,
+	FIX_P4D,
 	FIX_PGD,

 	__end_of_fixed_addresses
@@ -100,7 +101,6 @@ enum fixed_addresses {
 #define FIXMAP_PAGE_IO	__pgprot(PROT_DEVICE_nGnRE)

 void __init early_fixmap_init(void);
-void __init fixmap_copy(pgd_t *pgdir);

 #define __early_set_fixmap __set_fixmap

arch/arm64/include/asm/kasan.h

Lines changed: 0 additions & 2 deletions
@@ -17,11 +17,9 @@

 asmlinkage void kasan_early_init(void);
 void kasan_init(void);
-void kasan_copy_shadow(pgd_t *pgdir);

 #else
 static inline void kasan_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif

 #endif
