Skip to content

Commit 14c127c

Browse files
stevecapper authored and willdeacon committed
arm64: mm: Flip kernel VA space
In order to allow for a KASAN shadow that changes size at boot time, one must fix the KASAN_SHADOW_END for both 48 & 52-bit VAs and "grow" the start address. Also, it is highly desirable to maintain the same function addresses in the kernel .text between VA sizes. Both of these requirements necessitate us to flip the kernel address space halves s.t. the direct linear map occupies the lower addresses. This patch puts the direct linear map in the lower addresses of the kernel VA range and everything else in the higher ranges. We need to adjust: *) KASAN shadow region placement logic, *) KASAN_SHADOW_OFFSET computation logic, *) virt_to_phys, phys_to_virt checks, *) page table dumper. These are all small changes, that need to take place atomically, so they are bundled into this commit. As part of the re-arrangement, a guard region of 2MB (to preserve alignment for fixed map) is added after the vmemmap. Otherwise the vmemmap could intersect with IS_ERR pointers. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Steve Capper <steve.capper@arm.com> Signed-off-by: Will Deacon <will@kernel.org>
1 parent 9cb1c5d commit 14c127c

File tree

8 files changed

+16

-22

lines changed

arch/arm64/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
130130
# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
131131
# in 32-bit arithmetic
132132
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
133-
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
133+
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 1 - 32))) \
134134
+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
135135
- (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
136136

arch/arm64/include/asm/memory.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,17 @@
3838
*/
3939
#define VA_BITS (CONFIG_ARM64_VA_BITS)
4040
#define VA_START (UL(0xffffffffffffffff) - \
41-
(UL(1) << VA_BITS) + 1)
42-
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
4341
(UL(1) << (VA_BITS - 1)) + 1)
42+
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
43+
(UL(1) << VA_BITS) + 1)
4444
#define KIMAGE_VADDR (MODULES_END)
4545
#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
4646
#define BPF_JIT_REGION_SIZE (SZ_128M)
4747
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
4848
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
4949
#define MODULES_VADDR (BPF_JIT_REGION_END)
5050
#define MODULES_VSIZE (SZ_128M)
51-
#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
51+
#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
5252
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
5353
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
5454
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
@@ -231,7 +231,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
231231
* space. Testing the top bit for the start of the region is a
232232
* sufficient check.
233233
*/
234-
#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1)))
234+
#define __is_lm_address(addr) (!((addr) & BIT(VA_BITS - 1)))
235235

236236
#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
237237
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)

arch/arm64/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
* and fixed mappings
2222
*/
2323
#define VMALLOC_START (MODULES_END)
24-
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
24+
#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
2525

2626
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
2727

arch/arm64/kernel/hibernate.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
496496
rc = -ENOMEM;
497497
goto out;
498498
}
499-
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
499+
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START);
500500
if (rc)
501501
goto out;
502502

arch/arm64/mm/dump.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@
2626
#include <asm/ptdump.h>
2727

2828
static const struct addr_marker address_markers[] = {
29+
{ PAGE_OFFSET, "Linear Mapping start" },
30+
{ VA_START, "Linear Mapping end" },
2931
#ifdef CONFIG_KASAN
3032
{ KASAN_SHADOW_START, "Kasan shadow start" },
3133
{ KASAN_SHADOW_END, "Kasan shadow end" },
@@ -42,7 +44,6 @@ static const struct addr_marker address_markers[] = {
4244
{ VMEMMAP_START, "vmemmap start" },
4345
{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
4446
#endif
45-
{ PAGE_OFFSET, "Linear mapping" },
4647
{ -1, NULL },
4748
};
4849

@@ -376,7 +377,7 @@ static void ptdump_initialize(void)
376377
static struct ptdump_info kernel_ptdump_info = {
377378
.mm = &init_mm,
378379
.markers = address_markers,
379-
.base_addr = VA_START,
380+
.base_addr = PAGE_OFFSET,
380381
};
381382

382383
void ptdump_check_wx(void)

arch/arm64/mm/init.c

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -301,21 +301,14 @@ static void __init fdt_enforce_memory_region(void)
301301

302302
void __init arm64_memblock_init(void)
303303
{
304-
const s64 linear_region_size = -(s64)PAGE_OFFSET;
304+
const s64 linear_region_size = BIT(VA_BITS - 1);
305305

306306
/* Handle linux,usable-memory-range property */
307307
fdt_enforce_memory_region();
308308

309309
/* Remove memory above our supported physical address size */
310310
memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
311311

312-
/*
313-
* Ensure that the linear region takes up exactly half of the kernel
314-
* virtual address space. This way, we can distinguish a linear address
315-
* from a kernel/module/vmalloc address by testing a single bit.
316-
*/
317-
BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
318-
319312
/*
320313
* Select a suitable value for the base of physical memory.
321314
*/

arch/arm64/mm/kasan_init.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -225,10 +225,10 @@ void __init kasan_init(void)
225225
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
226226
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
227227

228-
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
229-
(void *)mod_shadow_start);
228+
kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START),
229+
(void *)mod_shadow_start);
230230
kasan_populate_early_shadow((void *)kimg_shadow_end,
231-
kasan_mem_to_shadow((void *)PAGE_OFFSET));
231+
(void *)KASAN_SHADOW_END);
232232

233233
if (kimg_shadow_start > mod_shadow_end)
234234
kasan_populate_early_shadow((void *)mod_shadow_end,

arch/arm64/mm/mmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
398398
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
399399
phys_addr_t size, pgprot_t prot)
400400
{
401-
if (virt < VMALLOC_START) {
401+
if ((virt >= VA_START) && (virt < VMALLOC_START)) {
402402
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
403403
&phys, virt);
404404
return;
@@ -425,7 +425,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
425425
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
426426
phys_addr_t size, pgprot_t prot)
427427
{
428-
if (virt < VMALLOC_START) {
428+
if ((virt >= VA_START) && (virt < VMALLOC_START)) {
429429
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
430430
&phys, virt);
431431
return;

0 commit comments

Comments (0)