
Commit 9577dd7

xairy authored and torvalds committed
kasan: rename kasan_zero_page to kasan_early_shadow_page
With tag based KASAN mode the early shadow value is 0xff and not 0x00, so
this patch renames kasan_zero_(page|pte|pmd|pud|p4d) to
kasan_early_shadow_(page|pte|pmd|pud|p4d) to avoid confusion.

Link: http://lkml.kernel.org/r/3fed313280ebf4f88645f5b89ccbc066d320e177.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent b2f557e commit 9577dd7
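The rename matters because each KASAN mode has a different "unpoisoned" shadow encoding: generic KASAN marks a fully addressable 8-byte granule with a 0x00 shadow byte, while software tag-based KASAN stores pointer tags in shadow and uses 0xff as the default, match-all tag. A page of zeroes is therefore not a neutral early shadow value in tag-based mode, so "zero page" was misleading. Below is a minimal, self-contained C sketch of that distinction; KASAN_EARLY_SHADOW_INIT is a hypothetical stand-in, and the kernel's actual early shadow setup lives outside this diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for the mode-dependent "unpoisoned" fill value. */
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_EARLY_SHADOW_INIT 0xff    /* default tag: matches untagged pointers */
#else
#define KASAN_EARLY_SHADOW_INIT 0x00    /* whole 8-byte granule addressable */
#endif

static uint8_t kasan_early_shadow_page[PAGE_SIZE];

int main(void)
{
        /* One shared page of "unpoisoned" shadow backs all early mappings. */
        memset(kasan_early_shadow_page, KASAN_EARLY_SHADOW_INIT, PAGE_SIZE);
        printf("early shadow fill byte: 0x%02x\n", kasan_early_shadow_page[0]);
        return 0;
}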

File tree

8 files changed: +145 -114 lines changed

arch/arm64/mm/kasan_init.c

Lines changed: 24 additions & 19 deletions
@@ -47,8 +47,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
                                       bool early)
 {
         if (pmd_none(READ_ONCE(*pmdp))) {
-                phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
-                                             : kasan_alloc_zeroed_page(node);
+                phys_addr_t pte_phys = early ?
+                                __pa_symbol(kasan_early_shadow_pte)
+                                : kasan_alloc_zeroed_page(node);
                 __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
         }
 
@@ -60,8 +61,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                                       bool early)
 {
         if (pud_none(READ_ONCE(*pudp))) {
-                phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
-                                             : kasan_alloc_zeroed_page(node);
+                phys_addr_t pmd_phys = early ?
+                                __pa_symbol(kasan_early_shadow_pmd)
+                                : kasan_alloc_zeroed_page(node);
                 __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
         }
 
@@ -72,8 +74,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
                                       bool early)
 {
         if (pgd_none(READ_ONCE(*pgdp))) {
-                phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
-                                             : kasan_alloc_zeroed_page(node);
+                phys_addr_t pud_phys = early ?
+                                __pa_symbol(kasan_early_shadow_pud)
+                                : kasan_alloc_zeroed_page(node);
                 __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
         }
 
@@ -87,8 +90,9 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
         pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
 
         do {
-                phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
-                                              : kasan_alloc_zeroed_page(node);
+                phys_addr_t page_phys = early ?
+                                __pa_symbol(kasan_early_shadow_page)
+                                : kasan_alloc_zeroed_page(node);
                 next = addr + PAGE_SIZE;
                 set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
         } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ -205,14 +209,14 @@ void __init kasan_init(void)
         kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                            early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
-        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-                                   (void *)mod_shadow_start);
-        kasan_populate_zero_shadow((void *)kimg_shadow_end,
-                                   kasan_mem_to_shadow((void *)PAGE_OFFSET));
+        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+                                    (void *)mod_shadow_start);
+        kasan_populate_early_shadow((void *)kimg_shadow_end,
+                                    kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
         if (kimg_shadow_start > mod_shadow_end)
-                kasan_populate_zero_shadow((void *)mod_shadow_end,
-                                           (void *)kimg_shadow_start);
+                kasan_populate_early_shadow((void *)mod_shadow_end,
+                                            (void *)kimg_shadow_start);
 
         for_each_memblock(memory, reg) {
                 void *start = (void *)__phys_to_virt(reg->base);
@@ -227,14 +231,15 @@ void __init kasan_init(void)
         }
 
         /*
-         * KAsan may reuse the contents of kasan_zero_pte directly, so we
-         * should make sure that it maps the zero page read-only.
+         * KAsan may reuse the contents of kasan_early_shadow_pte directly,
+         * so we should make sure that it maps the zero page read-only.
          */
         for (i = 0; i < PTRS_PER_PTE; i++)
-                set_pte(&kasan_zero_pte[i],
-                        pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+                set_pte(&kasan_early_shadow_pte[i],
+                        pfn_pte(sym_to_pfn(kasan_early_shadow_page),
+                                PAGE_KERNEL_RO));
 
-        memset(kasan_zero_page, 0, PAGE_SIZE);
+        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
         cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
         /* At this point kasan is fully initialized. Enable error messages */
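For context on the kasan_mem_to_shadow() calls above: KASAN maps every 2^KASAN_SHADOW_SCALE_SHIFT bytes of address space to one shadow byte. A sketch of the translation, assuming the generic mode's scale shift of 3; KASAN_SHADOW_OFFSET is a per-architecture build-time constant, and the value shown here is only illustrative:

#define KASAN_SHADOW_SCALE_SHIFT 3      /* 8 bytes of memory per shadow byte */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL        /* illustrative value */

static inline void *kasan_mem_to_shadow(const void *addr)
{
        /* shadow address = (addr >> scale shift) + offset */
        return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET);
}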

arch/s390/mm/dump_pagetables.c

Lines changed: 9 additions & 8 deletions
@@ -111,11 +111,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 #ifdef CONFIG_KASAN
-static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+static void note_kasan_early_shadow_page(struct seq_file *m,
+                                         struct pg_state *st)
 {
         unsigned int prot;
 
-        prot = pte_val(*kasan_zero_pte) &
+        prot = pte_val(*kasan_early_shadow_pte) &
                 (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
         note_page(m, st, prot, 4);
 }
@@ -154,8 +155,8 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
         int i;
 
 #ifdef CONFIG_KASAN
-        if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
-                note_kasan_zero_page(m, st);
+        if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
+                note_kasan_early_shadow_page(m, st);
                 return;
         }
 #endif
@@ -185,8 +186,8 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
         int i;
 
 #ifdef CONFIG_KASAN
-        if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
-                note_kasan_zero_page(m, st);
+        if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
+                note_kasan_early_shadow_page(m, st);
                 return;
         }
 #endif
@@ -215,8 +216,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
         int i;
 
 #ifdef CONFIG_KASAN
-        if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
-                note_kasan_zero_page(m, st);
+        if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
+                note_kasan_early_shadow_page(m, st);
                 return;
         }
 #endif

arch/s390/mm/kasan_init.c

Lines changed: 20 additions & 13 deletions
@@ -107,7 +107,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                 if (mode == POPULATE_ZERO_SHADOW &&
                     IS_ALIGNED(address, PGDIR_SIZE) &&
                     end - address >= PGDIR_SIZE) {
-                        pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
+                        pgd_populate(&init_mm, pg_dir,
+                                        kasan_early_shadow_p4d);
                         address = (address + PGDIR_SIZE) & PGDIR_MASK;
                         continue;
                 }
@@ -120,7 +121,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                 if (mode == POPULATE_ZERO_SHADOW &&
                     IS_ALIGNED(address, P4D_SIZE) &&
                     end - address >= P4D_SIZE) {
-                        p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
+                        p4d_populate(&init_mm, p4_dir,
+                                        kasan_early_shadow_pud);
                         address = (address + P4D_SIZE) & P4D_MASK;
                         continue;
                 }
@@ -133,7 +135,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                 if (mode == POPULATE_ZERO_SHADOW &&
                     IS_ALIGNED(address, PUD_SIZE) &&
                     end - address >= PUD_SIZE) {
-                        pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
+                        pud_populate(&init_mm, pu_dir,
+                                        kasan_early_shadow_pmd);
                         address = (address + PUD_SIZE) & PUD_MASK;
                         continue;
                 }
@@ -146,7 +149,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                 if (mode == POPULATE_ZERO_SHADOW &&
                     IS_ALIGNED(address, PMD_SIZE) &&
                     end - address >= PMD_SIZE) {
-                        pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
+                        pmd_populate(&init_mm, pm_dir,
+                                        kasan_early_shadow_pte);
                         address = (address + PMD_SIZE) & PMD_MASK;
                         continue;
                 }
@@ -188,7 +192,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                                 pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                 break;
                         case POPULATE_ZERO_SHADOW:
-                                page = kasan_zero_page;
+                                page = kasan_early_shadow_page;
                                 pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                 break;
                         }
@@ -256,14 +260,14 @@ void __init kasan_early_init(void)
         unsigned long vmax;
         unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
         pte_t pte_z;
-        pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
-        pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
-        p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
+        pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+        pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+        p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
 
         kasan_early_detect_facilities();
         if (!has_nx)
                 pgt_prot &= ~_PAGE_NOEXEC;
-        pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
+        pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);
 
         memsize = get_mem_detect_end();
         if (!memsize)
@@ -292,10 +296,13 @@ void __init kasan_early_init(void)
         }
 
         /* init kasan zero shadow */
-        crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
-        crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
-        crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
-        memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
+        crst_table_init((unsigned long *)kasan_early_shadow_p4d,
+                        p4d_val(p4d_z));
+        crst_table_init((unsigned long *)kasan_early_shadow_pud,
+                        pud_val(pud_z));
+        crst_table_init((unsigned long *)kasan_early_shadow_pmd,
+                        pmd_val(pmd_z));
+        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
 
         shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
         pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);

arch/x86/mm/dump_pagetables.c

Lines changed: 6 additions & 5 deletions
@@ -377,18 +377,19 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 
 /*
  * This is an optimization for KASAN=y case. Since all kasan page tables
- * eventually point to the kasan_zero_page we could call note_page()
+ * eventually point to the kasan_early_shadow_page we could call note_page()
  * right away without walking through lower level page tables. This saves
  * us dozens of seconds (minutes for 5-level config) while checking for
  * W+X mapping or reading kernel_page_tables debugfs file.
  */
 static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                 void *pt)
 {
-        if (__pa(pt) == __pa(kasan_zero_pmd) ||
-            (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
-            __pa(pt) == __pa(kasan_zero_pud)) {
-                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
+        if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
+            (pgtable_l5_enabled() &&
+                        __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
+            __pa(pt) == __pa(kasan_early_shadow_pud)) {
+                pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
                 note_page(m, st, __pgprot(prot), 0, 5);
                 return true;
         }

arch/x86/mm/kasan_init_64.c

Lines changed: 29 additions & 26 deletions
@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
         unsigned long next;
 
         if (pgd_none(*pgd)) {
-                pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+                pgd_entry = __pgd(_KERNPG_TABLE |
+                                        __pa_nodebug(kasan_early_shadow_p4d));
                 set_pgd(pgd, pgd_entry);
         }
 
@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
                 if (!p4d_none(*p4d))
                         continue;
 
-                p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+                p4d_entry = __p4d(_KERNPG_TABLE |
+                                        __pa_nodebug(kasan_early_shadow_pud));
                 set_p4d(p4d, p4d_entry);
         } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
         int i;
-        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
-        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
-        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
-        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+        pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
+                                __PAGE_KERNEL | _PAGE_ENC;
+        pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
+        pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
+        p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
 
         /* Mask out unsupported __PAGE_KERNEL bits: */
         pte_val &= __default_kernel_pte_mask;
@@ -273,16 +276,16 @@ void __init kasan_early_init(void)
         p4d_val &= __default_kernel_pte_mask;
 
         for (i = 0; i < PTRS_PER_PTE; i++)
-                kasan_zero_pte[i] = __pte(pte_val);
+                kasan_early_shadow_pte[i] = __pte(pte_val);
 
         for (i = 0; i < PTRS_PER_PMD; i++)
-                kasan_zero_pmd[i] = __pmd(pmd_val);
+                kasan_early_shadow_pmd[i] = __pmd(pmd_val);
 
         for (i = 0; i < PTRS_PER_PUD; i++)
-                kasan_zero_pud[i] = __pud(pud_val);
+                kasan_early_shadow_pud[i] = __pud(pud_val);
 
         for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
-                kasan_zero_p4d[i] = __p4d(p4d_val);
+                kasan_early_shadow_p4d[i] = __p4d(p4d_val);
 
         kasan_map_early_shadow(early_top_pgt);
         kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +329,7 @@ void __init kasan_init(void)
 
         clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-        kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
+        kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
                         kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
         for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -338,50 +341,50 @@ void __init kasan_init(void)
 
         shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
         shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-        shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-                        PAGE_SIZE);
+        shadow_cpu_entry_begin = (void *)round_down(
+                        (unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
 
         shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
                                         CPU_ENTRY_AREA_MAP_SIZE);
         shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-        shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-                        PAGE_SIZE);
+        shadow_cpu_entry_end = (void *)round_up(
+                        (unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
-        kasan_populate_zero_shadow(
+        kasan_populate_early_shadow(
                 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                 shadow_cpu_entry_begin);
 
         kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
                               (unsigned long)shadow_cpu_entry_end, 0);
 
-        kasan_populate_zero_shadow(shadow_cpu_entry_end,
-                        kasan_mem_to_shadow((void *)__START_KERNEL_map));
+        kasan_populate_early_shadow(shadow_cpu_entry_end,
+                        kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
         kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                               (unsigned long)kasan_mem_to_shadow(_end),
                               early_pfn_to_nid(__pa(_stext)));
 
-        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                        (void *)KASAN_SHADOW_END);
+        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+                                        (void *)KASAN_SHADOW_END);
 
         load_cr3(init_top_pgt);
         __flush_tlb_all();
 
         /*
-         * kasan_zero_page has been used as early shadow memory, thus it may
-         * contain some garbage. Now we can clear and write protect it, since
-         * after the TLB flush no one should write to it.
+         * kasan_early_shadow_page has been used as early shadow memory, thus
+         * it may contain some garbage. Now we can clear and write protect it,
+         * since after the TLB flush no one should write to it.
          */
-        memset(kasan_zero_page, 0, PAGE_SIZE);
+        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
         for (i = 0; i < PTRS_PER_PTE; i++) {
                 pte_t pte;
                 pgprot_t prot;
 
                 prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
                 pgprot_val(prot) &= __default_kernel_pte_mask;
 
-                pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
-                set_pte(&kasan_zero_pte[i], pte);
+                pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
+                set_pte(&kasan_early_shadow_pte[i], pte);
         }
         /* Flush TLBs again to be sure that write protection applied. */
         __flush_tlb_all();
