@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		pgd_entry = __pgd(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_p4d));
 		set_pgd(pgd, pgd_entry);
 	}
 
@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 		if (!p4d_none(*p4d))
 			continue;
 
-		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		p4d_entry = __p4d(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_pud));
 		set_p4d(p4d, p4d_entry);
 	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
 	int i;
-	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
-	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
-	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
-	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
+				__PAGE_KERNEL | _PAGE_ENC;
+	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
+	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
 
 	/* Mask out unsupported __PAGE_KERNEL bits: */
 	pte_val &= __default_kernel_pte_mask;
@@ -273,16 +276,16 @@ void __init kasan_early_init(void)
 	p4d_val &= __default_kernel_pte_mask;
 
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		kasan_zero_pte[i] = __pte(pte_val);
+		kasan_early_shadow_pte[i] = __pte(pte_val);
 
 	for (i = 0; i < PTRS_PER_PMD; i++)
-		kasan_zero_pmd[i] = __pmd(pmd_val);
+		kasan_early_shadow_pmd[i] = __pmd(pmd_val);
 
 	for (i = 0; i < PTRS_PER_PUD; i++)
-		kasan_zero_pud[i] = __pud(pud_val);
+		kasan_early_shadow_pud[i] = __pud(pud_val);
 
 	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
-		kasan_zero_p4d[i] = __p4d(p4d_val);
+		kasan_early_shadow_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
 	kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +329,7 @@ void __init kasan_init(void)
 
 	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
+	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -338,50 +341,50 @@ void __init kasan_init(void)
 
 	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
 	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-						PAGE_SIZE);
+	shadow_cpu_entry_begin = (void *)round_down(
+			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
 
 	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
 					CPU_ENTRY_AREA_MAP_SIZE);
 	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-					PAGE_SIZE);
+	shadow_cpu_entry_end = (void *)round_up(
+			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
-	kasan_populate_zero_shadow(
+	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		shadow_cpu_entry_begin);
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
 			      (unsigned long)shadow_cpu_entry_end, 0);
 
-	kasan_populate_zero_shadow(shadow_cpu_entry_end,
-				kasan_mem_to_shadow((void *)__START_KERNEL_map));
+	kasan_populate_early_shadow(shadow_cpu_entry_end,
+			kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
 			      (unsigned long)kasan_mem_to_shadow(_end),
 			      early_pfn_to_nid(__pa(_stext)));
 
-	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-			(void *)KASAN_SHADOW_END);
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+			(void *)KASAN_SHADOW_END);
 
 	load_cr3(init_top_pgt);
 	__flush_tlb_all();
 
 	/*
-	 * kasan_zero_page has been used as early shadow memory, thus it may
-	 * contain some garbage. Now we can clear and write protect it, since
-	 * after the TLB flush no one should write to it.
+	 * kasan_early_shadow_page has been used as early shadow memory, thus
+	 * it may contain some garbage. Now we can clear and write protect it,
+	 * since after the TLB flush no one should write to it.
 	 */
-	memset(kasan_zero_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		pte_t pte;
 		pgprot_t prot;
 
 		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
 		pgprot_val(prot) &= __default_kernel_pte_mask;
 
-		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
-		set_pte(&kasan_zero_pte[i], pte);
+		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
+		set_pte(&kasan_early_shadow_pte[i], pte);
 	}
 	/* Flush TLBs again to be sure that write protection applied. */
 	__flush_tlb_all();