
Commit 4c585af

KAGA-KOKO authored and bp3tk0v committed
x86/boot/32: Temporarily map initrd for microcode loading
Early microcode loading on 32-bit runs in physical address mode because the initrd is not covered by the initial page tables. That results in a horrible mess all over the microcode loader code.

Provide a temporary mapping for the initrd in the initial page tables by appending it to the actual initial mapping starting with a new PGD or PMD depending on the configured page table levels ([non-]PAE).

The page table entries are located after _brk_end so they are not permanently using memory space. The mapping is invalidated right away in i386_start_kernel() after the early microcode loader has run.

This prepares for removing the physical address mode oddities from all over the microcode loader code, which in turn allows further cleanups.

Provide the map and unmap code and document the place where the microcode loader needs to be invoked with a comment.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231017211722.292291436@linutronix.de
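
For orientation, a minimal sketch (not part of this commit, and not the eventual loader code) of how a 32-bit microcode loader could consume the temporary mapping once it exists; the function name load_ucode_bsp_sketch() and the helper scan_initrd_for_ucode() are hypothetical placeholders, while boot_params and initrd_start_early are the real objects touched by this patch:

#include <asm/setup.h>		/* boot_params */
#include <asm/microcode.h>	/* initrd_start_early (added by this commit) */

#ifdef CONFIG_MICROCODE_INITRD32
/*
 * Sketch of a consumer: with the initrd appended to the initial page
 * tables, the loader can walk the image through a normal virtual
 * address (initrd_start_early, set up in mk_early_pgtbl_32()) instead
 * of doing physical-address-mode pointer fixups.
 */
static void __init load_ucode_bsp_sketch(void)
{
	unsigned long size = boot_params.hdr.ramdisk_size;
	void *start = (void *)initrd_start_early;

	if (!start || !size)
		return;

	/* Placeholder for the cpio walk that locates the microcode blob */
	scan_initrd_for_ucode(start, size);
}
#endif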
1 parent fdbd438 commit 4c585af

File tree: 2 files changed, +54 −2 lines

arch/x86/include/asm/microcode.h
Lines changed: 2 additions & 0 deletions

@@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { }
 static inline void microcode_bsp_resume(void) { }
 #endif
 
+extern unsigned long initrd_start_early;
+
 #ifdef CONFIG_CPU_SUP_INTEL
 /* Intel specific microcode defines. Public for IFS */
 struct microcode_header_intel {

arch/x86/kernel/head32.c
Lines changed: 52 additions & 2 deletions

@@ -29,11 +29,33 @@ static void __init i386_default_early_setup(void)
 	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
 }
 
+#ifdef CONFIG_MICROCODE_INITRD32
+unsigned long __initdata initrd_start_early;
+static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end;
+
+static void zap_early_initrd_mapping(void)
+{
+	pte_t *pl2p = initrd_pl2p_start;
+
+	for (; pl2p < initrd_pl2p_end; pl2p++) {
+		*pl2p = (pte_t){ .pte = 0 };
+
+		if (!IS_ENABLED(CONFIG_X86_PAE))
+			*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0};
+	}
+}
+#else
+static inline void zap_early_initrd_mapping(void) { }
+#endif
+
 asmlinkage __visible void __init __noreturn i386_start_kernel(void)
 {
 	/* Make sure IDT is set up before any exception happens */
 	idt_setup_early_handler();
 
+	/* load_ucode_bsp() */
+	zap_early_initrd_mapping();
+
 	cr4_init_shadow();
 
 	sanitize_boot_params(&boot_params);
@@ -105,9 +127,9 @@ static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_
 void __init __no_stack_protector mk_early_pgtbl_32(void)
 {
 	/* Enough space to fit pagetables for the low memory linear map */
-	const unsigned long limit = __pa_nodebug(_end) +
-		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
+	unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
 	pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base);
+	struct boot_params __maybe_unused *params;
 	pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base);
 	unsigned long *ptr;
 
@@ -120,4 +142,32 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
 
 	ptr = (unsigned long *)__pa_nodebug(&_brk_end);
 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
+
+#ifdef CONFIG_MICROCODE_INITRD32
+	/* Running on a hypervisor? */
+	if (native_cpuid_ecx(1) & BIT(31))
+		return;
+
+	params = (struct boot_params *)__pa_nodebug(&boot_params);
+	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
+		return;
+
+	/* Save the virtual start address */
+	ptr = (unsigned long *)__pa_nodebug(&initrd_start_early);
+	*ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET;
+	*ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK;
+
+	/* Save PLP2 for cleanup */
+	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start);
+	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
+
+	limit = (unsigned long)params->hdr.ramdisk_image;
+	pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit);
+	limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size;
+
+	init_map(pte, &ptep, &pl2p, limit);
+
+	ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end);
+	*ptr = (unsigned long)pl2p + PAGE_OFFSET;
+#endif
 }
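
One way to read the !CONFIG_X86_PAE branch in zap_early_initrd_mapping() above: with 2-level paging the initial page tables back both the low early mapping and the kernel mapping at PAGE_OFFSET, so mk_early_pgtbl_32() writes each page-directory entry for the appended initrd twice, PAGE_OFFSET >> PGDIR_SHIFT slots apart, and the zap loop clears both copies. The sketch below is illustration only, assuming the common PAGE_OFFSET of 0xC0000000 and the non-PAE PGDIR_SHIFT of 22; the EX_ names and ex_zap_one() are not kernel symbols:

/* Illustration only: mirrored PGD slots on 32-bit non-PAE. */
#define EX_PAGE_OFFSET	0xC0000000UL	/* typical CONFIG_PAGE_OFFSET */
#define EX_PGDIR_SHIFT	22		/* each PGD slot maps 4 MiB */

/*
 * Distance between the low copy of an entry and its PAGE_OFFSET twin:
 * 0xC0000000 >> 22 = 768 slots.
 */
#define EX_KERNEL_SLOT_OFFSET	(EX_PAGE_OFFSET >> EX_PGDIR_SHIFT)

/*
 * What one pass of the zap loop does conceptually for each entry that
 * mk_early_pgtbl_32() created for the initrd.
 */
static inline void ex_zap_one(unsigned long *pgd, unsigned int slot)
{
	pgd[slot] = 0;				/* low-mapping copy */
	pgd[slot + EX_KERNEL_SLOT_OFFSET] = 0;	/* PAGE_OFFSET copy */
}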
