
Commit 9d908c7

carlocaione authored and nashif committed
aarch64: Rewrite reset code using C
There is no strict reason to use assembly for the reset routine. Move as much code as possible to C code using the proper helpers.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
1 parent bba7abe commit 9d908c7
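
Note: the rewritten C code further down relies on per-register accessor helpers (read_*()/write_*(), isb(), and friends). As a rough sketch of how such helpers can be built, the snippet below wraps single mrs/msr instructions in inline assembly; the macro name and exact definitions are illustrative assumptions, not necessarily what Zephyr ships.

#include <stdint.h>

/* Illustrative only: generate read_<reg>()/write_<reg>() accessors for a
 * system register, one mrs/msr instruction each. Zephyr's real helpers may
 * be named and structured differently.
 */
#define MAKE_REG_HELPER(reg)						\
	static inline uint64_t read_##reg(void)				\
	{								\
		uint64_t val;						\
									\
		__asm__ volatile ("mrs %0, " #reg : "=r" (val));	\
		return val;						\
	}								\
	static inline void write_##reg(uint64_t val)			\
	{								\
		__asm__ volatile ("msr " #reg ", %0" :: "r" (val));	\
	}

MAKE_REG_HELPER(sctlr_el1)	/* read_sctlr_el1() / write_sctlr_el1() */
MAKE_REG_HELPER(vbar_el1)	/* read_vbar_el1()  / write_vbar_el1()  */

static inline void isb(void)
{
	__asm__ volatile ("isb" ::: "memory");
}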

File tree

10 files changed: +340 -197 lines changed


arch/arm/core/aarch64/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ zephyr_library_sources(
   irq_manage.c
   prep_c.c
   reset.S
+  reset.c
   switch.S
   thread.c
   vector_table.S

arch/arm/core/aarch64/reset.S

Lines changed: 90 additions & 98 deletions
@@ -4,12 +4,6 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-/*
- * Reset handler
- *
- * Reset handler that prepares the system for running C code.
- */
-
 #include <toolchain.h>
 #include <linker/sections.h>
 #include <arch/cpu.h>
@@ -19,128 +13,126 @@
 _ASM_FILE_PROLOGUE
 
 /*
- * Platform may do platform specific init at EL3.
- * The function implementation must preserve callee saved registers as per
- * AArch64 ABI PCS.
+ * Platform specific pre-C init code
+ *
+ * Note: - Stack is not yet available
+ *       - x23 must be preserved
  */
 
-WTEXT(z_arch_el3_plat_init)
-SECTION_FUNC(TEXT,z_arch_el3_plat_init)
-	ret
+WTEXT(z_arm64_el3_plat_prep_c)
+SECTION_FUNC(TEXT,z_arm64_el3_plat_prep_c)
+	ret
+
+WTEXT(z_arm64_el2_plat_prep_c)
+SECTION_FUNC(TEXT,z_arm64_el2_plat_prep_c)
+	ret
+
+WTEXT(z_arm64_el1_plat_prep_c)
+SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c)
+	ret
 
 /*
- * Reset vector
- *
- * Ran when the system comes out of reset. The processor is in thread mode with
- * privileged level. At this point, neither SP_EL0 nor SP_ELx point to a valid
- * area in SRAM.
- *
- * When these steps are completed, jump to z_arm64_prep_c(), which will finish
- * setting up the system for running C code.
+ * Set the minimum necessary to safely call C code
  */
 
-GTEXT(__reset)
-SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
+GTEXT(__reset_prep_c)
+SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset_prep_c)
+	/* return address: x23 */
+	mov	x23, x30
 
-GTEXT(__start)
-SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
+	switch_el x0, 3f, 2f, 1f
+3:
+	/* Reinitialize SCTLR from scratch in EL3 */
+	ldr	w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
+	msr	sctlr_el3, x0
 
-	/* Setup vector table */
-	adr	x19, _vector_table
+	/* Custom plat prep_c init */
+	bl	z_arm64_el3_plat_prep_c
 
-	switch_el x1, 3f, 2f, 1f
-3:
-	/*
-	 * Zephyr entry happened in EL3. Do EL3 specific init before
-	 * dropping to lower EL.
-	 */
+	b	out
+2:
+	/* Disable alignment fault checking */
+	mrs	x0, sctlr_el2
+	bic	x0, x0, SCTLR_A_BIT
+	msr	sctlr_el2, x0
+
+	/* Custom plat prep_c init */
+	bl	z_arm64_el2_plat_prep_c
 
-	/* Initialize VBAR */
-	msr	vbar_el3, x19
+	b	out
+1:
+	/* Disable alignment fault checking */
+	mrs	x0, sctlr_el1
+	bic	x0, x0, SCTLR_A_BIT
+	msr	sctlr_el1, x0
+
+	/* Custom plat prep_c init */
+	bl	z_arm64_el1_plat_prep_c
+
+out:
 	isb
 
-	/* Switch to SP_EL0 and setup the stack */
-	msr	spsel, #0
+	/* Select SP_EL0 */
+	msr	SPSel, #0
 
+	/* Initialize stack */
 	ldr	x0, =(z_interrupt_stacks)
 	add	x0, x0, #(CONFIG_ISR_STACK_SIZE)
 	mov	sp, x0
 
-	/* Initialize SCTLR_EL3 to reset value */
-	mov_imm	x1, SCTLR_EL3_RES1
-	mrs	x0, sctlr_el3
-	orr	x0, x0, x1
-	msr	sctlr_el3, x0
-	isb
+	ret	x23
 
-	/*
-	 * Disable access traps to EL3 for CPACR, Trace, FP, ASIMD,
-	 * SVE from lower EL.
-	 */
-	mov_imm	x0, CPTR_EL3_RES0
-	mov_imm	x1, (CPTR_EL3_TTA_BIT | CPTR_EL3_TFP_BIT | CPTR_EL3_TCPAC_BIT)
-	bic	x0, x0, x1
-	orr	x0, x0, #(CPTR_EL3_EZ_BIT)
-	msr	cptr_el3, x0
-	isb
+/*
+ * Reset vector
+ *
+ * Ran when the system comes out of reset. The processor is in thread mode with
+ * privileged level. At this point, neither SP_EL0 nor SP_ELx point to a valid
+ * area in SRAM.
+ */
 
-	/* Platform specific configurations needed in EL3 */
-	bl	z_arch_el3_plat_init
+GTEXT(__reset)
+SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
 
-	/* Enable access control configuration from lower EL */
-	mrs	x0, actlr_el3
-	orr	x0, x0, #(ACTLR_EL3_L2ACTLR_BIT | ACTLR_EL3_L2ECTLR_BIT \
-		| ACTLR_EL3_L2CTLR_BIT)
-	orr	x0, x0, #(ACTLR_EL3_CPUACTLR_BIT | ACTLR_EL3_CPUECTLR_BIT)
-	msr	actlr_el3, x0
+GTEXT(__start)
+SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
+	/* Mask all exceptions */
+	msr	DAIFSet, #0xf
 
-	/* Initialize SCTLR_EL1 to reset value */
-	mov_imm	x0, SCTLR_EL1_RES1
-	msr	sctlr_el1, x0
+	/* Prepare for calling C code */
+	bl	__reset_prep_c
 
-	/* Disable EA/IRQ/FIQ routing to EL3 and set EL1 to AArch64 */
-	mov	x0, xzr
-	orr	x0, x0, #(SCR_RW_BIT)
-	msr	scr_el3, x0
+	/* Platform hook for highest EL */
+	bl	z_arm64_el_highest_init
 
-	/* On eret return to secure EL1h with DAIF masked */
-	mov_imm	x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1H)
-	msr	spsr_el3, x0
+switch_el:
+	switch_el x0, 3f, 2f, 1f
+3:
+	/* EL3 init */
+	bl	z_arm64_el3_init
 
-	adr	x0, 1f
-	msr	elr_el3, x0
+	/* Get next EL */
+	adr	x0, switch_el
+	bl	z_arm64_el3_get_next_el
 	eret
 
 2:
-	/* Booting from EL2 is not supported */
-	b	.
-
-1:
-	/* Initialize VBAR */
-	msr	vbar_el1, x19
-	isb
+	/* EL2 init */
+	bl	z_arm64_el2_init
 
-	/* Switch to SP_EL0 and setup the stack */
-	msr	spsel, #0
-
-	ldr	x0, =(z_interrupt_stacks)
-	add	x0, x0, #(CONFIG_ISR_STACK_SIZE)
-	mov	sp, x0
+	/* Move to EL1 with all exceptions masked */
+	mov_imm	x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T)
+	msr	spsr_el2, x0
 
-	/* Disable access trapping in EL1 for NEON/FP */
-	mov_imm	x0, CPACR_EL1_FPEN_NOTRAP
-	msr	cpacr_el1, x0
+	adr	x0, 1f
+	msr	elr_el2, x0
+	eret
 
-	/* Enable the instruction cache and EL1 stack alignment check. */
-	mov_imm	x1, (SCTLR_I_BIT | SCTLR_SA_BIT)
-	mrs	x0, sctlr_el1
-	orr	x0, x0, x1
-	msr	sctlr_el1, x0
+1:
+	/* EL1 init */
+	bl	z_arm64_el1_init
 
-0:
+	/* Enable SError interrupts */
+	msr	DAIFClr, #(DAIFCLR_ABT_BIT)
 	isb
 
-	/* Enable the SError interrupt */
-	msr	daifclr, #(DAIFCLR_ABT_BIT)
-
-	bl	z_arm64_prep_c
+	b	z_arm64_prep_c
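
Note: both __reset_prep_c and __start dispatch on the current exception level through the switch_el macro. Conceptually the dispatch reads the CurrentEL system register, whose bits [3:2] encode the exception level. The C sketch below shows the equivalent check; the current_el() helper is hypothetical and not part of this commit.

#include <stdint.h>

/* Hypothetical helper, not part of this commit: return the current
 * exception level (0..3). CurrentEL holds the EL in bits [3:2].
 */
static inline unsigned int current_el(void)
{
	uint64_t el;

	__asm__ volatile ("mrs %0, CurrentEL" : "=r" (el));
	return (unsigned int)((el >> 2) & 0x3U);
}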

arch/arm/core/aarch64/reset.c

Lines changed: 146 additions & 0 deletions
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel_internal.h>
+#include "vector_table.h"
+
+void __weak z_arm64_el_highest_plat_init(void)
+{
+	/* do nothing */
+}
+
+void __weak z_arm64_el3_plat_init(void)
+{
+	/* do nothing */
+}
+
+void __weak z_arm64_el2_plat_init(void)
+{
+	/* do nothing */
+}
+
+void __weak z_arm64_el1_plat_init(void)
+{
+	/* do nothing */
+}
+
+void z_arm64_el_highest_init(void)
+{
+	write_cntfrq_el0(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
+
+	z_arm64_el_highest_plat_init();
+
+	isb();
+}
+
+void z_arm64_el3_init(void)
+{
+	uint64_t reg;
+
+	/* Setup vector table */
+	write_vbar_el3((uint64_t)_vector_table);
+	isb();
+
+	reg = 0U;			/* Mostly RES0 */
+	reg &= ~(CPTR_TTA_BIT |		/* Do not trap sysreg accesses */
+		 CPTR_TFP_BIT |		/* Do not trap SVE, SIMD and FP */
+		 CPTR_TCPAC_BIT);	/* Do not trap CPTR_EL2 / CPACR_EL1 accesses */
+	write_cptr_el3(reg);
+
+	reg = 0U;			/* Reset */
+#ifdef CONFIG_ARMV8_A_NS
+	reg |= SCR_NS_BIT;		/* EL2 / EL3 non-secure */
+#endif
+	reg |= (SCR_RES1 |		/* RES1 */
+		SCR_RW_BIT |		/* EL2 execution state is AArch64 */
+		SCR_ST_BIT |		/* Do not trap EL1 accesses to timer */
+		SCR_HCE_BIT |		/* Do not trap HVC */
+		SCR_SMD_BIT);		/* Do not trap SMC */
+	write_scr_el3(reg);
+
+	z_arm64_el3_plat_init();
+
+	isb();
+}
+
+void z_arm64_el2_init(void)
+{
+	uint64_t reg;
+
+	reg = read_sctlr_el2();
+	reg |= (SCTLR_EL2_RES1 |	/* RES1 */
+		SCTLR_I_BIT |		/* Enable i-cache */
+		SCTLR_SA_BIT);		/* Enable SP alignment check */
+	write_sctlr_el2(reg);
+
+	reg = read_hcr_el2();
+	reg |= HCR_RW_BIT;		/* EL1 Execution state is AArch64 */
+	write_hcr_el2(reg);
+
+	reg = 0U;			/* RES0 */
+	reg |= CPTR_EL2_RES1;		/* RES1 */
+	reg &= ~(CPTR_TFP_BIT |		/* Do not trap SVE, SIMD and FP */
+		 CPTR_TCPAC_BIT);	/* Do not trap CPACR_EL1 accesses */
+	write_cptr_el2(reg);
+
+	zero_cntvoff_el2();		/* Set 64-bit virtual timer offset to 0 */
+	zero_cnthctl_el2();
+	zero_cnthp_ctl_el2();
+
+	z_arm64_el2_plat_init();
+
+	isb();
+}
+
+void z_arm64_el1_init(void)
+{
+	uint64_t reg;
+
+	/* Setup vector table */
+	write_vbar_el1((uint64_t)_vector_table);
+	isb();
+
+	reg = 0U;			/* RES0 */
+	reg |= CPACR_EL1_FPEN_NOTRAP;	/* Do not trap NEON/SIMD/FP */
+	/* TODO: CONFIG_FLOAT_*_FORBIDDEN */
+	write_cpacr_el1(reg);
+
+	reg = read_sctlr_el1();
+	reg |= (SCTLR_EL1_RES1 |	/* RES1 */
+		SCTLR_I_BIT |		/* Enable i-cache */
+		SCTLR_SA_BIT);		/* Enable SP alignment check */
+	write_sctlr_el1(reg);
+
+	z_arm64_el1_plat_init();
+
+	isb();
+}
+
+void z_arm64_el3_get_next_el(uint64_t switch_addr)
+{
+	uint64_t spsr;
+
+	write_elr_el3(switch_addr);
+
+	/* Mask the DAIF */
+	spsr = SPSR_DAIF_MASK;
+
+	/*
+	 * Is considered an illegal return "[..] a return to EL2 when EL3 is
+	 * implemented and the value of the SCR_EL3.NS bit is 0 if
+	 * ARMv8.4-SecEL2 is not implemented" (D1.11.2 from ARM DDI 0487E.a)
+	 */
+	if (is_el_implemented(2) &&
+	    ((is_in_secure_state() && is_el2_sec_supported()) ||
+	     !is_in_secure_state())) {
+		/* Dropping into EL2 */
+		spsr |= SPSR_MODE_EL2T;
+	} else {
+		/* Dropping into EL1 */
+		spsr |= SPSR_MODE_EL1T;
+	}
+
+	write_spsr_el3(spsr);
+}
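
Note: the four __weak z_arm64_el*_plat_init() hooks let a platform reinstate, in C, setup that the old assembly performed unconditionally, such as the removed ACTLR_EL3 configuration. A sketch of a platform override for the EL3 hook follows; it assumes read_actlr_el3()/write_actlr_el3() helpers and the ACTLR_EL3_* bit definitions from the old reset.S are available.

#include <kernel_internal.h>

/* Sketch of a platform override for the weak EL3 hook: re-enable lower-EL
 * access to the auxiliary control registers, as the old assembly reset
 * code did. The accessor helpers and bit masks here are assumptions.
 */
void z_arm64_el3_plat_init(void)
{
	uint64_t reg = read_actlr_el3();

	reg |= (ACTLR_EL3_L2ACTLR_BIT | ACTLR_EL3_L2ECTLR_BIT |
		ACTLR_EL3_L2CTLR_BIT | ACTLR_EL3_CPUACTLR_BIT |
		ACTLR_EL3_CPUECTLR_BIT);
	write_actlr_el3(reg);
}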
