
Commit e9ee186

James Morse authored and Catalin Marinas committed
KVM: arm64: Add kvm_extable for vaxorcism code
KVM has a one instruction window where it will allow an SError exception to be consumed by the hypervisor without treating it as a hypervisor bug. This is used to consume asynchronous external aborts that were caused by the guest.

As we are about to add another location that survives unexpected exceptions, generalise this code to make it behave like the host's extable.

KVM's version has to be mapped to EL2 to be accessible on nVHE systems.

The SError vaxorcism code is a one instruction window, so has two entries in the extable. Because the KVM code is copied for VHE and nVHE, we end up with four entries, half of which correspond with code that isn't mapped.

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 5d28ba5 commit e9ee186
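As background for the approach described above (and mirrored later in the diff by __kvm_unexpected_el2_exception): each extable entry stores two 32-bit offsets relative to the entry's own fields rather than absolute addresses, which keeps the table valid when the hyp text is mapped at a different virtual address at EL2. The sketch below is an illustrative, self-contained C model of that encode/decode round trip, not code from this patch; the two-int entry layout and the helper names (encode, decode_insn, decode_fixup) are assumptions for demonstration only.

#include <stdint.h>
#include <stdio.h>

/*
 * Same shape as arm64's two-int struct exception_table_entry at the time of
 * this patch: each field holds a 32-bit offset relative to its own address.
 * That keeps the table position independent, which matters because the hyp
 * text (and this table) is mapped at a different virtual address at EL2.
 */
struct kvm_exception_table_entry {
        int32_t insn;   /* offset to the instruction that may fault */
        int32_t fixup;  /* offset to the fixup label (e.g. 9997:)   */
};

/* Encode: what ".long (\from - .), (\to - .)" produces at assembly time. */
static void encode(struct kvm_exception_table_entry *e,
                   uintptr_t from, uintptr_t to)
{
        e->insn  = (int32_t)(from - (uintptr_t)&e->insn);
        e->fixup = (int32_t)(to   - (uintptr_t)&e->fixup);
}

/* Decode: the same arithmetic __kvm_unexpected_el2_exception uses below. */
static uintptr_t decode_insn(const struct kvm_exception_table_entry *e)
{
        return (uintptr_t)&e->insn + e->insn;
}

static uintptr_t decode_fixup(const struct kvm_exception_table_entry *e)
{
        return (uintptr_t)&e->fixup + e->fixup;
}

int main(void)
{
        /*
         * Stand-ins for abort_guest_exit_start and the 9997: fixup label.
         * Everything lives in static storage so the 32-bit offsets cannot
         * overflow in this demo.
         */
        static const char fault_insn, fixup_label;
        static struct kvm_exception_table_entry e;

        encode(&e, (uintptr_t)&fault_insn, (uintptr_t)&fixup_label);

        /* The lookup compares the decoded address against ELR_EL2. */
        printf("insn round-trips:  %d\n", decode_insn(&e)  == (uintptr_t)&fault_insn);
        printf("fixup round-trips: %d\n", decode_fixup(&e) == (uintptr_t)&fixup_label);
        return 0;
}

Note that the KVM table is not sorted (unlike the host's), so the lookup added by this patch is a linear walk rather than a binary search.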

8 files changed: 108 additions, 26 deletions


arch/arm64/include/asm/kvm_asm.h

Lines changed: 15 additions & 0 deletions
@@ -193,6 +193,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long	(\from - .), (\to - .)
+	.popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */

arch/arm64/kernel/image-vars.h

Lines changed: 4 additions & 0 deletions
@@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 KVM_NVHE_ALIAS(gic_pmr_sync);
 #endif
 
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */

arch/arm64/kernel/vmlinux.lds.S

Lines changed: 8 additions & 0 deletions
@@ -20,6 +20,13 @@ ENTRY(_text)
 
 jiffies = jiffies_64;
 
+
+#define HYPERVISOR_EXTABLE				\
+	. = ALIGN(SZ_8);				\
+	__start___kvm_ex_table = .;			\
+	*(__kvm_ex_table)				\
+	__stop___kvm_ex_table = .;
+
 #define HYPERVISOR_TEXT					\
 	/*						\
 	 * Align to 4 KB so that			\
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
 	__hyp_idmap_text_end = .;			\
 	__hyp_text_start = .;				\
 	*(.hyp.text)					\
+	HYPERVISOR_EXTABLE				\
 	__hyp_text_end = .;
 
 #define IDMAP_TEXT					\

arch/arm64/kvm/hyp/entry.S

Lines changed: 9 additions & 6 deletions
@@ -196,20 +196,23 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 
 	isb
 
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 
 	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 31 additions & 20 deletions
@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp	x2, x3,   [sp, #-16]!
+	stp	x4, x5,   [sp, #-16]!
+	stp	x6, x7,   [sp, #-16]!
+	stp	x8, x9,   [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9,   [sp], #16
+	ldp	x6, x7,   [sp], #16
+	ldp	x4, x5,   [sp], #16
+	ldp	x2, x3,   [sp], #16
+	ldp	x0, x1,   [sp], #16
+.endm
+
 	.text
 
 .macro do_el2_call
@@ -157,27 +181,14 @@ el2_sync:
 
 
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp     x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp     x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
 
arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 31 additions & 0 deletions
@@ -17,6 +17,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 
 extern const char __hyp_panic_string[];
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline void __kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
+
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 5 additions & 0 deletions
@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 		       read_sysreg(hpfar_el2), par, vcpu);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 5 additions & 0 deletions
@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	__hyp_call_panic(spsr, elr, par, host_ctxt);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
