Skip to content

Commit 7dd9b5a

Browse files
author
Marc Zyngier
committed
KVM: arm64: Move __get_fault_info() and co into their own include file
In order to avoid including the whole of the switching helpers in
unrelated files, move the __get_fault_info() and related helpers into
their own include file.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20211010145636.1950948-2-tabba@google.com
1 parent 9e1ff30 commit 7dd9b5a

File tree

3 files changed

+77
-61
lines changed

3 files changed

+77
-61
lines changed
arch/arm64/kvm/hyp/include/hyp/fault.h

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Copyright (C) 2015 - ARM Ltd
4+
* Author: Marc Zyngier <marc.zyngier@arm.com>
5+
*/
6+
7+
#ifndef __ARM64_KVM_HYP_FAULT_H__
8+
#define __ARM64_KVM_HYP_FAULT_H__
9+
10+
#include <asm/kvm_asm.h>
11+
#include <asm/kvm_emulate.h>
12+
#include <asm/kvm_hyp.h>
13+
#include <asm/kvm_mmu.h>
14+
15+
/*
 * Translate a faulting guest virtual address into an IPA, expressed
 * in HPFAR_EL2 format.
 *
 * @far:   guest virtual address taken from FAR_EL2.
 * @hpfar: on success, receives the IPA converted to HPFAR layout.
 *
 * Returns true on success, false if the stage-1 walk failed (in which
 * case the caller should return to the guest and let it take the fault).
 */
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 saved_par, walk_par;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	saved_par = read_sysreg_par();
	if (!__kvm_at("s1e1r", far))
		walk_par = read_sysreg_par();
	else
		walk_par = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(saved_par, par_el1);

	if (unlikely(walk_par & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(walk_par);
	return true;
}
43+
44+
/*
 * Capture the faulting addresses for a guest abort.
 *
 * @esr:   the ESR_EL2 syndrome for the abort.
 * @fault: fault record to fill in; far_el2 and hpfar_el2 are written
 *         on success.
 *
 * Returns true when the fault information was populated, false when
 * the AT-based IPA resolution failed and we must return to the guest.
 */
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar;
	u64 far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	fault->far_el2 = far;
	fault->hpfar_el2 = hpfar;
	return true;
}
74+
75+
#endif

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 1 addition & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#define __ARM64_KVM_HYP_SWITCH_H__
99

1010
#include <hyp/adjust_pc.h>
11+
#include <hyp/fault.h>
1112

1213
#include <linux/arm-smccc.h>
1314
#include <linux/kvm_host.h>
@@ -133,66 +134,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
133134
}
134135
}
135136

136-
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
137-
{
138-
u64 par, tmp;
139-
140-
/*
141-
* Resolve the IPA the hard way using the guest VA.
142-
*
143-
* Stage-1 translation already validated the memory access
144-
* rights. As such, we can use the EL1 translation regime, and
145-
* don't have to distinguish between EL0 and EL1 access.
146-
*
147-
* We do need to save/restore PAR_EL1 though, as we haven't
148-
* saved the guest context yet, and we may return early...
149-
*/
150-
par = read_sysreg_par();
151-
if (!__kvm_at("s1e1r", far))
152-
tmp = read_sysreg_par();
153-
else
154-
tmp = SYS_PAR_EL1_F; /* back to the guest */
155-
write_sysreg(par, par_el1);
156-
157-
if (unlikely(tmp & SYS_PAR_EL1_F))
158-
return false; /* Translation failed, back to guest */
159-
160-
/* Convert PAR to HPFAR format */
161-
*hpfar = PAR_TO_HPFAR(tmp);
162-
return true;
163-
}
164-
165-
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
166-
{
167-
u64 hpfar, far;
168-
169-
far = read_sysreg_el2(SYS_FAR);
170-
171-
/*
172-
* The HPFAR can be invalid if the stage 2 fault did not
173-
* happen during a stage 1 page table walk (the ESR_EL2.S1PTW
174-
* bit is clear) and one of the two following cases are true:
175-
* 1. The fault was due to a permission fault
176-
* 2. The processor carries errata 834220
177-
*
178-
* Therefore, for all non S1PTW faults where we either have a
179-
* permission fault or the errata workaround is enabled, we
180-
* resolve the IPA using the AT instruction.
181-
*/
182-
if (!(esr & ESR_ELx_S1PTW) &&
183-
(cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
184-
(esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
185-
if (!__translate_far_to_hpfar(far, &hpfar))
186-
return false;
187-
} else {
188-
hpfar = read_sysreg(hpfar_el2);
189-
}
190-
191-
fault->far_el2 = far;
192-
fault->hpfar_el2 = hpfar;
193-
return true;
194-
}
195-
196137
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
197138
{
198139
u8 ec;

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
#include <asm/kvm_pgtable.h>
1212
#include <asm/stage2_pgtable.h>
1313

14-
#include <hyp/switch.h>
14+
#include <hyp/fault.h>
1515

1616
#include <nvhe/gfp.h>
1717
#include <nvhe/memory.h>

0 commit comments

Comments
 (0)