@@ -136,16 +136,7 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
 
 static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 {
-	u8 ec;
-	u64 esr;
-
-	esr = vcpu->arch.fault.esr_el2;
-	ec = ESR_ELx_EC(esr);
-
-	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
-		return true;
-
-	return __get_fault_info(esr, &vcpu->arch.fault);
+	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
 }
 
 static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
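The dropped EC filter isn't lost, it just moves: once the per-EC dispatch introduced below is in place, __populate_fault_info() is only reachable from the IABT_LOW/DABT_LOW slots of the handler table, so the exception class is already known at the call site. The new call path, shown in full further down this diff, boils down to:

static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Only installed for ESR_ELx_EC_IABT_LOW, so no EC check is needed. */
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}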
@@ -166,8 +157,13 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
 }
 
-/* Check for an FPSIMD/SVE trap and handle as appropriate */
-static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+/*
+ * We trap the first access to the FP/SIMD to save the host context and
+ * restore the guest context lazily.
+ * If FP/SIMD is not implemented, handle the trap and inject an undefined
+ * instruction exception to the guest. Similarly for trapped SVE accesses.
+ */
+static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	bool sve_guest, sve_host;
 	u8 esr_ec;
@@ -185,9 +181,6 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	}
 
 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    esr_ec != ESR_ELx_EC_SVE)
-		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
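The EC check removed here is likewise subsumed by table dispatch: this handler only gets installed under the FP_ASIMD and SVE slots, and esr_ec is still read afterwards to tell the two apart. For reference, the exception class that both the remaining check above and the handler table below key on is bits [31:26] of ESR_EL2; kvm_vcpu_trap_get_class() is a thin wrapper around these definitions from arch/arm64/include/asm/esr.h:

#define ESR_ELx_EC_SHIFT	(26)
#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)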
@@ -325,7 +318,7 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
 
 DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 
-static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm_cpu_context *ctxt;
 	u64 val;
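As with the FP/SIMD handler, the rename comes with the common handler signature: exit_code is unused here, but taking it lets the function sit in the same table as handlers that do rewrite it. The contract, as a hypothetical minimal handler (not part of the patch):

/*
 * Hypothetical no-op handler illustrating the shared contract: return
 * true to resume the guest, false to defer to the host; *exit_code may
 * be rewritten (e.g. to ARM_EXCEPTION_EL1_SERROR) before returning.
 */
static bool kvm_hyp_handle_example(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	return false;	/* not handled: let the host deal with it */
}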
@@ -350,6 +343,87 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+	    handle_tx2_tvm(vcpu))
+		return true;
+
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
+		return true;
+
+	return false;
+}
+
+static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
+		return true;
+
+	return false;
+}
+
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (!__populate_fault_info(vcpu))
+		return true;
+
+	return false;
+}
+
+static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	if (!__populate_fault_info(vcpu))
+		return true;
+
+	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+		bool valid;
+
+		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+			kvm_vcpu_dabt_isvalid(vcpu) &&
+			!kvm_vcpu_abt_issea(vcpu) &&
+			!kvm_vcpu_abt_iss1tw(vcpu);
+
+		if (valid) {
+			int ret = __vgic_v2_perform_cpuif_access(vcpu);
+
+			if (ret == 1)
+				return true;
+
+			/* Promote an illegal access to an SError. */
+			if (ret == -1)
+				*exit_code = ARM_EXCEPTION_EL1_SERROR;
+		}
+	}
+
+	return false;
+}
+
+typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
+
+static const exit_handler_fn *kvm_get_exit_handler_array(void);
+
+/*
+ * Allow the hypervisor to handle the exit with an exit handler if it has one.
+ *
+ * Returns true if the hypervisor handled the exit, and control should go back
+ * to the guest, or false if it hasn't.
+ */
+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	const exit_handler_fn *handlers = kvm_get_exit_handler_array();
+	exit_handler_fn fn;
+
+	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+
+	if (fn)
+		return fn(vcpu, exit_code);
+
+	return false;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
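kvm_get_exit_handler_array() is only declared in this header; the VHE and nVHE switch code each supply their own table elsewhere in the series. Going by the designated-initializer style KVM uses for such tables, a plausible sketch of one of them is:

static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn *kvm_get_exit_handler_array(void)
{
	return hyp_exit_handlers;
}

Note the NULL range initializer: kvm_hyp_handle_exit() above relies on unhandled exception classes indexing a NULL slot (and falling back to the host) rather than running off the end of the array, since the 6-bit EC can take any value up to ESR_ELx_EC_MAX.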
@@ -384,59 +458,9 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (*exit_code != ARM_EXCEPTION_TRAP)
 		goto exit;
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
-	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
-	    handle_tx2_tvm(vcpu))
+	/* Check if there's an exit handler and allow it to handle the exit. */
+	if (kvm_hyp_handle_exit(vcpu, exit_code))
 		goto guest;
-
-	/*
-	 * We trap the first access to the FP/SIMD to save the host context
-	 * and restore the guest context lazily.
-	 * If FP/SIMD is not implemented, handle the trap and inject an
-	 * undefined instruction exception to the guest.
-	 * Similarly for trapped SVE accesses.
-	 */
-	if (__hyp_handle_fpsimd(vcpu))
-		goto guest;
-
-	if (__hyp_handle_ptrauth(vcpu))
-		goto guest;
-
-	if (!__populate_fault_info(vcpu))
-		goto guest;
-
-	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
-		bool valid;
-
-		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
-			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
-			kvm_vcpu_dabt_isvalid(vcpu) &&
-			!kvm_vcpu_abt_issea(vcpu) &&
-			!kvm_vcpu_abt_iss1tw(vcpu);
-
-		if (valid) {
-			int ret = __vgic_v2_perform_cpuif_access(vcpu);
-
-			if (ret == 1)
-				goto guest;
-
-			/* Promote an illegal access to an SError. */
-			if (ret == -1)
-				*exit_code = ARM_EXCEPTION_EL1_SERROR;
-
-			goto exit;
-		}
-	}
-
-	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
-	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
-	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
-		int ret = __vgic_v3_perform_cpuif_access(vcpu);
-
-		if (ret == 1)
-			goto guest;
-	}
-
 exit:
 	/* Return to the host kernel and handle the exit */
 	return false;
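For context, fixup_guest_exit() keeps its calling convention, so its callers are untouched: the world-switch loop in the VHE/nVHE switch code keeps re-entering the guest for as long as the exit can be fixed up at EL2, along these lines (sketch of the existing call-site pattern, not part of this diff):

do {
	/* Enter the guest; returns with an exit code once it traps. */
	exit_code = __guest_enter(vcpu);

	/* Handle the exit at hyp where possible, re-entering on success. */
} while (fixup_guest_exit(vcpu, &exit_code));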