Skip to content

Commit 09f9849

Browse files
Jose Ricardo Ziviani authored and Paul Mackerras (paulusmack) committed
KVM: PPC: Book3S: Add MMIO emulation for VMX instructions
This patch provides the MMIO load/store vector indexed X-Form emulation. Instructions implemented: lvx: the quadword in storage addressed by the result of EA & 0xffff_ffff_ffff_fff0 is loaded into VRT. stvx: the contents of VRS are stored into the quadword in storage addressed by the result of EA & 0xffff_ffff_ffff_fff0. Reported-by: Gopesh Kumar Chaudhary <gopchaud@in.ibm.com> Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com> Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
1 parent d20fe50 commit 09f9849

File tree

5 files changed

+198
-0
lines changed

5 files changed

+198
-0
lines changed

arch/powerpc/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
690690
u8 mmio_vsx_offset;
691691
u8 mmio_vsx_copy_type;
692692
u8 mmio_vsx_tx_sx_enabled;
693+
u8 mmio_vmx_copy_nums;
693694
u8 osi_needed;
694695
u8 osi_enabled;
695696
u8 papr_enabled;
@@ -804,6 +805,7 @@ struct kvm_vcpu_arch {
804805
#define KVM_MMIO_REG_QPR 0x0040
805806
#define KVM_MMIO_REG_FQPR 0x0060
806807
#define KVM_MMIO_REG_VSX 0x0080
808+
#define KVM_MMIO_REG_VMX 0x00c0
807809

808810
#define __KVM_HAVE_ARCH_WQP
809811
#define __KVM_HAVE_CREATE_DEVICE

arch/powerpc/include/asm/kvm_ppc.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
8181
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
8282
unsigned int rt, unsigned int bytes,
8383
int is_default_endian, int mmio_sign_extend);
84+
extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
85+
struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
86+
extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
87+
struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
8488
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
8589
u64 val, unsigned int bytes,
8690
int is_default_endian);

arch/powerpc/include/asm/ppc-opcode.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,12 @@
156156
#define OP_31_XOP_LFDX 599
157157
#define OP_31_XOP_LFDUX 631
158158

159+
/* VMX Vector Load Instructions */
160+
#define OP_31_XOP_LVX 103
161+
162+
/* VMX Vector Store Instructions */
163+
#define OP_31_XOP_STVX 231
164+
159165
#define OP_LWZ 32
160166
#define OP_STFS 52
161167
#define OP_STFSU 53

arch/powerpc/kvm/emulate_loadstore.c

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
5858
}
5959
#endif /* CONFIG_VSX */
6060

61+
#ifdef CONFIG_ALTIVEC
/*
 * Check whether the guest currently has Altivec enabled (MSR_VEC set).
 * If not, queue a vector-unavailable interrupt for the guest and report
 * true so the caller can abandon the emulation.
 */
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (kvmppc_get_msr(vcpu) & MSR_VEC)
		return false;

	kvmppc_core_queue_vec_unavail(vcpu);
	return true;
}
#endif /* CONFIG_ALTIVEC */
72+
6173
/*
6274
* XXX to do:
6375
* lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
98110
vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
99111
vcpu->arch.mmio_sp64_extend = 0;
100112
vcpu->arch.mmio_sign_extend = 0;
113+
vcpu->arch.mmio_vmx_copy_nums = 0;
101114

102115
switch (get_op(inst)) {
103116
case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
459472
rs, 4, 1);
460473
break;
461474
#endif /* CONFIG_VSX */
475+
476+
#ifdef CONFIG_ALTIVEC
477+
case OP_31_XOP_LVX:
478+
if (kvmppc_check_altivec_disabled(vcpu))
479+
return EMULATE_DONE;
480+
vcpu->arch.vaddr_accessed &= ~0xFULL;
481+
vcpu->arch.paddr_accessed &= ~0xFULL;
482+
vcpu->arch.mmio_vmx_copy_nums = 2;
483+
emulated = kvmppc_handle_load128_by2x64(run, vcpu,
484+
KVM_MMIO_REG_VMX|rt, 1);
485+
break;
486+
487+
case OP_31_XOP_STVX:
488+
if (kvmppc_check_altivec_disabled(vcpu))
489+
return EMULATE_DONE;
490+
vcpu->arch.vaddr_accessed &= ~0xFULL;
491+
vcpu->arch.paddr_accessed &= ~0xFULL;
492+
vcpu->arch.mmio_vmx_copy_nums = 2;
493+
emulated = kvmppc_handle_store128_by2x64(run, vcpu,
494+
rs, 1);
495+
break;
496+
#endif /* CONFIG_ALTIVEC */
497+
462498
default:
463499
emulated = EMULATE_FAIL;
464500
break;

arch/powerpc/kvm/powerpc.c

Lines changed: 150 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -924,6 +924,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
924924
}
925925
#endif /* CONFIG_VSX */
926926

927+
#ifdef CONFIG_ALTIVEC
/*
 * Deliver one 64-bit half of an MMIO vector load into the target VR.
 * mmio_vmx_copy_nums counts down from 2 as halves complete, so
 * (2 - mmio_vmx_copy_nums) selects the doubleword being filled; the
 * index is flipped when the host had to byte-swap the MMIO data.
 */
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	int vr = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	u32 dword_idx = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
	u32 word_hi, word_lo;

	if (dword_idx > 1)
		return;

#ifdef __BIG_ENDIAN
	word_hi = gpr >> 32;
	word_lo = gpr & 0xffffffff;
#else
	word_lo = gpr >> 32;
	word_hi = gpr & 0xffffffff;
#endif

	if (vcpu->arch.mmio_host_swabbed)
		dword_idx = 1 - dword_idx;

	VCPU_VSX_VR(vcpu, vr).u[dword_idx * 2] = word_hi;
	VCPU_VSX_VR(vcpu, vr).u[dword_idx * 2 + 1] = word_lo;
}
#endif /* CONFIG_ALTIVEC */
954+
927955
#ifdef CONFIG_PPC_FPU
928956
static inline u64 sp_to_dp(u32 fprs)
929957
{
@@ -1026,6 +1054,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
10261054
KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
10271055
kvmppc_set_vsr_dword_dump(vcpu, gpr);
10281056
break;
1057+
#endif
1058+
#ifdef CONFIG_ALTIVEC
1059+
case KVM_MMIO_REG_VMX:
1060+
kvmppc_set_vmx_dword(vcpu, gpr);
1061+
break;
10291062
#endif
10301063
default:
10311064
BUG();
@@ -1302,6 +1335,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
13021335
}
13031336
#endif /* CONFIG_VSX */
13041337

1338+
#ifdef CONFIG_ALTIVEC
1339+
/* handle quadword load access in two halves */
1340+
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
1341+
unsigned int rt, int is_default_endian)
1342+
{
1343+
enum emulation_result emulated;
1344+
1345+
while (vcpu->arch.mmio_vmx_copy_nums) {
1346+
emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
1347+
is_default_endian, 0);
1348+
1349+
if (emulated != EMULATE_DONE)
1350+
break;
1351+
1352+
vcpu->arch.paddr_accessed += run->mmio.len;
1353+
vcpu->arch.mmio_vmx_copy_nums--;
1354+
}
1355+
1356+
return emulated;
1357+
}
1358+
1359+
/*
 * Fetch the 64-bit half of VR 'rs' that corresponds to the current
 * store step (selected by mmio_vmx_copy_nums, flipped if the host
 * swabbed the data) and assemble it into *val.
 * Returns 0 on success, -1 if no halves remain to be stored.
 */
static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	vector128 vreg = VCPU_VSX_VR(vcpu, rs);
	u32 dword_idx = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
	u64 word0, word1;

	if (dword_idx > 1)
		return -1;

	if (vcpu->arch.mmio_host_swabbed)
		dword_idx = 1 - dword_idx;

	word0 = vreg.u[dword_idx * 2];
	word1 = vreg.u[dword_idx * 2 + 1];

#ifdef __BIG_ENDIAN
	*val = (word0 << 32) | word1;
#else
	*val = (word1 << 32) | word0;
#endif
	return 0;
}
1382+
1383+
/* handle quadword store in two halves */
1384+
int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
1385+
unsigned int rs, int is_default_endian)
1386+
{
1387+
u64 val = 0;
1388+
enum emulation_result emulated = EMULATE_DONE;
1389+
1390+
vcpu->arch.io_gpr = rs;
1391+
1392+
while (vcpu->arch.mmio_vmx_copy_nums) {
1393+
if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
1394+
return EMULATE_FAIL;
1395+
1396+
emulated = kvmppc_handle_store(run, vcpu, val, 8,
1397+
is_default_endian);
1398+
if (emulated != EMULATE_DONE)
1399+
break;
1400+
1401+
vcpu->arch.paddr_accessed += run->mmio.len;
1402+
vcpu->arch.mmio_vmx_copy_nums--;
1403+
}
1404+
1405+
return emulated;
1406+
}
1407+
1408+
static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1409+
struct kvm_run *run)
1410+
{
1411+
enum emulation_result emulated = EMULATE_FAIL;
1412+
int r;
1413+
1414+
vcpu->arch.paddr_accessed += run->mmio.len;
1415+
1416+
if (!vcpu->mmio_is_write) {
1417+
emulated = kvmppc_handle_load128_by2x64(run, vcpu,
1418+
vcpu->arch.io_gpr, 1);
1419+
} else {
1420+
emulated = kvmppc_handle_store128_by2x64(run, vcpu,
1421+
vcpu->arch.io_gpr, 1);
1422+
}
1423+
1424+
switch (emulated) {
1425+
case EMULATE_DO_MMIO:
1426+
run->exit_reason = KVM_EXIT_MMIO;
1427+
r = RESUME_HOST;
1428+
break;
1429+
case EMULATE_FAIL:
1430+
pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1431+
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1432+
run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1433+
r = RESUME_HOST;
1434+
break;
1435+
default:
1436+
r = RESUME_GUEST;
1437+
break;
1438+
}
1439+
return r;
1440+
}
1441+
#endif /* CONFIG_ALTIVEC */
1442+
13051443
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
13061444
{
13071445
int r = 0;
@@ -1420,6 +1558,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
14201558
return r;
14211559
}
14221560
}
1561+
#endif
1562+
#ifdef CONFIG_ALTIVEC
1563+
if (vcpu->arch.mmio_vmx_copy_nums > 0)
1564+
vcpu->arch.mmio_vmx_copy_nums--;
1565+
1566+
if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1567+
r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1568+
if (r == RESUME_HOST) {
1569+
vcpu->mmio_needed = 1;
1570+
return r;
1571+
}
1572+
}
14231573
#endif
14241574
} else if (vcpu->arch.osi_needed) {
14251575
u64 *gprs = run->osi.gprs;

0 commit comments

Comments
 (0)