@@ -1227,6 +1227,9 @@ void vcpu_save_host_state(struct vcpu_t *vcpu)
         vmwrite(vcpu, HOST_EFER, hstate->_efer);
     }
 
+    hstate->_pat = ia32_rdmsr(IA32_CR_PAT);
+    vmwrite(vcpu, HOST_PAT, hstate->_pat);
+
 #ifdef HAX_ARCH_X86_64
     vmwrite(vcpu, HOST_CS_SELECTOR, get_kernel_cs());
 #else
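The `hstate->_pat` field written here is new, and its declaration is not part of this hunk; presumably the per-vCPU host state struct gains a matching member. A minimal sketch under that assumption (the struct name and the fields around `_pat` are illustrative, not the actual HAXM header):

```c
/* Sketch (assumed): host register state saved before running the guest.
 * Only _pat is new in this patch; the neighboring field is illustrative. */
struct hstate {
    uint64_t _efer;   /* host IA32_EFER, saved just above this hunk */
    uint64_t _pat;    /* host IA32_PAT, written to the HOST_PAT VMCS field */
};
```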
@@ -1393,15 +1396,15 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
 
 #ifdef HAX_ARCH_X86_64
     exit_ctls = EXIT_CONTROL_HOST_ADDR_SPACE_SIZE | EXIT_CONTROL_LOAD_EFER |
-                EXIT_CONTROL_SAVE_DEBUG_CONTROLS;
+                EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT;
 #endif
 
 #ifdef HAX_ARCH_X86_32
     if (is_compatible()) {
         exit_ctls = EXIT_CONTROL_HOST_ADDR_SPACE_SIZE | EXIT_CONTROL_LOAD_EFER |
-                    EXIT_CONTROL_SAVE_DEBUG_CONTROLS;
+                    EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT;
     } else {
-        exit_ctls = EXIT_CONTROL_SAVE_DEBUG_CONTROLS;
+        exit_ctls = EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT;
     }
 #endif
 
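`EXIT_CONTROL_LOAD_PAT` corresponds to the "load IA32_PAT" VM-exit control, bit 19 in the Intel SDM; with it set, the CPU restores the host PAT from the HOST_PAT VMCS field on every VM exit, which is why `vcpu_save_host_state()` above only needs to populate that field. A sketch of the assumed constant definitions (the bit positions come from the SDM; only the macro names are taken from this diff):

```c
/* VM-exit control bits (Intel SDM Vol. 3, "VM-Exit Controls"). */
#define EXIT_CONTROL_SAVE_DEBUG_CONTROLS   (1u << 2)
#define EXIT_CONTROL_HOST_ADDR_SPACE_SIZE  (1u << 9)
#define EXIT_CONTROL_LOAD_PAT              (1u << 19)  /* load IA32_PAT on VM exit */
#define EXIT_CONTROL_LOAD_EFER             (1u << 21)  /* load IA32_EFER on VM exit */
```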
@@ -1473,6 +1476,9 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
         vmwrite(vcpu, HOST_EFER, ia32_rdmsr(IA32_EFER));
     }
 
+    vmwrite(vcpu, HOST_PAT, ia32_rdmsr(IA32_CR_PAT));
+    vmwrite(vcpu, GUEST_PAT, vcpu->cr_pat);
+
     WRITE_CONTROLS(vcpu, VMX_ENTRY_CONTROLS, entry_ctls);
 
     vmwrite(vcpu, VMX_PAGE_FAULT_ERROR_CODE_MASK, 0);
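`HOST_PAT` and `GUEST_PAT` are 64-bit VMCS fields; their architectural encodings per the SDM are shown below (the macro names come from this diff, the values from the SDM):

```c
/* 64-bit VMCS field encodings for IA32_PAT (Intel SDM Vol. 3, Appendix B). */
#define GUEST_PAT  0x2804  /* guest IA32_PAT, loaded on VM entry */
#define HOST_PAT   0x2c00  /* host IA32_PAT, loaded on VM exit */
```

Writing `vcpu->cr_pat` into GUEST_PAT here seeds the field so the first VM entry has a valid value to load, assuming `cr_pat` is initialized elsewhere to the architectural reset value 0x0007040600070406.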
@@ -2040,6 +2046,8 @@ static void vmwrite_cr(struct vcpu_t *vcpu)
         entry_ctls &= ~ENTRY_CONTROL_LOAD_EFER;
     }
 
+    entry_ctls |= ENTRY_CONTROL_LOAD_PAT;
+
     if (pcpu_ctls != vmx(vcpu, pcpu_ctls)) {
         vmx(vcpu, pcpu_ctls) = pcpu_ctls;
         vcpu->pcpu_ctls_dirty = 1;
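On the entry side, "load IA32_PAT" is bit 14 of the VM-entry controls, so forcing `ENTRY_CONTROL_LOAD_PAT` on means every VM entry loads the guest PAT from GUEST_PAT. Assumed definitions, values per the SDM:

```c
/* VM-entry control bits (Intel SDM Vol. 3, "VM-Entry Controls"). */
#define ENTRY_CONTROL_LOAD_PAT   (1u << 14)  /* load GUEST_PAT on VM entry */
#define ENTRY_CONTROL_LOAD_EFER  (1u << 15)  /* load GUEST_EFER on VM entry */
```

Strictly speaking, a 1-setting of this bit should be confirmed against the IA32_VMX_ENTRY_CTLS (or IA32_VMX_TRUE_ENTRY_CTLS) capability MSR before being forced on; the patch assumes the CPU supports it.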
@@ -2530,7 +2538,7 @@ static void handle_cpuid_virtual(struct vcpu_t *vcpu, uint32_t a, uint32_t c)
     uint8_t physical_address_size;
 
     static uint32_t cpuid_1_features_edx =
-            // pat is disabled!
+            FEATURE(PAT) |
             FEATURE(FPU) |
             FEATURE(VME) |
             FEATURE(DE) |
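With PAT now loaded on entry and validated on write, the feature can finally be advertised to the guest; PAT is bit 16 of CPUID.01H:EDX. A sketch of how the `FEATURE()` macro is assumed to work (the macro shape is an assumption; the bit position is architectural):

```c
/* CPUID.01H:EDX feature bits; FEATURE() assumed to build the bit mask. */
#define FEATURE_BIT_FPU  0
#define FEATURE_BIT_PAT  16   /* Page Attribute Table supported */
#define FEATURE(name)    (1u << FEATURE_BIT_##name)
```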
@@ -2660,10 +2668,13 @@ static void handle_cpuid_virtual(struct vcpu_t *vcpu, uint32_t a, uint32_t c)
             state->_edx = 0x0c040844;
             return;
         }
-        case 3: // Reserved
+        case 3: { // Reserved
+            state->_eax = state->_ebx = state->_ecx = state->_edx = 0;
+            return;
+        }
         case 4: { // Deterministic Cache Parameters
             // [31:26] cores per package - 1
-            state->_eax = state->_ebx = state->_ecx = state->_edx = 0;
+            // Use host cache values.
             return;
         }
         case 5: // MONITOR/MWAIT
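Leaf 3 (the long-reserved Processor Serial Number leaf) now explicitly returns zeros instead of falling through, while leaf 4 does the opposite: the zeroing is dropped, so the guest presumably sees the host's deterministic cache parameters. A sketch of how such a pass-through could read a host subleaf, assuming GCC-style inline assembly (the helper name is hypothetical):

```c
/* Hypothetical helper: run CPUID on the host so the guest can be shown
 * the host's leaf-4 (cache topology) values, subleaf selected via ECX. */
static void host_cpuid_count(uint32_t leaf, uint32_t subleaf,
                             uint32_t *eax, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    __asm__ volatile("cpuid"
                     : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                     : "a"(leaf), "c"(subleaf));
}
```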
@@ -3560,6 +3571,15 @@ static int misc_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val)
     return 1;
 }
 
+static inline bool is_pat_valid(uint64_t val)
+{
+    if (val & 0xF8F8F8F8F8F8F8F8)
+        return false;
+
+    // 0, 1, 4, 5, 6, 7 are valid values.
+    return (val | ((val & 0x0202020202020202) << 1)) == val;
+}
+
 static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val,
                             bool by_host)
 {
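`is_pat_valid()` vets all eight PAT entries branchlessly. The first mask rejects any byte above 7; the second expression rejects the two undefined encodings 2 (010b) and 3 (011b): shifting the isolated bit 1 left produces bit 2 wherever bit 1 was set, so OR-ing it back leaves `val` unchanged only when bit 1 implies bit 2, i.e. for 0, 1, 4, 5, 6 and 7. A self-contained harness that checks the trick against a naive per-byte test (not part of the patch; ULL suffixes added here for portability):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_pat_valid(uint64_t val)
{
    if (val & 0xF8F8F8F8F8F8F8F8ULL)
        return false;
    // 0, 1, 4, 5, 6, 7 are valid values.
    return (val | ((val & 0x0202020202020202ULL) << 1)) == val;
}

int main(void)
{
    // Each encoding 0..255 in each of the 8 PAT slots (other slots left
    // as 0, the valid UC type) must match the naive check.
    for (int slot = 0; slot < 8; slot++) {
        for (uint32_t b = 0; b < 256; b++) {
            bool naive = b <= 7 && b != 2 && b != 3;
            assert(is_pat_valid((uint64_t)b << (8 * slot)) == naive);
        }
    }
    return 0;
}
```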
@@ -3718,7 +3738,15 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val,
             break;
         }
         case IA32_CR_PAT: {
+            // Attempting to write an undefined memory type encoding into the
+            // PAT causes a general-protection (#GP) exception to be generated.
+            if (!is_pat_valid(val)) {
+                r = 1;
+                break;
+            }
+
             vcpu->cr_pat = val;
+            vmwrite(vcpu, GUEST_PAT, vcpu->cr_pat);
             break;
         }
         case IA32_MTRR_DEF_TYPE: {
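Returning `r = 1` signals a failed write; the caller is presumably responsible for turning that into an injected #GP, matching what WRMSR does architecturally for an undefined PAT encoding. A sketch of the assumed call-site pattern (`hax_inject_exception` and `VECTOR_GP` are assumptions, not shown in this diff):

```c
/* Assumed caller behavior: a non-zero return from handle_msr_write()
 * becomes a #GP(0) injected into the guest. */
if (handle_msr_write(vcpu, msr, val, false))
    hax_inject_exception(vcpu, VECTOR_GP, 0);
```

The extra `vmwrite(vcpu, GUEST_PAT, ...)` keeps the VMCS in sync so an accepted value takes effect on the next VM entry.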
@@ -3912,6 +3940,13 @@ static int _copy_desc(segment_desc_t *old, segment_desc_t *new)
     return flags;
 }
 
+int vcpu_get_state_size(struct vcpu_t *vcpu)
+{
+    if (vcpu->vm->features & VM_FEATURES_CR8)
+        return sizeof(struct vcpu_state_t);
+    return offsetof(struct vcpu_state_t, _cr8);
+}
+
 int vcpu_get_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate)
 {
     struct vcpu_state_t *state = vcpu->state;
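The `offsetof()` trick only works if `_cr8` is the last member of `struct vcpu_state_t`, so the size reported without the CR8 feature is exactly the legacy layout. A sketch of the assumed arrangement (everything except the position of `_cr8` is illustrative):

```c
/* Assumed layout: _cr8 appended after all pre-existing fields, so
 * offsetof(struct vcpu_state_t, _cr8) equals the legacy struct size. */
struct vcpu_state_t {
    /* ... all pre-existing register state ... */
    uint64_t _cr8;  /* valid only when VM_FEATURES_CR8 is set */
};
```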
@@ -3947,6 +3982,9 @@ int vcpu_get_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate)
     _copy_desc(&state->_gdt, &ustate->_gdt);
     _copy_desc(&state->_idt, &ustate->_idt);
 
+    if (vcpu->vm->features & VM_FEATURES_CR8)
+        ustate->_cr8 = state->_cr8;
+
     return 0;
 }
 
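A plausible consumer of `vcpu_get_state_size()` is the ioctl layer, copying back only as many bytes as the VM's feature flags make valid, so older userspace keeps receiving the old structure size. An illustrative sketch (the copy helper and error constants are placeholders, not HAXM API):

```c
/* Illustrative ioctl-side use (helper names are placeholders). */
int size = vcpu_get_state_size(vcpu);
if (vcpu_get_regs(vcpu, state))
    return -EINVAL;
if (copy_to_user_buf(user_buf, state, size))
    return -EFAULT;
```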
@@ -4072,6 +4110,9 @@ int vcpu_set_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate)
         VMWRITE_DESC(vcpu, IDTR, state->_idt);
     }
 
+    if (vcpu->vm->features & VM_FEATURES_CR8)
+        state->_cr8 = ustate->_cr8;
+
     if ((vmcs_err = put_vmcs(vcpu, &flags))) {
         vcpu_set_panic(vcpu);
         hax_log(HAX_LOGPANIC, "put_vmcs failed on vcpu_set_regs: %x\n",