@@ -2459,5 +2459,303 @@ mod tests {
         normalize_sregs_no_granularity(&mut expected_reset, &sregs);
         assert_eq!(sregs, expected_reset);
     }
+
+    /// Verifies guest-visible FPU state (via FXSAVE) is properly reset.
+    /// Unlike tests using the hypervisor API, this runs actual guest code with FXSAVE.
+    #[test]
+    fn reset_vcpu_fpu_guest_visible_state() {
+        let mut ctx = hyperlight_vm_with_mem_mgr_fxsave();
+
+        // Verify FPU was dirtied after first run
+        let fpu_before_reset = ctx.vm.vm.fpu().unwrap();
+        assert_eq!(
+            fpu_before_reset.fcw, 0x0F7F,
+            "FCW should be dirty after first run"
+        );
+        assert_ne!(
+            fpu_before_reset.ftwx, 0,
+            "FTW should indicate valid registers after first run"
+        );
+
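+        // Offsets below follow the 512-byte FXSAVE area layout: FCW at byte 0, FSW at 2,
+        // abridged FTW at 4, FOP at 6, FIP at 8, FDP at 16, MXCSR at 24,
+        // ST0-ST7 at 32 (16 bytes each), XMM0-XMM15 at 160 (16 bytes each).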
+        let fxsave_before = ctx.read_fxsave();
+        let fcw_before = u16::from_le_bytes(fxsave_before[0..2].try_into().unwrap());
+        assert_eq!(fcw_before, 0x0F7F, "Guest FXSAVE FCW should be dirty");
+        let mxcsr_before = u32::from_le_bytes(fxsave_before[24..28].try_into().unwrap());
+        assert_eq!(mxcsr_before, 0x3F80, "Guest FXSAVE MXCSR should be dirty");
+        let xmm0_before = u32::from_le_bytes(fxsave_before[160..164].try_into().unwrap());
+        assert_eq!(xmm0_before, 0x11111111, "Guest FXSAVE XMM0 should be dirty");
+
+        ctx.vm.reset_vcpu().unwrap();
+
+        // Re-run from entrypoint (flag=1 means guest skips dirty phase, just does FXSAVE)
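+        // RFLAGS bit 1 is reserved and must always be set.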
+        let regs = CommonRegisters {
+            rip: ctx.vm.entrypoint,
+            rsp: ctx.vm.orig_rsp.absolute().unwrap(),
+            rflags: 1 << 1,
+            ..Default::default()
+        };
+        ctx.vm.vm.set_regs(&regs).unwrap();
+        ctx.run();
+
+        // Verify guest-visible state is reset
+        let fxsave_after = ctx.read_fxsave();
+        let fcw_after = u16::from_le_bytes(fxsave_after[0..2].try_into().unwrap());
+        assert_eq!(
+            fcw_after, 0x037F,
+            "Guest FXSAVE FCW should be reset to default 0x037F, got 0x{:04X}",
+            fcw_after
+        );
+
+        let fsw_after = u16::from_le_bytes(fxsave_after[2..4].try_into().unwrap());
+        assert_eq!(fsw_after, 0, "FSW should be reset");
+
+        let ftw_after = fxsave_after[4];
+        assert_eq!(ftw_after, 0, "FTW should be 0 (all empty)");
+
+        let fop_after = u16::from_le_bytes(fxsave_after[6..8].try_into().unwrap());
+        assert_eq!(fop_after, 0, "FOP should be 0");
+
+        let fip_after = u64::from_le_bytes(fxsave_after[8..16].try_into().unwrap());
+        assert_eq!(fip_after, 0, "FIP should be 0");
+
+        let fdp_after = u64::from_le_bytes(fxsave_after[16..24].try_into().unwrap());
+        assert_eq!(fdp_after, 0, "FDP should be 0");
+
+        let mxcsr_after = u32::from_le_bytes(fxsave_after[24..28].try_into().unwrap());
+        assert_eq!(
+            mxcsr_after, MXCSR_DEFAULT,
+            "Guest FXSAVE MXCSR should be reset to 0x{:08X}, got 0x{:08X}",
+            MXCSR_DEFAULT, mxcsr_after
+        );
+
+        // ST0-ST7 should be zeroed
+        for i in 0..8 {
+            let offset = 32 + i * 16;
+            let st_bytes = &fxsave_after[offset..offset + 10];
+            assert!(st_bytes.iter().all(|&b| b == 0), "ST{} should be zeroed", i);
+        }
+
+        // XMM0-XMM15 should be zeroed
+        for i in 0..16 {
+            let offset = 160 + i * 16;
+            let xmm_bytes = &fxsave_after[offset..offset + 16];
+            assert!(
+                xmm_bytes.iter().all(|&b| b == 0),
+                "XMM{} should be zeroed",
+                i
+            );
+        }
+    }
+
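+    /// Test harness holding the VM, host shared memory, host function registry, and the
+    /// shared-memory offset of the guest's FXSAVE buffer (captured from R15).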
+    struct FxsaveTestContext {
+        vm: HyperlightVm,
+        hshm: SandboxMemoryManager<HostSharedMemory>,
+        host_funcs: Arc<Mutex<FunctionRegistry>>,
+        fxsave_offset: usize,
+        #[cfg(gdb)]
+        dbg_mem_access_hdl: Arc<Mutex<SandboxMemoryManager<HostSharedMemory>>>,
+    }
+
+    impl FxsaveTestContext {
+        fn run(&mut self) {
+            self.vm
+                .run(
+                    &mut self.hshm,
+                    &self.host_funcs,
+                    #[cfg(gdb)]
+                    self.dbg_mem_access_hdl.clone(),
+                )
+                .unwrap();
+        }
+
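+        /// Copies the 512-byte FXSAVE image out of guest shared memory.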
+        fn read_fxsave(&self) -> [u8; 512] {
+            let mut fxsave = [0u8; 512];
+            self.hshm
+                .shared_mem
+                .copy_to_slice(&mut fxsave, self.fxsave_offset)
+                .unwrap();
+            fxsave
+        }
+    }
+
+    /// Creates a VM with guest code that: dirties the FPU (if flag == 0), does FXSAVE, sets flag = 1.
+    fn hyperlight_vm_with_mem_mgr_fxsave() -> FxsaveTestContext {
+        use iced_x86::code_asm::*;
+
+        // Stack: RSP+0 = FXSAVE (512 bytes), RSP+512 = flag
+        const FXSAVE_STACK_OFFSET: i32 = 0;
+        const FLAG_STACK_OFFSET: i32 = 512;
+        const STACK_ALLOC_SIZE: i32 = 528;
+
+        let mut a = CodeAssembler::new(64).unwrap();
+
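+        // FXSAVE requires a 16-byte-aligned 512-byte memory operand, hence the alignment below.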
+        // Align stack and allocate space
+        a.and(rsp, -16i32).unwrap();
+        a.sub(rsp, STACK_ALLOC_SIZE).unwrap();
+
+        // Check flag: if flag != 0, skip dirty phase
+        a.mov(al, byte_ptr(rsp + FLAG_STACK_OFFSET)).unwrap();
+        a.test(al, al).unwrap();
+        let mut skip_dirty = a.create_label();
+        a.jnz(skip_dirty).unwrap();
+
+        // Dirty x87 FPU (7 pushes so TOP=1)
+        a.fldz().unwrap();
+        a.fldl2e().unwrap();
+        a.fldl2t().unwrap();
+        a.fldlg2().unwrap();
+        a.fldln2().unwrap();
+        a.fldpi().unwrap();
+        a.fld1().unwrap();
+
+        // Dirty FCW (0x0F7F vs default 0x037F)
+        a.sub(rsp, 16i32).unwrap();
+        a.mov(dword_ptr(rsp), 0x0F7Fu32).unwrap();
+        a.fldcw(word_ptr(rsp)).unwrap();
+        a.add(rsp, 16i32).unwrap();
+
+        // Dirty MXCSR (0x3F80 vs default 0x1F80)
+        a.sub(rsp, 16i32).unwrap();
+        a.mov(dword_ptr(rsp), 0x3F80u32).unwrap();
+        a.ldmxcsr(dword_ptr(rsp)).unwrap();
+        a.add(rsp, 16i32).unwrap();
+
+        // Dirty XMM0-7
+        let xmm_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7];
+        for (i, xmm) in xmm_regs.iter().enumerate() {
+            a.mov(eax, 0x11111111u32 * (i as u32 + 1)).unwrap();
+            a.movd(*xmm, eax).unwrap();
+            a.pshufd(*xmm, *xmm, 0).unwrap();
+        }
+
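+        // Mark the dirty phase as done so the post-reset run skips it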
+        a.mov(byte_ptr(rsp + FLAG_STACK_OFFSET), 1u32).unwrap();
+
+        // FXSAVE (runs on both executions)
+        a.set_label(&mut skip_dirty).unwrap();
+        a.lea(rax, ptr(rsp + FXSAVE_STACK_OFFSET)).unwrap();
+        a.fxsave(ptr(rax)).unwrap();
+        a.mov(r15, rsp).unwrap(); // Store RSP for host to read
+
+        a.hlt().unwrap();
+
+        let code = a.assemble(0).unwrap();
+
+        // Create VM with the code
+        let config: SandboxConfiguration = Default::default();
+        #[cfg(any(crashdump, gdb))]
+        let rt_cfg: SandboxRuntimeConfig = Default::default();
+
+        let mut layout =
+            SandboxMemoryLayout::new(config, code.len(), 4096, 4096, 0x3000, 0, None).unwrap();
+
+        let pt_base_gpa = SandboxMemoryLayout::BASE_ADDRESS + layout.get_pt_offset();
+        let pt_buf = GuestPageTableBuffer::new(pt_base_gpa);
+
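+        // Identity-map each sandbox memory region (GVA == GPA) into the guest page tables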
+        for rgn in layout
+            .get_memory_regions_::<GuestMemoryRegion>(())
+            .unwrap()
+            .iter()
+        {
+            let readable = rgn.flags.contains(MemoryRegionFlags::READ);
+            let writable = rgn.flags.contains(MemoryRegionFlags::WRITE)
+                || rgn.flags.contains(MemoryRegionFlags::STACK_GUARD);
+            let executable = rgn.flags.contains(MemoryRegionFlags::EXECUTE);
+            let mapping = Mapping {
+                phys_base: rgn.guest_region.start as u64,
+                virt_base: rgn.guest_region.start as u64,
+                len: rgn.guest_region.len() as u64,
+                kind: MappingKind::BasicMapping(BasicMapping {
+                    readable,
+                    writable,
+                    executable,
+                }),
+            };
+            unsafe { vmem::map(&pt_buf, mapping) };
+        }
+
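+        // Map the page tables themselves at SNAPSHOT_PT_GVA; each map call can grow the
+        // page-table buffer, so loop until its size stops changing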
+        let mut pt_size_mapped = 0;
+        while pt_buf.size() > pt_size_mapped {
+            let mapping = Mapping {
+                phys_base: (pt_base_gpa + pt_size_mapped) as u64,
+                virt_base: (hyperlight_common::layout::SNAPSHOT_PT_GVA + pt_size_mapped) as u64,
+                len: (pt_buf.size() - pt_size_mapped) as u64,
+                kind: MappingKind::BasicMapping(BasicMapping {
+                    readable: true,
+                    writable: true,
+                    executable: false,
+                }),
+            };
+            unsafe { vmem::map(&pt_buf, mapping) };
+            pt_size_mapped = pt_buf.size();
+        }
+
+        let pt_bytes = pt_buf.into_bytes();
+        layout.set_pt_size(pt_bytes.len());
+
+        let mem_size = layout.get_memory_size().unwrap();
+        let mut eshm = ExclusiveSharedMemory::new(mem_size).unwrap();
+        eshm.copy_from_slice(&pt_bytes, layout.get_pt_offset())
+            .unwrap();
+        eshm.copy_from_slice(&code, layout.get_guest_code_offset())
+            .unwrap();
+
+        let load_addr = RawPtr::from(layout.get_guest_code_address() as u64);
+        let scratch_mem = ExclusiveSharedMemory::new(config.get_scratch_size()).unwrap();
+        let mut mem_mgr = SandboxMemoryManager::new(
+            layout,
+            eshm,
+            scratch_mem,
+            load_addr,
+            Some(Offset::from(0)),
+            [0u8; 16],
+        );
+        mem_mgr.write_memory_layout().unwrap();
+
+        let (mut hshm, gshm) = mem_mgr.build();
+
+        let peb_address = gshm.layout.peb_address;
+        let mut vm = set_up_hypervisor_partition(
+            gshm,
+            &config,
+            #[cfg(any(crashdump, gdb))]
+            &rt_cfg,
+            crate::mem::exe::LoadInfo::dummy(),
+        )
+        .unwrap();
+
+        let seed = rand::rng().random::<u64>();
+        let peb_addr = RawPtr::from(u64::try_from(peb_address).unwrap());
+        let page_size = u32::try_from(page_size::get()).unwrap();
+
+        #[cfg(gdb)]
+        let dbg_mem_access_hdl = Arc::new(Mutex::new(hshm.clone()));
+
+        let host_funcs = Arc::new(Mutex::new(FunctionRegistry::default()));
+
+        vm.initialise(
+            peb_addr,
+            seed,
+            page_size,
+            &mut hshm,
+            &host_funcs,
+            None,
+            #[cfg(gdb)]
+            dbg_mem_access_hdl.clone(),
+        )
+        .unwrap();
+
+        // R15 = RSP after stack allocation, convert to host offset
+        let regs = vm.vm.regs().unwrap();
+        let fxsave_offset = regs.r15 as usize - SandboxMemoryLayout::BASE_ADDRESS;
+
+        FxsaveTestContext {
+            vm,
+            hshm,
+            host_funcs,
+            fxsave_offset,
+            #[cfg(gdb)]
+            dbg_mem_access_hdl,
+        }
+    }
 }
 }