
Commit 7ca0a78 (parent: ad01764)

Add scratch region

This adds a second region of guest physical memory, which is intended to become the only mutable region of memory, but which is currently unused.

Signed-off-by: Lucy Menon <168595099+syntactically@users.noreply.github.com>

15 files changed (+284 -29 lines)

src/hyperlight_common/src/arch/amd64/layout.rs

Lines changed: 11 additions & 2 deletions
@@ -14,5 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Keep in mind that the minimum upper half GVA is 0xffff_8000_0000_0000
-pub const SNAPSHOT_PT_GVA: usize = 0xffff_ff00_0000_0000;
+/// We set this to the top of the page below the top of memory in order
+/// to make working with start/end ptrs in a few places more
+/// convenient (not needing to worry about overflow)
+pub const MAX_GVA: usize = 0xffff_ffff_ffff_efff;
+pub const SNAPSHOT_PT_GVA: usize = 0xffff_8000_0000_0000;
+
+/// We assume 36-bit IPAs for now, since every amd64 processor
+/// supports at least 36 bits. Almost all of them support at least 40
+/// bits, so we could consider bumping this in the future if we were
+/// ever memory-constrained.
+pub const MAX_GPA: usize = 0x0000_000f_ffff_ffff;
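
To see the overflow convenience the MAX_GVA comment describes, here is a minimal sketch (illustrative values, not part of the commit): because MAX_GVA ends one page below the top of the address space, an exclusive end pointer computed as base + size still fits in a usize.

    const MAX_GVA: usize = 0xffff_ffff_ffff_efff;

    fn main() {
        // A region whose last byte is MAX_GVA.
        let size: usize = 0x1000;
        let base = MAX_GVA - size + 1;
        // The exclusive end pointer is representable without overflow,
        // since MAX_GVA + 1 == 0xffff_ffff_ffff_f000 < usize::MAX.
        let end = base + size;
        assert_eq!(end, 0xffff_ffff_ffff_f000);
        // Had MAX_GVA been usize::MAX, `base + size` would have wrapped.
    }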

src/hyperlight_common/src/arch/amd64/vmem.rs

Lines changed: 9 additions & 2 deletions
@@ -145,8 +145,15 @@ impl<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableOps> Iterator
         let next_vmin = if self.n == 0 {
             self.request.vmin
         } else {
-            // Align to the next boundary by adding one entry's worth and masking off lower bits
-            (self.request.vmin + (self.n << LOW_BIT)) & !lower_bits_mask
+            // Align to the next boundary by adding one entry's worth
+            // and masking off lower bits. Masking off before adding
+            // is safe, since n << LOW_BIT must always have zeros in
+            // these positions.
+            let aligned_min = self.request.vmin & !lower_bits_mask;
+            // Use checked_add here because going past the end of the
+            // address space counts as "the next one would be out of
+            // range"
+            aligned_min.checked_add(self.n << LOW_BIT)?
         };
 
         // Check if we've processed the entire requested range
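
A worked check of the equivalence the new comment claims (a sketch with illustrative values; LOW_BIT = 12 is assumed here, not taken from the commit): since n << LOW_BIT has zeros in all masked positions, masking vmin before the add reaches the same boundary as adding first and masking after, while also letting checked_add detect walking off the end of the address space.

    fn main() {
        const LOW_BIT: u8 = 12; // illustrative: one 4 KiB page per entry
        let lower_bits_mask: usize = (1 << LOW_BIT) - 1;
        let vmin: usize = 0x1234; // an unaligned request start
        let n: usize = 2; // the third entry produced by the iterator

        // Old form: add first, then mask off the low bits.
        let old = (vmin + (n << LOW_BIT)) & !lower_bits_mask;
        // New form: mask first, then add. The added term has zeros in
        // the masked positions, so the results agree (both 0x3000),
        // and checked_add can now catch address-space overflow.
        let new = (vmin & !lower_bits_mask).checked_add(n << LOW_BIT);
        assert_eq!(Some(old), new);
    }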

src/hyperlight_common/src/layout.rs

Lines changed: 18 additions & 1 deletion
@@ -21,4 +21,21 @@ mod arch;
 
 // The constraint on the feature is temporary and will be removed when other arch i686 is added
 #[cfg(feature = "init-paging")]
-pub use arch::SNAPSHOT_PT_GVA;
+pub use arch::MAX_GPA;
+#[cfg(feature = "init-paging")]
+pub use arch::{MAX_GVA, SNAPSHOT_PT_GVA};
+
+// offsets down from the top of scratch memory for various things
+pub const SCRATCH_TOP_SIZE_OFFSET: u64 = 0x08;
+pub const SCRATCH_TOP_USED_OFFSET: u64 = 0x10;
+pub const SCRATCH_TOP_ALLOCATOR_OFFSET: u64 = 0x18;
+pub const SCRATCH_TOP_EXN_STACK_OFFSET: u64 = 0x20;
+
+#[cfg(feature = "init-paging")]
+pub fn scratch_base_gpa(size: usize) -> u64 {
+    (MAX_GPA - size + 1) as u64
+}
+#[cfg(feature = "init-paging")]
+pub fn scratch_base_gva(size: usize) -> u64 {
+    (MAX_GVA - size + 1) as u64
+}
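
A small sketch of how these pieces compose (hypothetical 64 KiB size; the interpretation that the SCRATCH_TOP_* offsets are measured down from the exclusive top of the region is an assumption, not stated in the diff):

    const MAX_GPA: usize = 0x0000_000f_ffff_ffff;
    const SCRATCH_TOP_SIZE_OFFSET: u64 = 0x08;

    fn scratch_base_gpa(size: usize) -> u64 {
        (MAX_GPA - size + 1) as u64
    }

    fn main() {
        let size = 0x10000; // hypothetical 64 KiB scratch region
        let base = scratch_base_gpa(size);
        // The region occupies [base, MAX_GPA], flush against the
        // 36-bit IPA limit.
        assert_eq!(base, 0x0000_000f_ffff_0000);
        // Under the stated assumption, the size field would sit
        // SCRATCH_TOP_SIZE_OFFSET bytes below the region's exclusive top.
        let size_field_gpa = base + size as u64 - SCRATCH_TOP_SIZE_OFFSET;
        assert_eq!(size_field_gpa, 0x0000_000f_ffff_fff8);
    }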

src/hyperlight_host/src/error.rs

Lines changed: 1 addition & 0 deletions
@@ -351,6 +351,7 @@ impl HyperlightError {
             | HyperlightError::HyperlightVmError(HyperlightVmError::Initialize(_))
             | HyperlightError::HyperlightVmError(HyperlightVmError::MapRegion(_))
             | HyperlightError::HyperlightVmError(HyperlightVmError::UnmapRegion(_))
+            | HyperlightError::HyperlightVmError(HyperlightVmError::UpdateScratch(_))
             | HyperlightError::IOError(_)
             | HyperlightError::IntConversionFailure(_)
             | HyperlightError::InvalidFlatBuffer(_)

src/hyperlight_host/src/hypervisor/hyperlight_vm.rs

Lines changed: 43 additions & 2 deletions
@@ -65,7 +65,7 @@ use crate::hypervisor::{InterruptHandle, InterruptHandleImpl, get_max_log_level}
 use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
 use crate::mem::mgr::SandboxMemoryManager;
 use crate::mem::ptr::{GuestPtr, RawPtr};
-use crate::mem::shared_mem::HostSharedMemory;
+use crate::mem::shared_mem::{GuestSharedMemory, HostSharedMemory, SharedMemory};
 use crate::metrics::{METRIC_ERRONEOUS_VCPU_KICKS, METRIC_GUEST_CANCELLATION};
 use crate::sandbox::SandboxConfiguration;
 use crate::sandbox::host_funcs::FunctionRegistry;
@@ -95,6 +95,10 @@ pub(crate) struct HyperlightVm {
     mmap_regions: Vec<(u32, MemoryRegion)>, // Later mapped regions (slot number, region)
     next_slot: u32, // Monotonically increasing slot number
     freed_slots: Vec<u32>, // Reusable slots from unmapped regions
+    scratch_slot: u32, // The slot number used for the scratch region
+    // The current scratch region, used to keep it alive as long as it
+    // is used & when unmapping
+    scratch_memory: Option<GuestSharedMemory>,
 
     #[cfg(gdb)]
     gdb_conn: Option<DebugCommChannel<DebugResponse, DebugMsg>>,
@@ -247,6 +251,15 @@ pub enum UnmapRegionError {
     UnmapMemory(#[from] UnmapMemoryError),
 }
 
+/// Errors that can occur when updating the scratch mapping
+#[derive(Debug, thiserror::Error)]
+pub enum UpdateScratchError {
+    #[error("VM map memory error: {0}")]
+    MapMemory(#[from] MapMemoryError),
+    #[error("VM unmap memory error: {0}")]
+    UnmapMemory(#[from] UnmapMemoryError),
+}
+
 /// Errors that can occur during HyperlightVm creation
 #[derive(Debug, thiserror::Error)]
 pub enum CreateHyperlightVmError {
@@ -262,6 +275,8 @@ pub enum CreateHyperlightVmError {
     SendDbgMsg(#[from] SendDbgMsgError),
     #[error("VM operation error: {0}")]
     Vm(#[from] VmError),
+    #[error("Set scratch error: {0}")]
+    UpdateScratch(#[from] UpdateScratchError),
 }
 
 /// Errors that can occur during debug exit handling
@@ -311,6 +326,8 @@ pub enum HyperlightVmError {
     MapRegion(#[from] MapRegionError),
     #[error("Unmap region error: {0}")]
     UnmapRegion(#[from] UnmapRegionError),
+    #[error("Update scratch error: {0}")]
+    UpdateScratch(#[from] UpdateScratchError),
 }
 
 impl HyperlightVm {
@@ -319,6 +336,7 @@ impl HyperlightVm {
     #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         mem_regions: Vec<MemoryRegion>,
+        scratch_mem: GuestSharedMemory,
         _pml4_addr: u64,
         entrypoint: u64,
         rsp: u64,
@@ -395,6 +413,7 @@ impl HyperlightVm {
             }),
         });
 
+        let scratch_slot = mem_regions.len() as u32;
         #[cfg_attr(not(gdb), allow(unused_mut))]
         let mut ret = Self {
             vm,
@@ -403,10 +422,12 @@ impl HyperlightVm {
             interrupt_handle,
             page_size: 0, // Will be set in `initialise`
 
-            next_slot: mem_regions.len() as u32,
+            next_slot: scratch_slot + 1,
             sandbox_regions: mem_regions,
             mmap_regions: Vec::new(),
             freed_slots: Vec::new(),
+            scratch_slot,
+            scratch_memory: None,
 
             #[cfg(gdb)]
             gdb_conn,
@@ -418,6 +439,8 @@ impl HyperlightVm {
             rt_cfg,
         };
 
+        ret.update_scratch_mapping(scratch_mem)?;
+
         // Send the interrupt handle to the GDB thread if debugging is enabled
         // This is used to allow the GDB thread to stop the vCPU
         #[cfg(gdb)]
@@ -542,6 +565,24 @@ impl HyperlightVm {
         self.mmap_regions.iter().map(|(_, region)| region)
     }
 
+    /// Update the scratch mapping to point to a new GuestSharedMemory
+    pub(crate) fn update_scratch_mapping(
+        &mut self,
+        scratch: GuestSharedMemory,
+    ) -> Result<(), UpdateScratchError> {
+        let guest_base = hyperlight_common::layout::scratch_base_gpa(scratch.mem_size());
+        let rgn = scratch.mapping_at(guest_base, MemoryRegionType::Scratch);
+
+        if let Some(old_scratch) = self.scratch_memory.replace(scratch) {
+            let old_base = hyperlight_common::layout::scratch_base_gpa(old_scratch.mem_size());
+            let old_rgn = old_scratch.mapping_at(old_base, MemoryRegionType::Scratch);
+            self.vm.unmap_memory((self.scratch_slot, &old_rgn))?;
+        }
+        unsafe { self.vm.map_memory((self.scratch_slot, &rgn))? };
+
+        Ok(())
+    }
+
     /// Dispatch a call from the host to the guest using the given pointer
     /// to the dispatch function _in the guest's address space_.
     ///

src/hyperlight_host/src/hypervisor/mod.rs

Lines changed: 2 additions & 2 deletions
@@ -525,9 +525,9 @@ pub(crate) mod tests {
         let rt_cfg: SandboxRuntimeConfig = Default::default();
         let sandbox =
             UninitializedSandbox::new(GuestBinary::FilePath(filename.clone()), Some(config))?;
-        let (mut mem_mgr, mut gshm) = sandbox.mgr.build();
+        let (mut mem_mgr, gshm) = sandbox.mgr.build();
         let mut vm = set_up_hypervisor_partition(
-            &mut gshm,
+            gshm,
             &config,
             #[cfg(any(crashdump, gdb))]
             &rt_cfg,

src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs

Lines changed: 17 additions & 0 deletions
@@ -83,6 +83,23 @@ impl KvmVm {
             .create_vcpu(0)
             .map_err(|e| CreateVmError::CreateVcpuFd(e.into()))?;
 
+        // Set the CPUID leaf for MaxPhysAddr. KVM allows this to
+        // easily be overridden by the hypervisor and defaults it very
+        // low, while mshv passes it through from hardware unless an
+        // intercept is installed.
+        let mut kvm_cpuid = hv
+            .get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
+            .map_err(|e| CreateVmError::InitializeVm(e.into()))?;
+        for entry in kvm_cpuid.as_mut_slice().iter_mut() {
+            if entry.function == 0x8000_0008 {
+                entry.eax &= !0xff;
+                entry.eax |= hyperlight_common::layout::MAX_GPA.ilog2() + 1;
+            }
+        }
+        vcpu_fd
+            .set_cpuid2(&kvm_cpuid)
+            .map_err(|e| CreateVmError::InitializeVm(e.into()))?;
+
         Ok(Self {
             vm_fd,
             vcpu_fd,
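
As a sanity check on the arithmetic (illustrative, not part of the commit): EAX[7:0] of CPUID leaf 0x8000_0008 encodes the physical address width in bits, and for MAX_GPA = 2^36 - 1 the expression evaluates to 36, matching the 36-bit IPA assumption in layout.rs.

    fn main() {
        const MAX_GPA: usize = 0x0000_000f_ffff_ffff; // 2^36 - 1
        // ilog2 of 2^36 - 1 is 35, so the guest is told it has a
        // 36-bit physical address width.
        assert_eq!(MAX_GPA.ilog2() + 1, 36);
    }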

src/hyperlight_host/src/mem/layout.rs

Lines changed: 16 additions & 1 deletion
@@ -126,6 +126,10 @@ pub(crate) struct SandboxMemoryLayout {
     // The offset in the sandbox memory where the code starts
     guest_code_offset: usize,
     pub(crate) init_data_permissions: Option<MemoryRegionFlags>,
+
+    // The size of the scratch region in physical memory; note that
+    // this will appear under the top of physical memory.
+    scratch_size: usize,
 }
 
 impl Debug for SandboxMemoryLayout {
@@ -202,6 +206,10 @@ impl Debug for SandboxMemoryLayout {
                 "Guest Code Offset",
                 &format_args!("{:#x}", self.guest_code_offset),
             )
+            .field(
+                "Scratch region size",
+                &format_args!("{:#x}", self.scratch_size),
+            )
             .finish()
     }
 }
@@ -229,6 +237,7 @@ impl SandboxMemoryLayout {
         code_size: usize,
         stack_size: usize,
         heap_size: usize,
+        scratch_size: usize,
         init_data_size: usize,
         init_data_permissions: Option<MemoryRegionFlags>,
     ) -> Result<Self> {
@@ -295,6 +304,7 @@ impl SandboxMemoryLayout {
             init_data_permissions,
             pt_offset,
             pt_size: None,
+            scratch_size,
         })
     }
 
@@ -324,6 +334,11 @@ impl SandboxMemoryLayout {
         self.stack_size
     }
 
+    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
+    pub(super) fn get_scratch_size(&self) -> usize {
+        self.scratch_size
+    }
+
     /// Get the offset in guest memory to the output data pointer.
     #[instrument(skip_all, parent = Span::current(), level= "Trace")]
     fn get_output_data_pointer_offset(&self) -> usize {
@@ -818,7 +833,7 @@ mod tests {
     fn test_get_memory_size() {
         let sbox_cfg = SandboxConfiguration::default();
         let sbox_mem_layout =
-            SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0, None).unwrap();
+            SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0x3000, 0, None).unwrap();
         assert_eq!(
             sbox_mem_layout.get_memory_size().unwrap(),
             get_expected_memory_size(&sbox_mem_layout)

src/hyperlight_host/src/mem/memory_region.rs

Lines changed: 2 additions & 0 deletions
@@ -138,6 +138,8 @@ pub enum MemoryRegionType {
     GuardPage,
     /// The region contains the Stack
     Stack,
+    /// The scratch region
+    Scratch,
 }
 
 /// A trait that distinguishes between different kinds of memory region representations.

src/hyperlight_host/src/mem/mgr.rs

Lines changed: 40 additions & 6 deletions
@@ -45,6 +45,8 @@ pub(crate) const STACK_COOKIE_LEN: usize = 16;
 pub(crate) struct SandboxMemoryManager<S> {
     /// Shared memory for the Sandbox
     pub(crate) shared_mem: S,
+    /// Scratch memory for the Sandbox
+    pub(crate) scratch_mem: S,
     /// The memory layout of the underlying shared memory
     pub(crate) layout: SandboxMemoryLayout,
     /// Pointer to where to load memory from
@@ -145,13 +147,15 @@
     pub(crate) fn new(
         layout: SandboxMemoryLayout,
         shared_mem: S,
+        scratch_mem: S,
         load_addr: RawPtr,
         entrypoint_offset: Option<Offset>,
         stack_cookie: [u8; STACK_COOKIE_LEN],
     ) -> Self {
         Self {
             layout,
             shared_mem,
+            scratch_mem,
             load_addr,
             entrypoint_offset,
             mapped_rgns: 0,
@@ -191,26 +195,22 @@
             mapped_regions,
         )
     }
-
-    /// This function restores a memory snapshot from a given snapshot.
-    pub(crate) fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
-        self.shared_mem.restore_from_snapshot(snapshot)?;
-        Ok(())
-    }
 }
 
 impl SandboxMemoryManager<ExclusiveSharedMemory> {
     pub(crate) fn from_snapshot(s: &Snapshot) -> Result<Self> {
         let layout = *s.layout();
         let mut shared_mem = ExclusiveSharedMemory::new(s.mem_size())?;
         shared_mem.copy_from_slice(s.memory(), 0)?;
+        let scratch_mem = ExclusiveSharedMemory::new(s.layout().get_scratch_size())?;
         let load_addr: RawPtr = RawPtr::try_from(layout.get_guest_code_address())?;
         let stack_cookie = rand::random::<[u8; STACK_COOKIE_LEN]>();
         let entrypoint_gva = s.preinitialise();
         let entrypoint_offset = entrypoint_gva.map(|x| (x - u64::from(&load_addr)).into());
         Ok(Self::new(
             layout,
             shared_mem,
+            scratch_mem,
             load_addr,
             entrypoint_offset,
             stack_cookie,
@@ -236,9 +236,11 @@ impl SandboxMemoryManager<ExclusiveSharedMemory> {
             SandboxMemoryManager<GuestSharedMemory>,
         ) {
         let (hshm, gshm) = self.shared_mem.build();
+        let (hscratch, gscratch) = self.scratch_mem.build();
         (
             SandboxMemoryManager {
                 shared_mem: hshm,
+                scratch_mem: hscratch,
                 layout: self.layout,
                 load_addr: self.load_addr.clone(),
                 entrypoint_offset: self.entrypoint_offset,
@@ -248,6 +250,7 @@ impl SandboxMemoryManager<ExclusiveSharedMemory> {
             },
             SandboxMemoryManager {
                 shared_mem: gshm,
+                scratch_mem: gscratch,
                 layout: self.layout,
                 load_addr: self.load_addr.clone(),
                 entrypoint_offset: self.entrypoint_offset,
@@ -382,6 +385,37 @@ impl SandboxMemoryManager<HostSharedMemory> {
             };
         }
     }
+
+    /// This function restores a memory snapshot from a given snapshot.
+    pub(crate) fn restore_snapshot(
+        &mut self,
+        snapshot: &Snapshot,
+    ) -> Result<Option<GuestSharedMemory>> {
+        if self.shared_mem.mem_size() != snapshot.mem_size() {
+            return Err(new_error!(
+                "Snapshot size does not match current memory size: {} != {}",
+                self.shared_mem.raw_mem_size(),
+                snapshot.mem_size()
+            ));
+        }
+        self.shared_mem.restore_from_snapshot(snapshot)?;
+        let new_scratch_size = snapshot.layout().get_scratch_size();
+        if new_scratch_size == self.scratch_mem.mem_size() {
+            self.scratch_mem.zero()?;
+            Ok(None)
+        } else {
+            let new_scratch_mem = ExclusiveSharedMemory::new(new_scratch_size)?;
+            let (hscratch, gscratch) = new_scratch_mem.build();
+            // Even though this destroys the reference to the host
+            // side of the old scratch mapping, the VM should still
+            // own the reference to the guest side of the old scratch
+            // mapping, so it won't actually be deallocated until it
+            // has been unmapped from the VM.
+            self.scratch_mem = hscratch;
+
+            Ok(Some(gscratch))
+        }
+    }
 }
 
 #[cfg(test)]
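
Given restore_snapshot's new return type, the expected caller flow would look roughly like this (a hedged sketch using the names above; the wiring is not shown in this commit): when the scratch region had to be reallocated at a new size, the guest-side half must be handed to the VM so it can swap the mapping on the dedicated scratch slot.

    // Hypothetical helper, for illustration only.
    fn restore(
        mgr: &mut SandboxMemoryManager<HostSharedMemory>,
        vm: &mut HyperlightVm,
        snapshot: &Snapshot,
    ) -> Result<()> {
        // Restores shared memory in place; returns Some(gscratch) only
        // if the scratch region was reallocated at a different size.
        if let Some(new_scratch) = mgr.restore_snapshot(snapshot)? {
            vm.update_scratch_mapping(new_scratch)?;
        }
        Ok(())
    }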
