Review/remove and prevent unintentional use of expect/assert*/panic/unwrap in hyperlight_host and hyperlight_common #325

Merged
7 changes: 7 additions & 0 deletions src/hyperlight_common/src/clippy.toml
@@ -0,0 +1,7 @@
disallowed-macros = [
{ path = "std::assert", reason = "no asserts in release builds" },
{ path = "std::assert_eq", reason = "no asserts in release builds" },
{ path = "std::assert_ne", reason = "no asserts in release builds" },
{ path = "std::assert_true", reason = "no asserts in release builds" },
{ path = "std::assert_false", reason = "no asserts in release builds" },
]
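
Note: `disallowed-macros` entries in a `clippy.toml` drive Clippy's `disallowed_macros` lint. A minimal, illustrative sketch of the kind of code this configuration is meant to catch (the function and values are made up):

```rust
// Flagged by Clippy's `disallowed_macros` lint per the clippy.toml above:
// "no asserts in release builds".
fn check_page_alignment(size: usize) {
    assert_eq!(size % 4096, 0, "size must be page-aligned");
}

fn main() {
    check_page_alignment(8192);
}
```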
4 changes: 4 additions & 0 deletions src/hyperlight_common/src/lib.rs
@@ -14,6 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::panic))]
#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::expect_used))]
#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::unwrap_used))]
// We use Arbitrary during fuzzing, which requires std
#![cfg_attr(not(feature = "fuzzing"), no_std)]

@@ -26,6 +29,7 @@ pub mod flatbuffer_wrappers;
dead_code,
unused_imports,
clippy::all,
clippy::unwrap_used,
unsafe_op_in_unsafe_fn,
non_camel_case_types
)]
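
The `cfg_attr` lines above turn the lints on only for release builds. A standalone crate-root sketch of the same gating pattern (the crate contents are illustrative):

```rust
// Lints apply only when neither `test` nor `debug_assertions` is set,
// i.e. in release builds; tests and debug builds are left alone.
#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::unwrap_used))]

pub fn parse_port(s: &str) -> Result<u16, core::num::ParseIntError> {
    // Writing `s.parse().unwrap()` here would trip clippy::unwrap_used in a
    // release build; returning the Result propagates the failure instead.
    s.parse()
}
```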
7 changes: 7 additions & 0 deletions src/hyperlight_host/src/clippy.toml
@@ -0,0 +1,7 @@
disallowed-macros = [
{ path = "std::assert", reason = "no asserts in release builds" },
{ path = "std::assert_eq", reason = "no asserts in release builds" },
{ path = "std::assert_ne", reason = "no asserts in release builds" },
{ path = "std::assert_true", reason = "no asserts in release builds" },
{ path = "std::assert_false", reason = "no asserts in release builds" },
]
21 changes: 14 additions & 7 deletions src/hyperlight_host/src/hypervisor/hypervisor_handler.rs
@@ -246,7 +246,11 @@ impl HypervisorHandler {
#[cfg(target_os = "windows")]
let in_process = sandbox_memory_manager.is_in_process();

*self.execution_variables.shm.try_lock().unwrap() = Some(sandbox_memory_manager);
*self
.execution_variables
.shm
.try_lock()
.map_err(|e| new_error!("Failed to lock shm: {}", e))? = Some(sandbox_memory_manager);

// Other than running initialization and code execution, the handler thread also handles
// cancellation. When we need to cancel the execution there are 2 possible cases
@@ -299,13 +303,13 @@ impl HypervisorHandler {
HypervisorHandlerAction::Initialise => {
{
hv = Some(set_up_hypervisor_partition(
execution_variables.shm.try_lock().unwrap().deref_mut().as_mut().unwrap(),
execution_variables.shm.try_lock().map_err(|e| new_error!("Failed to lock shm: {}", e))?.deref_mut().as_mut().ok_or_else(|| new_error!("shm not set"))?,
configuration.outb_handler.clone(),
#[cfg(gdb)]
&debug_info,
)?);
}
let hv = hv.as_mut().unwrap();
let hv = hv.as_mut().ok_or_else(|| new_error!("Hypervisor not set"))?;

#[cfg(target_os = "windows")]
if !in_process {
@@ -386,7 +390,7 @@ impl HypervisorHandler {
}
}
HypervisorHandlerAction::DispatchCallFromHost(function_name) => {
let hv = hv.as_mut().unwrap();
let hv = hv.as_mut().ok_or_else(|| new_error!("Hypervisor not initialized"))?;

// Lock to indicate an action is being performed in the hypervisor
execution_variables.running.store(true, Ordering::SeqCst);
@@ -647,6 +651,8 @@ impl HypervisorHandler {
// If the thread has finished, we try to join it and return the error if it has one
let res = handle.join();
if res.as_ref().is_ok_and(|inner_res| inner_res.is_err()) {
#[allow(clippy::unwrap_used)]
// We know that the thread has finished and that the inner result is an error, so we can safely unwrap the result and the contained err
return Err(res.unwrap().unwrap_err());
}
Err(HyperlightError::HypervisorHandlerMessageReceiveTimedout())
@@ -757,7 +763,7 @@ impl HypervisorHandler {
if thread_id == u64::MAX {
log_then_return!("Failed to get thread id to signal thread");
}
let mut count: i32 = 0;
let mut count: u128 = 0;
// We need to send the signal multiple times in case the thread was between checking if it
// should be cancelled and entering the run loop

@@ -771,7 +777,7 @@
while !self.execution_variables.run_cancelled.load() {
count += 1;

if count > number_of_iterations.try_into().unwrap() {
if count > number_of_iterations {
break;
}

@@ -797,7 +803,8 @@
// partition handle only set when running in-hypervisor (not in-process)
unsafe {
WHvCancelRunVirtualProcessor(
self.execution_variables.get_partition_handle()?.unwrap(), // safe unwrap
#[allow(clippy::unwrap_used)]
self.execution_variables.get_partition_handle()?.unwrap(), // safe unwrap as we checked is some
0,
0,
)
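
The recurring change in this file is converting `Mutex` and `Option` failures into `Result`s instead of unwrapping. A minimal sketch of that conversion outside Hyperlight (the error type is a plain `String` here rather than the crate's `new_error!`-built error):

```rust
use std::sync::Mutex;

// Illustrative stand-in for the handler's shared state; the real code wraps
// a SandboxMemoryManager behind the mutex.
fn take_value(slot: &Mutex<Option<u64>>) -> Result<u64, String> {
    // try_lock fails if the mutex is already held (or poisoned); map_err
    // turns that into a recoverable error instead of panicking via unwrap().
    let guard = slot
        .try_lock()
        .map_err(|e| format!("Failed to lock slot: {}", e))?;
    // ok_or_else replaces `.unwrap()` on the Option with a descriptive error.
    (*guard).ok_or_else(|| "slot not set".to_string())
}

fn main() {
    let slot = Mutex::new(Some(42u64));
    println!("{:?}", take_value(&slot)); // Ok(42)
}
```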
27 changes: 21 additions & 6 deletions src/hyperlight_host/src/hypervisor/surrogate_process_manager.rs
@@ -23,7 +23,7 @@ use std::path::{Path, PathBuf};
use crossbeam_channel::{unbounded, Receiver, Sender};
use hyperlight_common::mem::PAGE_SIZE_USIZE;
use rust_embed::RustEmbed;
use tracing::{info, instrument, Span};
use tracing::{error, info, instrument, Span};
use windows::core::{s, PCSTR};
use windows::Win32::Foundation::HANDLE;
use windows::Win32::Security::SECURITY_ATTRIBUTES;
@@ -251,27 +251,42 @@ impl Drop for SurrogateProcessManager {
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
fn drop(&mut self) {
let handle: HANDLE = self.job_handle.into();
unsafe {
if unsafe {
// Terminating the job object will terminate all the surrogate
// processes.

TerminateJobObject(handle, 0)
}
.expect("surrogate job objects were not all terminated error:");
.is_err()
{
error!("surrogate job objects were not all terminated");
}
}
}

lazy_static::lazy_static! {
// see the large comment inside `SurrogateProcessManager` describing
// our reasoning behind using `lazy_static`.
static ref SURROGATE_PROCESSES_MANAGER: SurrogateProcessManager =
SurrogateProcessManager::new().unwrap();
static ref SURROGATE_PROCESSES_MANAGER: std::result::Result<SurrogateProcessManager, &'static str> =
match SurrogateProcessManager::new() {
Ok(manager) => Ok(manager),
Err(e) => {
error!("Failed to create SurrogateProcessManager: {:?}", e);
Err("Failed to create SurrogateProcessManager")
}
};
}

/// Gets the singleton SurrogateProcessManager. This should be called when a new HyperV on Windows Driver is created.
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn get_surrogate_process_manager() -> Result<&'static SurrogateProcessManager> {
Ok(&SURROGATE_PROCESSES_MANAGER)
match &*SURROGATE_PROCESSES_MANAGER {
Ok(manager) => Ok(manager),
Err(e) => {
error!("Failed to get SurrogateProcessManager: {:?}", e);
Err(new_error!("Failed to get SurrogateProcessManager {}", e))
}
}
}

// Creates a job object that will terminate all the surrogate processes when the struct instance is dropped.
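
The `lazy_static` change stores the construction `Result` so the first caller gets an error instead of a panic inside the initializer. A small sketch of the same singleton pattern, using `std::sync::OnceLock` to stay dependency-free (the `Manager` type is a stand-in):

```rust
use std::sync::OnceLock;

// Stand-in for SurrogateProcessManager; construction can genuinely fail.
struct Manager;

impl Manager {
    fn new() -> Result<Self, String> {
        Ok(Manager)
    }
}

// The singleton stores the construction Result, so a failure is remembered
// and reported to every caller instead of panicking in the initializer.
static MANAGER: OnceLock<Result<Manager, String>> = OnceLock::new();

fn get_manager() -> Result<&'static Manager, String> {
    MANAGER
        .get_or_init(Manager::new)
        .as_ref()
        .map_err(|e| format!("Failed to get Manager: {}", e))
}

fn main() {
    println!("manager available: {}", get_manager().is_ok());
}
```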
30 changes: 18 additions & 12 deletions src/hyperlight_host/src/hypervisor/windows_hypervisor_platform.rs
@@ -104,24 +104,28 @@ impl VMPartition {
};

regions.iter().try_for_each(|region| unsafe {
let flags = region
.flags
.iter()
.map(|flag| match flag {
MemoryRegionFlags::NONE => Ok(WHvMapGpaRangeFlagNone),
MemoryRegionFlags::READ => Ok(WHvMapGpaRangeFlagRead),
MemoryRegionFlags::WRITE => Ok(WHvMapGpaRangeFlagWrite),
MemoryRegionFlags::EXECUTE => Ok(WHvMapGpaRangeFlagExecute),
MemoryRegionFlags::STACK_GUARD => Ok(WHvMapGpaRangeFlagNone),
_ => Err(new_error!("Invalid Memory Region Flag")),
})
.collect::<Result<Vec<WHV_MAP_GPA_RANGE_FLAGS>>>()?
.iter()
.fold(WHvMapGpaRangeFlagNone, |acc, flag| acc | *flag); // collect using bitwise OR

let res = whvmapgparange2_func(
self.0,
process_handle,
region.host_region.start as *const c_void,
region.guest_region.start as u64,
(region.guest_region.end - region.guest_region.start) as u64,
region
.flags
.iter()
.filter_map(|flag| match flag {
MemoryRegionFlags::NONE => Some(WHvMapGpaRangeFlagNone),
MemoryRegionFlags::READ => Some(WHvMapGpaRangeFlagRead),
MemoryRegionFlags::WRITE => Some(WHvMapGpaRangeFlagWrite),
MemoryRegionFlags::EXECUTE => Some(WHvMapGpaRangeFlagExecute),
MemoryRegionFlags::STACK_GUARD => None,
_ => panic!("Invalid flag"),
})
.fold(WHvMapGpaRangeFlagNone, |acc, flag| acc | flag), // collect using bitwise OR,
flags,
);
if res.is_err() {
return Err(new_error!("Call to WHvMapGpaRange2 failed"));
@@ -161,6 +165,8 @@ pub unsafe fn try_load_whv_map_gpa_range2() -> Result<WHvMapGpaRange2Func> {
return Err(new_error!("{}", e));
}

#[allow(clippy::unwrap_used)]
// We know this will succeed because we just checked for an error above
let library = library.unwrap();

let address = unsafe { GetProcAddress(library, s!("WHvMapGpaRange2")) };
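
The flag-mapping change collects fallible conversions into a `Result` before OR-ing them together, so an unknown flag becomes an error rather than a panic inside `filter_map`. A self-contained sketch of the collect-then-fold pattern with made-up flag values:

```rust
// Illustrative flag types; the real code maps MemoryRegionFlags to
// WHV_MAP_GPA_RANGE_FLAGS values from the Windows Hypervisor Platform API.
#[derive(Clone, Copy)]
enum RegionFlag {
    Read,
    Write,
    Execute,
    Unsupported,
}

fn to_platform_flags(flags: &[RegionFlag]) -> Result<u32, String> {
    // Map each flag to a Result first so an unknown flag is reported as an
    // error (the old code panicked inside filter_map), then OR them together.
    let mapped = flags
        .iter()
        .map(|f| match f {
            RegionFlag::Read => Ok(0b001u32),
            RegionFlag::Write => Ok(0b010),
            RegionFlag::Execute => Ok(0b100),
            RegionFlag::Unsupported => Err("Invalid memory region flag".to_string()),
        })
        .collect::<Result<Vec<u32>, String>>()?;
    Ok(mapped.iter().fold(0, |acc, flag| acc | *flag))
}

fn main() {
    println!("{:?}", to_platform_flags(&[RegionFlag::Read, RegionFlag::Write])); // Ok(3)
}
```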
5 changes: 5 additions & 0 deletions src/hyperlight_host/src/lib.rs
@@ -17,6 +17,11 @@ limitations under the License.
//! This crate contains an SDK that is used to execute specially-
// compiled binaries within a very lightweight hypervisor environment.

#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::panic))]
#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::expect_used))]
#![cfg_attr(not(any(test, debug_assertions)), warn(clippy::unwrap_used))]
#![cfg_attr(any(test, debug_assertions), allow(clippy::disallowed_macros))]

use std::sync::Once;

use log::info;
6 changes: 4 additions & 2 deletions src/hyperlight_host/src/mem/elf.rs
@@ -52,20 +52,22 @@ impl ElfInfo {
self.entry
}
pub(crate) fn get_base_va(&self) -> u64 {
#[allow(clippy::unwrap_used)] // guaranteed not to panic because of the check in new()
let min_phdr = self
.phdrs
.iter()
.find(|phdr| phdr.p_type == PT_LOAD)
.unwrap(); // guaranteed not to panic because of the check in new()
.unwrap();
min_phdr.p_vaddr
}
pub(crate) fn get_va_size(&self) -> usize {
#[allow(clippy::unwrap_used)] // guaranteed not to panic because of the check in new()
let max_phdr = self
.phdrs
.iter()
.rev()
.find(|phdr| phdr.p_type == PT_LOAD)
.unwrap(); // guaranteed not to panic because of the check in new()
.unwrap();
(max_phdr.p_vaddr + max_phdr.p_memsz - self.get_base_va()) as usize
}
pub(crate) fn load_at(&self, load_addr: usize, target: &mut [u8]) -> Result<()> {
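
Where an unwrap is protected by an invariant established in the constructor (here, `new()` verifies a `PT_LOAD` header exists), the diff keeps the unwrap but scopes an `allow` to it with a justification comment. A minimal sketch of that pattern with illustrative types:

```rust
struct Headers {
    // new() guarantees at least one entry, mirroring the PT_LOAD check
    // performed by ElfInfo::new().
    entries: Vec<u32>,
}

impl Headers {
    fn new(entries: Vec<u32>) -> Result<Self, String> {
        if entries.is_empty() {
            return Err("no entries".to_string());
        }
        Ok(Headers { entries })
    }

    fn first(&self) -> u32 {
        // The constructor rejects an empty Vec, so this unwrap cannot fail;
        // the allow both documents and narrowly scopes that decision.
        #[allow(clippy::unwrap_used)]
        let first = *self.entries.first().unwrap();
        first
    }
}

fn main() {
    if let Ok(h) = Headers::new(vec![7, 8, 9]) {
        println!("{}", h.first());
    }
}
```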
2 changes: 1 addition & 1 deletion src/hyperlight_host/src/mem/loaded_lib.rs
@@ -54,7 +54,7 @@ impl LoadedLib {
// arc reference is dropped, but before the destructor is executed. This however
// is ok, as it means that the old library is not going to be used anymore and
// we can use it instead.
let mut lock = LOADED_LIB.lock().unwrap();
let mut lock = LOADED_LIB.lock()?;
if lock.upgrade().is_some() {
// An owning copy of the loaded library still exists somewhere,
// we can't load a new library yet
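
`LOADED_LIB.lock()?` relies on the crate's error type being constructible from a lock `PoisonError`. A sketch of one way to provide that conversion (the `Error` enum is illustrative, not Hyperlight's `HyperlightError`):

```rust
use std::sync::{Mutex, MutexGuard, PoisonError};

// Illustrative error type; a conversion like this is what lets `lock()?`
// compile without an `.unwrap()` on the lock result.
#[derive(Debug)]
enum Error {
    LockPoisoned(String),
}

impl<'a, T> From<PoisonError<MutexGuard<'a, T>>> for Error {
    fn from(e: PoisonError<MutexGuard<'a, T>>) -> Self {
        Error::LockPoisoned(e.to_string())
    }
}

fn bump(counter: &Mutex<u64>) -> Result<u64, Error> {
    // `?` converts a poisoned-lock error via the From impl above.
    let mut guard = counter.lock()?;
    *guard += 1;
    Ok(*guard)
}

fn main() {
    let counter = Mutex::new(0u64);
    println!("{:?}", bump(&counter)); // Ok(1)
}
```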
2 changes: 2 additions & 0 deletions src/hyperlight_host/src/mem/memory_region.rs
@@ -202,6 +202,8 @@ impl MemoryRegionVecBuilder {
return guest_end - self.guest_base_phys_addr;
}

#[allow(clippy::unwrap_used)]
// we know this is safe because we check if the regions are empty above
let last_region = self.regions.last().unwrap();
let new_region = MemoryRegion {
guest_region: last_region.guest_region.end..last_region.guest_region.end + size,
1 change: 1 addition & 0 deletions src/hyperlight_host/src/mem/mgr.rs
@@ -300,6 +300,7 @@ where
if last.is_none() {
log_then_return!(NoMemorySnapshot);
}
#[allow(clippy::unwrap_used)] // We know that last is not None because we checked it above
let snapshot = last.unwrap();
snapshot.restore_from_snapshot(&mut self.shared_mem)
}
8 changes: 5 additions & 3 deletions src/hyperlight_host/src/mem/pe/base_relocations.rs
@@ -91,6 +91,7 @@ impl<'a> Iterator for BaseRelocations<'a> {
let word = u16::from_le_bytes(block);

// Shift the word over so that we are only reading a number from the first (most significant) 4 bits
#[allow(clippy::unwrap_used)] // this is safe as we are only reading 4 bits from a u16
let typ = u8::try_from(word >> 12).unwrap();

// Set the first 4 bits to 0 so that we only use the lower 12 bits for the location of the RVA in the PE file
@@ -132,9 +133,10 @@
// Process each block of relocations until we have processed the expected amount of relocation data
while size_processed < table_size {
// Read the header for the block, which is the same format as a DataDirectory so I'm reusing its parse function
let block_header =
goblin::pe::data_directories::DataDirectory::parse(payload, &mut next_block_offset)
.expect("oops");
let block_header = goblin::pe::data_directories::DataDirectory::parse(
payload,
&mut next_block_offset,
)?;
// All relocation blocks in a page contain offsets against this address
let page_virtual_address = block_header.virtual_address;

22 changes: 10 additions & 12 deletions src/hyperlight_host/src/mem/pe/pe_info.rs
@@ -67,10 +67,10 @@ impl PEInfo {
log_then_return!("unsupported PE file, not an executable image")
}

let optional_header = pe
.header
.optional_header
.expect("unsupported PE file, missing optional header entry");
let optional_header = match pe.header.optional_header {
Some(optional_header) => optional_header,
None => log_then_return!("unsupported PE file, no optional header found"),
};

// check that the PE file was built with the option /DYNAMICBASE

@@ -117,10 +117,10 @@
// we are going to take care of the data section
if name == ".data" {
// Make sure we fail if we enter this block more than once
assert_eq!(
data_section_additional_bytes, 0,
"Hyperlight currently only supports one .data section"
);

if data_section_additional_bytes > 0 {
log_then_return!("Hyperlight currently only supports one .data section")
}

data_section_raw_pointer = section.pointer_to_raw_data;
data_section_additional_bytes = virtual_size - raw_size;
@@ -251,8 +251,7 @@
}

cur.set_position(patch.offset as u64);
cur.write_all(&patch.relocated_virtual_address.to_le_bytes())
.expect("failed to write patch to pe file contents");
cur.write_all(&patch.relocated_virtual_address.to_le_bytes())?;
applied += 1;
}

@@ -279,8 +278,7 @@
}

let relocations =
base_relocations::get_base_relocations(&self.payload, &self.reloc_section)
.expect("error parsing base relocations");
base_relocations::get_base_relocations(&self.payload, &self.reloc_section)?;
let mut patches = Vec::with_capacity(relocations.len());

for reloc in relocations {
15 changes: 9 additions & 6 deletions src/hyperlight_host/src/mem/shared_mem.rs
@@ -328,10 +328,13 @@ impl ExclusiveSharedMemory {
.checked_add(2 * PAGE_SIZE_USIZE) // guard page around the memory
.ok_or_else(|| new_error!("Memory required for sandbox exceeded usize::MAX"))?;

assert!(
total_size % PAGE_SIZE_USIZE == 0,
"shared memory must be a multiple of 4096"
);
if total_size % PAGE_SIZE_USIZE != 0 {
return Err(new_error!(
"shared memory must be a multiple of {}",
PAGE_SIZE_USIZE
));
}

// usize and isize are guaranteed to be the same size, and
// isize::MAX should be positive, so this cast should be safe.
if total_size > isize::MAX as usize {
@@ -885,7 +888,7 @@ impl HostSharedMemory {
buffer_size: usize,
data: &[u8],
) -> Result<()> {
let stack_pointer_rel = self.read::<u64>(buffer_start_offset).unwrap() as usize;
let stack_pointer_rel = self.read::<u64>(buffer_start_offset)? as usize;
let buffer_size_u64: u64 = buffer_size.try_into()?;

if stack_pointer_rel > buffer_size || stack_pointer_rel < 8 {
@@ -953,7 +956,7 @@

// go back 8 bytes to get offset to element on top of stack
let last_element_offset_rel: usize =
self.read::<u64>(last_element_offset_abs - 8).unwrap() as usize;
self.read::<u64>(last_element_offset_abs - 8)? as usize;

// make it absolute
let last_element_offset_abs = last_element_offset_rel + buffer_start_offset;